virtqueue.c

/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * Copyright (c) 2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "rpmsg_env.h"
#include "virtqueue.h"

/* Prototypes for internal functions. */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx);
static void vq_ring_update_used(struct virtqueue *vq, uint16_t head_idx, uint32_t len);
static uint16_t vq_ring_add_buffer(
    struct virtqueue *vq, struct vring_desc *desc, uint16_t head_idx, void *buffer, uint32_t length);
static int32_t vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc);
static int32_t vq_ring_must_notify_host(struct virtqueue *vq);
static void vq_ring_notify_host(struct virtqueue *vq);
static uint16_t virtqueue_nused(struct virtqueue *vq);

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
/*!
 * virtqueue_create_static - Creates new VirtIO queue - static version
 *
 * @param id          - VirtIO queue ID, must be unique
 * @param name        - Name of VirtIO queue
 * @param ring        - Pointer to vring_alloc_info control block
 * @param callback_fc - Pointer to callback function, invoked
 *                      when message is available on VirtIO queue
 * @param notify_fc   - Pointer to notify function, used to notify
 *                      other side that there is job available for it
 * @param v_queue     - Created VirtIO queue
 * @param vq_ctxt     - Statically allocated virtqueue context
 *
 * @return - Function status
 */
int32_t virtqueue_create_static(uint16_t id,
                                const char *name,
                                struct vring_alloc_info *ring,
                                void (*callback_fc)(struct virtqueue *vq),
                                void (*notify_fc)(struct virtqueue *vq),
                                struct virtqueue **v_queue,
                                struct vq_static_context *vq_ctxt)
{
    struct virtqueue *vq    = VQ_NULL;
    volatile int32_t status = VQUEUE_SUCCESS;
    uint32_t vq_size        = 0U;

    VQ_PARAM_CHK(vq_ctxt == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs == 0U, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1U), status, ERROR_VRING_ALIGN);

    if (status == VQUEUE_SUCCESS)
    {
        vq_size = sizeof(struct virtqueue);
        vq      = &vq_ctxt->vq;

        env_memset(vq, 0x00, vq_size);

        env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
        vq->vq_queue_index = id;
        vq->vq_alignment   = (int32_t)(ring->align);
        vq->vq_nentries    = ring->num_descs;
        vq->callback_fc    = callback_fc;
        vq->notify_fc      = notify_fc;

        /* Indirect descriptor addition is not supported. */
        vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
        vq->vq_ring_mem  = (void *)ring->phy_addr;

        vring_init(&vq->vq_ring, vq->vq_nentries, vq->vq_ring_mem, (uint32_t)vq->vq_alignment);

        *v_queue = vq;
    }

    return (status);
}
#else
/*!
 * virtqueue_create - Creates new VirtIO queue
 *
 * @param id          - VirtIO queue ID, must be unique
 * @param name        - Name of VirtIO queue
 * @param ring        - Pointer to vring_alloc_info control block
 * @param callback_fc - Pointer to callback function, invoked
 *                      when message is available on VirtIO queue
 * @param notify_fc   - Pointer to notify function, used to notify
 *                      other side that there is job available for it
 * @param v_queue     - Created VirtIO queue
 *
 * @return - Function status
 */
int32_t virtqueue_create(uint16_t id,
                         const char *name,
                         struct vring_alloc_info *ring,
                         void (*callback_fc)(struct virtqueue *vq),
                         void (*notify_fc)(struct virtqueue *vq),
                         struct virtqueue **v_queue)
{
    struct virtqueue *vq    = VQ_NULL;
    volatile int32_t status = VQUEUE_SUCCESS;
    uint32_t vq_size        = 0U;

    VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs == 0U, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1U), status, ERROR_VRING_ALIGN);

    if (status == VQUEUE_SUCCESS)
    {
        vq_size = sizeof(struct virtqueue);
        vq      = (struct virtqueue *)env_allocate_memory(vq_size);

        if (vq == VQ_NULL)
        {
            return (ERROR_NO_MEM);
        }

        env_memset(vq, 0x00, vq_size);

        env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
        vq->vq_queue_index = id;
        vq->vq_alignment   = (int32_t)(ring->align);
        vq->vq_nentries    = ring->num_descs;
        vq->callback_fc    = callback_fc;
        vq->notify_fc      = notify_fc;

        /* Indirect descriptor addition is not supported. */
        vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
        vq->vq_ring_mem  = (void *)ring->phy_addr;

        vring_init(&vq->vq_ring, vq->vq_nentries, vq->vq_ring_mem, (uint32_t)vq->vq_alignment);

        *v_queue = vq;
    }

    return (status);
}
#endif /* RL_USE_STATIC_API */

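/*
 * Usage sketch (illustrative only, kept out of the build with #if 0): how a
 * transport layer might create a virtqueue with virtqueue_create(). The ring
 * memory pointer, the callback/notify handlers, the queue size/alignment
 * values and all my_* names are hypothetical placeholders, not part of this
 * file; field types of struct vring_alloc_info are as declared in virtqueue.h.
 */
#if 0
static void my_rx_callback(struct virtqueue *vq)
{
    /* Invoked via virtqueue_notification() when the other side kicks us. */
}

static void my_notify(struct virtqueue *vq)
{
    /* Trigger the platform-specific doorbell/IPI towards the other core. */
}

static int32_t my_create_queue(void *ring_mem, struct virtqueue **out_vq)
{
    struct vring_alloc_info ring_info;

    ring_info.phy_addr  = ring_mem; /* shared-memory area holding the vring   */
    ring_info.align     = 4096U;    /* alignment agreed upon by both sides    */
    ring_info.num_descs = 256U;     /* must be a power of two (checked above) */

    /* With RL_USE_STATIC_API enabled, virtqueue_create_static() would be used instead. */
    return virtqueue_create(0U, "my_vq", &ring_info, my_rx_callback, my_notify, out_vq);
}
#endif
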
/*!
 * virtqueue_add_buffer() - Enqueues new buffer in vring for consumption
 * by other side.
 *
 * @param vq       - Pointer to VirtIO queue control block
 * @param head_idx - Index of buffer to be added to the avail ring
 *
 * @return - Function status
 */
int32_t virtqueue_add_buffer(struct virtqueue *vq, uint16_t head_idx)
{
    volatile int32_t status = VQUEUE_SUCCESS;

    VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);

    VQUEUE_BUSY(vq, avail_write);

    if (status == VQUEUE_SUCCESS)
    {
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);

        /*
         * Update vring_avail control block fields so that other
         * side can get buffer using it.
         */
        vq_ring_update_avail(vq, head_idx);
    }

    VQUEUE_IDLE(vq, avail_write);

    return (status);
}

/*!
 * virtqueue_fill_avail_buffers - Enqueues single buffer in vring, updates avail
 *
 * @param vq     - Pointer to VirtIO queue control block
 * @param buffer - Address of buffer
 * @param len    - Length of buffer
 *
 * @return - Function status
 */
int32_t virtqueue_fill_avail_buffers(struct virtqueue *vq, void *buffer, uint32_t len)
{
    struct vring_desc *dp;
    uint16_t head_idx;

    volatile int32_t status = VQUEUE_SUCCESS;

    VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);

    VQUEUE_BUSY(vq, avail_write);

    if (status == VQUEUE_SUCCESS)
    {
        head_idx = vq->vq_desc_head_idx;
        dp       = &vq->vq_ring.desc[head_idx];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        dp->addr = env_map_vatopa(vq->env, buffer);
#else
        dp->addr = env_map_vatopa(buffer);
#endif
        dp->len   = len;
        dp->flags = VRING_DESC_F_WRITE;

        vq->vq_desc_head_idx++;

        vq_ring_update_avail(vq, head_idx);
    }

    VQUEUE_IDLE(vq, avail_write);

    return (status);
}

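/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * pre-populating the avail ring with receive buffers at start-up and issuing a
 * single kick afterwards. The buffer pool, its layout and the my_* names are
 * hypothetical.
 */
#if 0
static void my_post_rx_buffers(struct virtqueue *vq, void *pool, uint32_t buf_size, uint16_t count)
{
    uint16_t i;

    for (i = 0U; i < count; i++)
    {
        /* Each slot of the pool becomes one writable descriptor in the avail ring. */
        (void)virtqueue_fill_avail_buffers(vq, (void *)((char *)pool + ((uint32_t)i * buf_size)), buf_size);
    }
    virtqueue_kick(vq); /* let the other side know buffers are available */
}
#endif
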
/*!
 * virtqueue_get_buffer - Returns used buffers from VirtIO queue
 *
 * @param vq  - Pointer to VirtIO queue control block
 * @param len - Length of consumed buffer
 * @param idx - Index to buffer descriptor pool
 *
 * @return - Pointer to used buffer
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
{
    struct vring_used_elem *uep;
    uint16_t used_idx, desc_idx;

    if ((vq == VQ_NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx))
    {
        return (VQ_NULL);
    }
    VQUEUE_BUSY(vq, used_read);

    used_idx = (uint16_t)(vq->vq_used_cons_idx & ((uint16_t)(vq->vq_nentries - 1U)));
    uep      = &vq->vq_ring.used->ring[used_idx];

    env_rmb();

    desc_idx = (uint16_t)uep->id;
    if (len != VQ_NULL)
    {
        *len = uep->len;
    }

    if (idx != VQ_NULL)
    {
        *idx = desc_idx;
    }

    vq->vq_used_cons_idx++;

    VQUEUE_IDLE(vq, used_read);

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    return env_map_patova(vq->env, ((uint32_t)(vq->vq_ring.desc[desc_idx].addr)));
#else
    return env_map_patova((uint32_t)(vq->vq_ring.desc[desc_idx].addr));
#endif
}

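/*
 * Usage sketch (illustrative only, kept out of the build with #if 0): draining
 * used buffers on the side that previously made them available.
 * virtqueue_get_buffer() returns VQ_NULL once vq_used_cons_idx catches up with
 * used->idx. process_message() and my_drain_used() are hypothetical names.
 */
#if 0
static void my_drain_used(struct virtqueue *vq)
{
    uint32_t len;
    uint16_t idx;
    void *buf;

    while ((buf = virtqueue_get_buffer(vq, &len, &idx)) != VQ_NULL)
    {
        process_message(buf, len);           /* hypothetical consumer        */
        (void)virtqueue_add_buffer(vq, idx); /* make the buffer available again */
    }
    virtqueue_kick(vq);
}
#endif
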
/*!
 * virtqueue_get_buffer_length - Returns size of a buffer
 *
 * @param vq  - Pointer to VirtIO queue control block
 * @param idx - Index to buffer descriptor pool
 *
 * @return - Buffer length
 */
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
    return vq->vq_ring.desc[idx].len;
}

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
/*!
 * virtqueue_free_static - Frees VirtIO queue resources - static version
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_free_static(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        if (vq->vq_ring_mem != VQ_NULL)
        {
            vq->vq_ring_size = 0;
            vq->vq_ring_mem  = VQ_NULL;
        }
    }
}
#else
/*!
 * virtqueue_free - Frees VirtIO queue resources
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_free(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        if (vq->vq_ring_mem != VQ_NULL)
        {
            vq->vq_ring_size = 0;
            vq->vq_ring_mem  = VQ_NULL;
        }

        env_free_memory(vq);
    }
}
#endif /* RL_USE_STATIC_API */

/*!
 * virtqueue_get_available_buffer - Returns buffer available for use in the
 * VirtIO queue
 *
 * @param vq        - Pointer to VirtIO queue control block
 * @param avail_idx - Pointer to index used in vring desc table
 * @param len       - Length of buffer
 *
 * @return - Pointer to available buffer
 */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx, uint32_t *len)
{
    uint16_t head_idx = 0;
    void *buffer;

    if (vq->vq_available_idx == vq->vq_ring.avail->idx)
    {
        return (VQ_NULL);
    }

    VQUEUE_BUSY(vq, avail_read);

    head_idx   = (uint16_t)(vq->vq_available_idx++ & ((uint16_t)(vq->vq_nentries - 1U)));
    *avail_idx = vq->vq_ring.avail->ring[head_idx];

    env_rmb();

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    buffer = env_map_patova(vq->env, ((uint32_t)(vq->vq_ring.desc[*avail_idx].addr)));
#else
    buffer = env_map_patova((uint32_t)(vq->vq_ring.desc[*avail_idx].addr));
#endif
    *len = vq->vq_ring.desc[*avail_idx].len;

    VQUEUE_IDLE(vq, avail_read);

    return (buffer);
}

/*!
 * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue
 *
 * @param vq       - Pointer to VirtIO queue control block
 * @param head_idx - Index of vring desc containing used buffer
 * @param len      - Length of buffer
 *
 * @return - Function status
 */
int32_t virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx, uint32_t len)
{
    if (head_idx > vq->vq_nentries)
    {
        return (ERROR_VRING_NO_BUFF);
    }

    VQUEUE_BUSY(vq, used_write);
    vq_ring_update_used(vq, head_idx, len);
    VQUEUE_IDLE(vq, used_write);

    return (VQUEUE_SUCCESS);
}

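/*
 * Usage sketch (illustrative only, kept out of the build with #if 0): the
 * mirror of the receive flow on the side that consumes available buffers and
 * returns them through the used ring. handle_payload() and
 * my_consume_available() are hypothetical names.
 */
#if 0
static void my_consume_available(struct virtqueue *vq)
{
    uint16_t idx;
    uint32_t len;
    void *buf;

    while ((buf = virtqueue_get_available_buffer(vq, &idx, &len)) != VQ_NULL)
    {
        handle_payload(buf, len); /* hypothetical consumer */
        (void)virtqueue_add_consumed_buffer(vq, idx, len);
    }
    virtqueue_kick(vq); /* publish the used entries to the other side */
}
#endif
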
/*!
 * virtqueue_fill_used_buffers - Fill used buffer ring
 *
 * @param vq     - Pointer to VirtIO queue control block
 * @param buffer - Buffer to add
 * @param len    - Length of buffer
 *
 * @return - Function status
 */
int32_t virtqueue_fill_used_buffers(struct virtqueue *vq, void *buffer, uint32_t len)
{
    uint16_t head_idx;
    uint16_t idx;

    VQUEUE_BUSY(vq, used_write);

    head_idx = vq->vq_desc_head_idx;
    VQ_RING_ASSERT_VALID_IDX(vq, head_idx);

    /* Enqueue buffer onto the ring. */
    idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, buffer, len);

    vq->vq_desc_head_idx = idx;

    vq_ring_update_used(vq, head_idx, len);

    VQUEUE_IDLE(vq, used_write);

    return (VQUEUE_SUCCESS);
}

/*!
 * virtqueue_enable_cb - Enables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Function status
 */
int32_t virtqueue_enable_cb(struct virtqueue *vq)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    return (vq_ring_enable_interrupt(vq, 0));
}
/* coco end */

/*!
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_disable_cb(struct virtqueue *vq)
{
    VQUEUE_BUSY(vq, avail_write);

    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL)
    {
        /* coco begin validated: This part does not need to be tested because VIRTQUEUE_FLAG_EVENT_IDX is not being
         * utilized in rpmsg_lite implementation */
        vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx - vq->vq_nentries - 1U;
    }
    /* coco end */
    else
    {
        vq->vq_ring.avail->flags |= (uint16_t)VRING_AVAIL_F_NO_INTERRUPT;
    }

    VQUEUE_IDLE(vq, avail_write);
}

/*!
 * virtqueue_kick - Notifies other side that there is buffer available for it.
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_kick(struct virtqueue *vq)
{
    VQUEUE_BUSY(vq, avail_write);

    /* Ensure updated avail->idx is visible to host. */
    env_mb();

    if (0 != vq_ring_must_notify_host(vq))
    {
        vq_ring_notify_host(vq);
    }
    vq->vq_queued_cnt = 0;

    VQUEUE_IDLE(vq, avail_write);
}

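/*
 * Usage sketch (illustrative only, kept out of the build with #if 0): the
 * receiving end of a kick. A platform interrupt handler would typically
 * forward the event to virtqueue_notification(), which runs the callback
 * registered at creation time. The ISR name and the lookup helper are
 * hypothetical.
 */
#if 0
static void my_vring_isr(uint32_t vector)
{
    struct virtqueue *vq = my_lookup_vq_by_vector(vector); /* hypothetical mapping */

    if (vq != VQ_NULL)
    {
        virtqueue_notification(vq); /* invokes vq->callback_fc(vq) */
    }
}
#endif
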
/*!
 * virtqueue_dump - Dumps important virtqueue fields, used for debugging purposes
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_dump(struct virtqueue *vq)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    if (vq == VQ_NULL)
    {
        return;
    }

    env_print(
        "VQ: %s - size=%d; used=%d; queued=%d; "
        "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
        "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
        vq->vq_name, vq->vq_nentries, virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
        vq->vq_ring.avail->idx, vq->vq_used_cons_idx, vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
        vq->vq_ring.used->flags);
}
/* coco end */

/*!
 * virtqueue_get_desc_size - Returns vring descriptor size
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Descriptor length
 */
uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    uint16_t head_idx;
    uint16_t avail_idx;
    uint32_t len;

    if (vq->vq_available_idx == vq->vq_ring.avail->idx)
    {
        return 0;
    }

    head_idx  = (uint16_t)(vq->vq_available_idx & ((uint16_t)(vq->vq_nentries - 1U)));
    avail_idx = vq->vq_ring.avail->ring[head_idx];
    len       = vq->vq_ring.desc[avail_idx].len;

    return (len);
}
/* coco end */

/**************************************************************************
 *                           Helper Functions                             *
 **************************************************************************/

/*!
 *
 * vq_ring_add_buffer
 *
 */
static uint16_t vq_ring_add_buffer(
    struct virtqueue *vq, struct vring_desc *desc, uint16_t head_idx, void *buffer, uint32_t length)
{
    struct vring_desc *dp;

    if (buffer == VQ_NULL)
    {
        return head_idx; /* coco validated: line never reached, vq_ring_add_buffer() is called from
                            rpmsg_lite_master_init() only and the check that the buffer parameter is not null is done
                            before passing the parameter */
    }

    VQASSERT(vq, head_idx != VQ_RING_DESC_CHAIN_END, "premature end of free desc chain");

    dp = &desc[head_idx];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    dp->addr = env_map_vatopa(vq->env, buffer);
#else
    dp->addr = env_map_vatopa(buffer);
#endif
    dp->len   = length;
    dp->flags = VRING_DESC_F_WRITE;

    return (head_idx + 1U);
}

/*!
 *
 * vq_ring_init
 *
 */
void vq_ring_init(struct virtqueue *vq)
{
    struct vring *vr;
    uint32_t i, size;

    size = (uint32_t)(vq->vq_nentries);
    vr   = &vq->vq_ring;

    for (i = 0U; i < size - 1U; i++)
    {
        vr->desc[i].next = (uint16_t)(i + 1U);
    }
    vr->desc[i].next = (uint16_t)VQ_RING_DESC_CHAIN_END;
}

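/*
 * Worked example (illustrative): for a hypothetical 4-entry ring, vq_ring_init()
 * links the descriptors into a single free chain, desc[0].next = 1,
 * desc[1].next = 2, desc[2].next = 3, and terminates it with
 * desc[3].next = VQ_RING_DESC_CHAIN_END.
 */
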
/*!
 *
 * vq_ring_update_avail
 *
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
    uint16_t avail_idx;

    /*
     * Place the head of the descriptor chain into the next slot and make
     * it usable to the host. The chain is made available now rather than
     * deferring to virtqueue_notify() in the hopes that if the host is
     * currently running on another CPU, we can keep it processing the new
     * descriptor.
     */
    avail_idx                          = (uint16_t)(vq->vq_ring.avail->idx & ((uint16_t)(vq->vq_nentries - 1U)));
    vq->vq_ring.avail->ring[avail_idx] = desc_idx;

    env_wmb();

    vq->vq_ring.avail->idx++;

    /* Keep pending count until virtqueue_notify(). */
    vq->vq_queued_cnt++;
}

/*!
 *
 * vq_ring_update_used
 *
 */
static void vq_ring_update_used(struct virtqueue *vq, uint16_t head_idx, uint32_t len)
{
    uint16_t used_idx;
    struct vring_used_elem *used_desc = VQ_NULL;

    /*
     * Place the returned descriptor into the next slot of the used ring and
     * make it visible to the other side. The entry is published now rather
     * than deferring to virtqueue_notify() in the hopes that if the other
     * side is currently running on another CPU, we can keep it processing
     * the returned buffer.
     */
    used_idx       = vq->vq_ring.used->idx & (vq->vq_nentries - 1U);
    used_desc      = &(vq->vq_ring.used->ring[used_idx]);
    used_desc->id  = head_idx;
    used_desc->len = len;

    env_wmb();

    vq->vq_ring.used->idx++;
}

/*!
 *
 * vq_ring_enable_interrupt
 *
 */
static int32_t vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */

    /*
     * Enable interrupts, making sure we get the latest index of
     * what's already been consumed.
     */
    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL)
    {
        vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
    }
    else
    {
        vq->vq_ring.avail->flags &= ~(uint16_t)VRING_AVAIL_F_NO_INTERRUPT;
    }

    env_mb();

    /*
     * Enough items may have already been consumed to meet our threshold
     * since we last checked. Let our caller know so it processes the new
     * entries.
     */
    if (virtqueue_nused(vq) > ndesc)
    {
        return (1);
    }

    return (0);
}
/* coco end */

/*!
 *
 * virtqueue_notification
 *
 */
void virtqueue_notification(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        if (vq->callback_fc != VQ_NULL)
        {
            vq->callback_fc(vq);
        }
    }
}

/*!
 *
 * vq_ring_must_notify_host
 *
 */
static int32_t vq_ring_must_notify_host(struct virtqueue *vq)
{
    uint16_t new_idx, prev_idx;
    uint16_t event_idx;

    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL)
    {
        /* coco begin validated: This part does not need to be tested because VIRTQUEUE_FLAG_EVENT_IDX is not being
         * utilized in rpmsg_lite implementation */
        new_idx   = vq->vq_ring.avail->idx;
        prev_idx  = new_idx - vq->vq_queued_cnt;
        event_idx = (uint16_t)vring_avail_event(&vq->vq_ring);

        return ((vring_need_event(event_idx, new_idx, prev_idx) != 0) ? 1 : 0);
    }
    /* coco end */

    return (((vq->vq_ring.used->flags & ((uint16_t)VRING_USED_F_NO_NOTIFY)) == 0U) ? 1 : 0);
}

/*!
 *
 * vq_ring_notify_host
 *
 */
static void vq_ring_notify_host(struct virtqueue *vq)
{
    if (vq->notify_fc != VQ_NULL)
    {
        vq->notify_fc(vq);
    }
}

/*!
 *
 * virtqueue_nused
 *
 */
static uint16_t virtqueue_nused(struct virtqueue *vq)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    uint16_t used_idx, nused;

    used_idx = vq->vq_ring.used->idx;

    nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
    VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

    return (nused);
}
/* coco end */