/*
 * Copyright (c) 2014, Mentor Graphics Corporation
 * Copyright (c) 2015 Xilinx, Inc.
 * Copyright (c) 2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2022 NXP
 * Copyright 2021 ACRIOS Systems s.r.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "rpmsg_lite.h"
#include "rpmsg_platform.h"

/* Interface which is used to interact with the virtqueue layer;
 * a different interface is used when the local processor is the MASTER
 * and when it is the REMOTE.
 */
struct virtqueue_ops
{
    void (*vq_tx)(struct virtqueue *vq, void *buffer, uint32_t len, uint16_t idx);
    void *(*vq_tx_alloc)(struct virtqueue *vq, uint32_t *len, uint16_t *idx);
    void *(*vq_rx)(struct virtqueue *vq, uint32_t *len, uint16_t *idx);
    void (*vq_rx_free)(struct virtqueue *vq, void *buffer, uint32_t len, uint16_t idx);
};

/* Zero-copy extension macros */
#define RPMSG_STD_MSG_FROM_BUF(buf) \
    (struct rpmsg_std_msg *)(void *)((char *)(buf) - offsetof(struct rpmsg_std_msg, data))
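
/*
 * Editor's sketch of how the macro above is used by the zero-copy API: the
 * application only ever sees the payload pointer (rpmsg_std_msg.data), and
 * stepping back by offsetof(struct rpmsg_std_msg, data) recovers the whole
 * message, including its header (my_instance is a hypothetical instance
 * pointer; RL_BLOCK is the blocking-timeout macro from rpmsg_lite.h):
 *
 *   void *payload = rpmsg_lite_alloc_tx_buffer(my_instance, &size, RL_BLOCK);
 *   struct rpmsg_std_msg *msg = RPMSG_STD_MSG_FROM_BUF(payload);
 *   // msg->hdr is the rpmsg_std_hdr that precedes the payload in the
 *   // shared-memory buffer.
 */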
#if !(defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1))
/* Check RL_BUFFER_COUNT and RL_BUFFER_SIZE only when RL_ALLOW_CUSTOM_SHMEM_CONFIG is not set to 1 */
#if (!RL_BUFFER_COUNT) || (RL_BUFFER_COUNT & (RL_BUFFER_COUNT - 1))
#error "RL_BUFFER_COUNT must be a power of two (2, 4, ...)"
#endif

/* Each buffer is formed by the payload and struct rpmsg_std_hdr */
#define RL_BUFFER_SIZE (RL_BUFFER_PAYLOAD_SIZE + 16UL)

#if (!RL_BUFFER_SIZE) || (RL_BUFFER_SIZE & (RL_BUFFER_SIZE - 1))
#error \
    "RL_BUFFER_SIZE must be a power of two (256, 512, ...);" \
    "RL_BUFFER_PAYLOAD_SIZE must be equal to (240, 496, 1008, ...) [2^n - 16]."
#endif
#endif /* !(defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)) */
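
/*
 * Sizing example (editor's note): each buffer carries the 16-byte
 * rpmsg_std_hdr in front of the payload, so a power-of-two RL_BUFFER_SIZE
 * requires RL_BUFFER_PAYLOAD_SIZE = 2^n - 16, for example:
 *
 *   RL_BUFFER_PAYLOAD_SIZE = 496  ->  RL_BUFFER_SIZE = 496 + 16  = 512
 *   RL_BUFFER_PAYLOAD_SIZE = 1008 ->  RL_BUFFER_SIZE = 1008 + 16 = 1024
 */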
/*!
 * @brief
 * Traverses the linked list of endpoints to find the one with the given address.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param addr           Local endpoint address
 *
 * @return RL_NULL if not found, node pointer containing the ept on success
 */
static struct llist *rpmsg_lite_get_endpoint_from_addr(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t addr)
{
    struct llist *rl_ept_lut_head;

    rl_ept_lut_head = rpmsg_lite_dev->rl_endpoints;
    while (rl_ept_lut_head != RL_NULL)
    {
        struct rpmsg_lite_endpoint *rl_ept = (struct rpmsg_lite_endpoint *)rl_ept_lut_head->data;
        if (rl_ept->addr == addr)
        {
            return rl_ept_lut_head;
        }
        rl_ept_lut_head = rl_ept_lut_head->next;
    }
    return RL_NULL;
}
/***************************************************************
                    C A L L B A C K S
****************************************************************/

/*!
 * @brief
 * Called when the remote side calls virtqueue_kick()
 * at its transmit virtqueue.
 * In this callback, buffers are read out
 * of the rvq and the user callback is invoked.
 *
 * @param vq Virtqueue affected by the kick
 */
static void rpmsg_lite_rx_callback(struct virtqueue *vq)
{
    struct rpmsg_std_msg *rpmsg_msg;
    uint32_t len;
    uint16_t idx;
    struct rpmsg_lite_endpoint *ept;
    int32_t cb_ret;
    struct llist *node;
    struct rpmsg_lite_instance *rpmsg_lite_dev = (struct rpmsg_lite_instance *)vq->priv;
#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
    uint32_t rx_freed = RL_FALSE;
#endif

    RL_ASSERT(rpmsg_lite_dev != RL_NULL);

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_lock_mutex(rpmsg_lite_dev->lock);
#endif

    /* Process the data received from the remote node */
    rpmsg_msg = (struct rpmsg_std_msg *)rpmsg_lite_dev->vq_ops->vq_rx(rpmsg_lite_dev->rvq, &len, &idx);
    while (rpmsg_msg != RL_NULL)
    {
        node = rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, rpmsg_msg->hdr.dst);

        cb_ret = RL_RELEASE;
        if (node != RL_NULL)
        {
            ept    = (struct rpmsg_lite_endpoint *)node->data;
            cb_ret = ept->rx_cb(rpmsg_msg->data, rpmsg_msg->hdr.len, rpmsg_msg->hdr.src, ept->rx_cb_data);
        }

        if (cb_ret == RL_HOLD)
        {
            rpmsg_msg->hdr.reserved.idx = idx;
        }
        else
        {
            rpmsg_lite_dev->vq_ops->vq_rx_free(rpmsg_lite_dev->rvq, rpmsg_msg, len, idx);
#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
            rx_freed = RL_TRUE;
#endif
        }
        rpmsg_msg = (struct rpmsg_std_msg *)rpmsg_lite_dev->vq_ops->vq_rx(rpmsg_lite_dev->rvq, &len, &idx);
#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
        if ((rpmsg_msg == RL_NULL) && (rx_freed == RL_TRUE))
        {
            /* Let the remote device know that some buffers have been freed */
            virtqueue_kick(rpmsg_lite_dev->rvq);
        }
#endif
    }

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_unlock_mutex(rpmsg_lite_dev->lock);
#endif
}
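
/*
 * Sketch of a user rx callback (hypothetical names): the return value steers
 * buffer ownership in rpmsg_lite_rx_callback() above. RL_RELEASE hands the
 * buffer straight back to the virtqueue; RL_HOLD keeps it alive for deferred
 * processing (zero-copy builds only) until rpmsg_lite_release_rx_buffer()
 * returns it.
 *
 *   static int32_t my_rx_cb(void *payload, uint32_t payload_len,
 *                           uint32_t src, void *priv)
 *   {
 *       if (defer_processing(payload, payload_len)) // hypothetical helper
 *       {
 *           return RL_HOLD;    // buffer stays out of the ring for now
 *       }
 *       return RL_RELEASE;     // buffer is recycled immediately
 *   }
 */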
/*!
 * @brief
 * Called when the remote side calls virtqueue_kick()
 * at its receive virtqueue.
 *
 * @param vq Virtqueue affected by the kick
 */
static void rpmsg_lite_tx_callback(struct virtqueue *vq)
{
    struct rpmsg_lite_instance *rpmsg_lite_dev = (struct rpmsg_lite_instance *)vq->priv;

    RL_ASSERT(rpmsg_lite_dev != RL_NULL);
    rpmsg_lite_dev->link_state = 1U;
    env_tx_callback(rpmsg_lite_dev->link_id);
}
/****************************************************************************
                       V Q   H A N D L I N G
            In case this processor has the REMOTE role
*****************************************************************************/

/*!
 * @brief
 * Places the buffer on the virtqueue for consumption by the other side.
 *
 * @param tvq    Virtqueue to use
 * @param buffer Buffer pointer
 * @param len    Buffer length
 * @param idx    Buffer index
 */
static void vq_tx_remote(struct virtqueue *tvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;

    status = virtqueue_add_consumed_buffer(tvq, idx, len);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function cannot fail.
     * That condition is always met, so nothing needs to be returned here. */
}
/*!
 * @brief
 * Provides a buffer for transmitting messages.
 *
 * @param tvq Virtqueue to use
 * @param len Length of the returned buffer
 * @param idx Buffer index
 *
 * @return Pointer to the buffer
 */
static void *vq_tx_alloc_remote(struct virtqueue *tvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_available_buffer(tvq, idx, len);
}
/*!
 * @brief
 * Retrieves the received buffer from the virtqueue.
 *
 * @param rvq Virtqueue to use
 * @param len Size of the received buffer
 * @param idx Index of the buffer
 *
 * @return Pointer to the received buffer
 */
static void *vq_rx_remote(struct virtqueue *rvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_available_buffer(rvq, idx, len);
}
/*!
 * @brief
 * Places the used buffer back on the virtqueue.
 *
 * @param rvq    Virtqueue to use
 * @param buffer Buffer pointer
 * @param len    Size of the received buffer
 * @param idx    Index of the buffer
 */
static void vq_rx_free_remote(struct virtqueue *rvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;

#if defined(RL_CLEAR_USED_BUFFERS) && (RL_CLEAR_USED_BUFFERS == 1)
    env_memset(buffer, 0x00, len);
#endif
    status = virtqueue_add_consumed_buffer(rvq, idx, len);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function cannot fail.
     * That condition is always met, so nothing needs to be returned here. */
}
/****************************************************************************
                       V Q   H A N D L I N G
            In case this processor has the MASTER role
*****************************************************************************/

/*!
 * @brief
 * Places the buffer on the virtqueue for consumption by the other side.
 *
 * @param tvq    Virtqueue to use
 * @param buffer Buffer pointer
 * @param len    Buffer length
 * @param idx    Buffer index
 */
static void vq_tx_master(struct virtqueue *tvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;

    status = virtqueue_add_buffer(tvq, idx);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function cannot fail.
     * That condition is always met, so nothing needs to be returned here. */
}
/*!
 * @brief
 * Provides a buffer for transmitting messages.
 *
 * @param tvq Virtqueue to use
 * @param len Length of the returned buffer
 * @param idx Buffer index
 *
 * @return Pointer to the buffer
 */
static void *vq_tx_alloc_master(struct virtqueue *tvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_buffer(tvq, len, idx);
}
/*!
 * @brief
 * Retrieves the received buffer from the virtqueue.
 *
 * @param rvq Virtqueue to use
 * @param len Size of received buffer
 * @param idx Index of buffer
 *
 * @return Pointer to received buffer
 */
static void *vq_rx_master(struct virtqueue *rvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_buffer(rvq, len, idx);
}
/*!
 * @brief
 * Places the used buffer back on the virtqueue.
 *
 * @param rvq    Virtqueue to use
 * @param buffer Buffer pointer
 * @param len    Size of received buffer
 * @param idx    Index of buffer
 */
static void vq_rx_free_master(struct virtqueue *rvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;

#if defined(RL_CLEAR_USED_BUFFERS) && (RL_CLEAR_USED_BUFFERS == 1)
    env_memset(buffer, 0x00, len);
#endif
    status = virtqueue_add_buffer(rvq, idx);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function cannot fail.
     * That condition is always met, so nothing needs to be returned here. */
}
/* Interface used in case this processor is MASTER */
static const struct virtqueue_ops master_vq_ops = {
    vq_tx_master,
    vq_tx_alloc_master,
    vq_rx_master,
    vq_rx_free_master,
};

/* Interface used in case this processor is REMOTE */
static const struct virtqueue_ops remote_vq_ops = {
    vq_tx_remote,
    vq_tx_alloc_remote,
    vq_rx_remote,
    vq_rx_free_remote,
};

/* helper function for virtqueue notification */
static void virtqueue_notify(struct virtqueue *vq)
{
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    struct rpmsg_lite_instance *inst = vq->priv;
    platform_notify(inst->env ? env_get_platform_context(inst->env) : RL_NULL, vq->vq_queue_index);
#else
    platform_notify(vq->vq_queue_index);
#endif
}
/*************************************************
                 E P T   N E W
**************************************************/
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                                  uint32_t addr,
                                                  rl_ept_rx_cb_t rx_cb,
                                                  void *rx_cb_data,
                                                  struct rpmsg_lite_ept_static_context *ept_context)
#else
struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                                  uint32_t addr,
                                                  rl_ept_rx_cb_t rx_cb,
                                                  void *rx_cb_data)
#endif
{
    struct rpmsg_lite_endpoint *rl_ept;
    struct llist *node;
    uint32_t i;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }

    env_lock_mutex(rpmsg_lite_dev->lock);
    {
        if (addr == RL_ADDR_ANY)
        {
            /* find lowest free address */
            for (i = 1; i < 0xFFFFFFFFU; i++)
            {
                if (rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, i) == RL_NULL)
                {
                    addr = i;
                    break;
                }
            }
            if (addr == RL_ADDR_ANY)
            {
                /* no address is free, cannot happen normally */
                env_unlock_mutex(rpmsg_lite_dev->lock);
                return RL_NULL;
            }
        }
        else
        {
            if (rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, addr) != RL_NULL)
            {
                /* Already exists! */
                env_unlock_mutex(rpmsg_lite_dev->lock);
                return RL_NULL;
            }
        }

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        if (ept_context == RL_NULL)
        {
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }
        rl_ept = &(ept_context->ept);
        node   = &(ept_context->node);
#else
        rl_ept = env_allocate_memory(sizeof(struct rpmsg_lite_endpoint));
        if (rl_ept == RL_NULL)
        {
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }
        node = env_allocate_memory(sizeof(struct llist));
        if (node == RL_NULL)
        {
            env_free_memory(rl_ept);
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }
#endif /* RL_USE_STATIC_API */

        env_memset(rl_ept, 0x00, sizeof(struct rpmsg_lite_endpoint));

        rl_ept->addr       = addr;
        rl_ept->rx_cb      = rx_cb;
        rl_ept->rx_cb_data = rx_cb_data;

        node->data = rl_ept;

        add_to_list((struct llist **)&rpmsg_lite_dev->rl_endpoints, node);
    }
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return rl_ept;
}
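
/*
 * Usage sketch (dynamic API; my_instance and my_rx_cb are hypothetical):
 *
 *   struct rpmsg_lite_endpoint *ept =
 *       rpmsg_lite_create_ept(my_instance, 30U, my_rx_cb, RL_NULL);
 *   if (ept == RL_NULL) { } // address already taken or out of memory
 *
 * Passing RL_ADDR_ANY instead of a fixed address makes the function assign
 * the lowest free local address, as implemented above.
 */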
/*************************************************
                 E P T   D E L
**************************************************/
int32_t rpmsg_lite_destroy_ept(struct rpmsg_lite_instance *rpmsg_lite_dev, struct rpmsg_lite_endpoint *rl_ept)
{
    struct llist *node;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (rl_ept == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    env_lock_mutex(rpmsg_lite_dev->lock);
    node = rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, rl_ept->addr);
    if (node != RL_NULL)
    {
        remove_from_list((struct llist **)&rpmsg_lite_dev->rl_endpoints, node);
        env_unlock_mutex(rpmsg_lite_dev->lock);
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(node);
        env_free_memory(rl_ept);
#endif
        return RL_SUCCESS;
    }
    else
    {
        env_unlock_mutex(rpmsg_lite_dev->lock);
        return RL_ERR_PARAM;
    }
}
/******************************************
               T X   A P I
*******************************************/
uint32_t rpmsg_lite_is_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return 0U;
    }
    return (RL_TRUE == rpmsg_lite_dev->link_state ? RL_TRUE : RL_FALSE);
}

uint32_t rpmsg_lite_wait_for_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t timeout)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return 0U;
    }
    return env_wait_for_link_up(&rpmsg_lite_dev->link_state, rpmsg_lite_dev->link_id, timeout);
}
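
/*
 * Example (sketch): a remote-side application typically blocks here until
 * the master's first kick runs rpmsg_lite_tx_callback() and sets link_state.
 * This assumes the RL_BLOCK timeout macro from rpmsg_lite.h and a
 * RL_TRUE-on-success return from env_wait_for_link_up():
 *
 *   if (RL_TRUE != rpmsg_lite_wait_for_link_up(my_instance, RL_BLOCK))
 *   { } // gave up waiting
 */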
/*!
 * @brief
 * Internal function that formats an RPMsg-compatible
 * message and sends it.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param src            Local endpoint address
 * @param dst            Remote endpoint address
 * @param data           Payload buffer
 * @param size           Size of the payload, in bytes
 * @param flags          Value of the flags field
 * @param timeout        Timeout in ms, 0 if nonblocking
 *
 * @return Status of function execution, RL_SUCCESS on success
 */
static int32_t rpmsg_lite_format_message(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                         uint32_t src,
                                         uint32_t dst,
                                         char *data,
                                         uint32_t size,
                                         uint32_t flags,
                                         uint32_t timeout)
{
    struct rpmsg_std_msg *rpmsg_msg;
    void *buffer;
    uint16_t idx;
    uint32_t tick_count = 0U;
    uint32_t buff_len;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }
    if (data == RL_NULL)
    {
        return RL_ERR_PARAM;
    }
    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        return RL_NOT_READY;
    }

    /* Lock the device to enable exclusive access to virtqueues */
    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Get an rpmsg buffer for sending the message. */
    buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, &buff_len, &idx);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    if ((buffer == RL_NULL) && (timeout == RL_FALSE))
    {
        return RL_ERR_NO_MEM;
    }

    while (buffer == RL_NULL)
    {
        env_sleep_msec(RL_MS_PER_INTERVAL);
        env_lock_mutex(rpmsg_lite_dev->lock);
        buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, &buff_len, &idx);
        env_unlock_mutex(rpmsg_lite_dev->lock);
        tick_count += (uint32_t)RL_MS_PER_INTERVAL;
        if ((tick_count >= timeout) && (buffer == RL_NULL))
        {
            return RL_ERR_NO_MEM;
        }
    }

    rpmsg_msg = (struct rpmsg_std_msg *)buffer;

    /* Initialize the RPMsg header. */
    rpmsg_msg->hdr.dst   = dst;
    rpmsg_msg->hdr.src   = src;
    rpmsg_msg->hdr.len   = (uint16_t)size;
    rpmsg_msg->hdr.flags = (uint16_t)(flags & 0xFFFFU);

    /* Copy the data to the rpmsg buffer. */
    env_memcpy(rpmsg_msg->data, data, size);

    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Enqueue the buffer on the virtqueue. */
    rpmsg_lite_dev->vq_ops->vq_tx(rpmsg_lite_dev->tvq, buffer, buff_len, idx);
    /* Let the other side know that there is a job to process. */
    virtqueue_kick(rpmsg_lite_dev->tvq);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}
int32_t rpmsg_lite_send(struct rpmsg_lite_instance *rpmsg_lite_dev,
                        struct rpmsg_lite_endpoint *ept,
                        uint32_t dst,
                        char *data,
                        uint32_t size,
                        uintptr_t timeout)
{
    if (ept == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    // FIXME: maybe just copy the data up to the buffer length and Tx it.
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE(rpmsg_lite_dev->link_id))
#else
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE)
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    {
        return RL_ERR_BUFF_SIZE;
    }

    return rpmsg_lite_format_message(rpmsg_lite_dev, ept->addr, dst, data, size, RL_NO_FLAGS, timeout);
}
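
/*
 * Usage sketch: copy-send a payload to remote endpoint 30, waiting up to
 * 100 ms for a free TX buffer (per rpmsg_lite_format_message() above, a
 * timeout of 0 makes the call non-blocking):
 *
 *   const char greeting[] = "hello";
 *   int32_t ret = rpmsg_lite_send(my_instance, ept, 30U, (char *)greeting,
 *                                 sizeof(greeting), 100U);
 *   if (ret != RL_SUCCESS) { } // RL_ERR_*, RL_NOT_READY
 */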
#if defined(RL_API_HAS_ZEROCOPY) && (RL_API_HAS_ZEROCOPY == 1)
void *rpmsg_lite_alloc_tx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t *size, uintptr_t timeout)
{
    struct rpmsg_std_msg *rpmsg_msg;
    void *buffer;
    uint16_t idx;
    uint32_t tick_count = 0U;

    if (size == RL_NULL)
    {
        return RL_NULL;
    }
    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        *size = 0;
        return RL_NULL;
    }

    /* Lock the device to enable exclusive access to virtqueues */
    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Get an rpmsg buffer for sending the message. */
    buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, size, &idx);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    if ((buffer == RL_NULL) && (timeout == RL_FALSE))
    {
        *size = 0;
        return RL_NULL;
    }

    while (buffer == RL_NULL)
    {
        env_sleep_msec(RL_MS_PER_INTERVAL);
        env_lock_mutex(rpmsg_lite_dev->lock);
        buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, size, &idx);
        env_unlock_mutex(rpmsg_lite_dev->lock);
        tick_count += (uint32_t)RL_MS_PER_INTERVAL;
        if ((tick_count >= timeout) && (buffer == RL_NULL))
        {
            *size = 0;
            return RL_NULL;
        }
    }

    rpmsg_msg = (struct rpmsg_std_msg *)buffer;

    /* Keep the idx and total-length information for the nocopy tx function */
    rpmsg_msg->hdr.reserved.idx = idx;

    /* Return the maximum payload size */
    *size -= sizeof(struct rpmsg_std_hdr);

    return rpmsg_msg->data;
}
int32_t rpmsg_lite_send_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev,
                               struct rpmsg_lite_endpoint *ept,
                               uint32_t dst,
                               void *data,
                               uint32_t size)
{
    struct rpmsg_std_msg *rpmsg_msg;
    uint32_t src;

    if ((ept == RL_NULL) || (data == RL_NULL))
    {
        return RL_ERR_PARAM;
    }

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE(rpmsg_lite_dev->link_id))
#else
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE)
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    {
        return RL_ERR_BUFF_SIZE;
    }

    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        return RL_NOT_READY;
    }

    src       = ept->addr;
    rpmsg_msg = RPMSG_STD_MSG_FROM_BUF(data);

#if defined(RL_DEBUG_CHECK_BUFFERS) && (RL_DEBUG_CHECK_BUFFERS == 1)
    /* Check that the to-be-sent buffer is in the VirtIO ring descriptors list */
    int32_t idx = rpmsg_lite_dev->tvq->vq_nentries - 1;
    while ((idx >= 0) && (rpmsg_lite_dev->tvq->vq_ring.desc[idx].addr != (uint64_t)rpmsg_msg))
    {
        idx--;
    }
    RL_ASSERT(idx >= 0);
#endif

    /* Initialize RPMSG header. */
    rpmsg_msg->hdr.dst   = dst;
    rpmsg_msg->hdr.src   = src;
    rpmsg_msg->hdr.len   = (uint16_t)size;
    rpmsg_msg->hdr.flags = (uint16_t)(RL_NO_FLAGS & 0xFFFFU);

    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Enqueue buffer on virtqueue. */
    rpmsg_lite_dev->vq_ops->vq_tx(
        rpmsg_lite_dev->tvq, (void *)rpmsg_msg,
        (uint32_t)virtqueue_get_buffer_length(rpmsg_lite_dev->tvq, rpmsg_msg->hdr.reserved.idx),
        rpmsg_msg->hdr.reserved.idx);
    /* Let the other side know that there is a job to process. */
    virtqueue_kick(rpmsg_lite_dev->tvq);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}
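
/*
 * Zero-copy TX sketch: allocate a shared-memory buffer, fill it in place and
 * hand it over. Ownership passes to rpmsg_lite_send_nocopy(), so the pointer
 * must not be reused or freed by the application afterwards (my_payload and
 * my_payload_len are hypothetical):
 *
 *   uint32_t size;
 *   void *tx = rpmsg_lite_alloc_tx_buffer(my_instance, &size, RL_BLOCK);
 *   if (tx != RL_NULL)
 *   {
 *       env_memcpy(tx, my_payload, my_payload_len); // my_payload_len <= size
 *       (void)rpmsg_lite_send_nocopy(my_instance, ept, 30U, tx, my_payload_len);
 *   }
 */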
/******************************************
               R X   A P I
*******************************************/
int32_t rpmsg_lite_release_rx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, void *rxbuf)
{
    struct rpmsg_std_msg *rpmsg_msg;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }
    if (rxbuf == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    rpmsg_msg = RPMSG_STD_MSG_FROM_BUF(rxbuf);

#if defined(RL_DEBUG_CHECK_BUFFERS) && (RL_DEBUG_CHECK_BUFFERS == 1)
    /* Check that the to-be-released buffer is in the VirtIO ring descriptors list */
    int32_t idx = rpmsg_lite_dev->rvq->vq_nentries - 1;
    while ((idx >= 0) && (rpmsg_lite_dev->rvq->vq_ring.desc[idx].addr != (uint64_t)rpmsg_msg))
    {
        idx--;
    }
    RL_ASSERT(idx >= 0);
#endif

    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Return used buffer, with total length (header length + buffer size). */
    rpmsg_lite_dev->vq_ops->vq_rx_free(
        rpmsg_lite_dev->rvq, rpmsg_msg,
        (uint32_t)virtqueue_get_buffer_length(rpmsg_lite_dev->rvq, rpmsg_msg->hdr.reserved.idx),
        rpmsg_msg->hdr.reserved.idx);
#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
    /* Let the remote device know that a buffer has been freed */
    virtqueue_kick(rpmsg_lite_dev->rvq);
#endif
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}
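
/*
 * Zero-copy RX sketch: a callback that returned RL_HOLD keeps the buffer out
 * of the ring; once the deferred consumer is done, the buffer goes back via
 *
 *   (void)rpmsg_lite_release_rx_buffer(my_instance, held_payload);
 *
 * where held_payload is the data pointer originally passed to the rx
 * callback (the enclosing message is recovered with RPMSG_STD_MSG_FROM_BUF).
 */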
#endif /* RL_API_HAS_ZEROCOPY */

/******************************
          I N I T
*****************************/
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr,
                                                   size_t shmem_length,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   struct rpmsg_lite_instance *static_context)
#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
struct rpmsg_lite_instance *rpmsg_lite_master_init(
    void *shmem_addr, size_t shmem_length, uint32_t link_id, uint32_t init_flags, void *env_cfg)
#else
struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr,
                                                   size_t shmem_length,
                                                   uint32_t link_id,
                                                   uint32_t init_flags)
#endif
{
    int32_t status;
    void (*callback[2])(struct virtqueue *vq);
    const char *vq_names[2];
    struct vring_alloc_info ring_info;
    struct virtqueue *vqs[2] = {0};
    void *buffer;
    uint32_t idx, j;
    struct rpmsg_lite_instance *rpmsg_lite_dev = RL_NULL;

    if (link_id > RL_PLATFORM_HIGHEST_LINK_ID)
    {
        return RL_NULL;
    }

    if (shmem_addr == RL_NULL)
    {
        return RL_NULL;
    }

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    /* Get the custom shmem configuration defined per each rpmsg_lite instance
       (i.e. per each link id) from the platform layer */
    rpmsg_platform_shmem_config_t shmem_config = {0};
    if (RL_SUCCESS != platform_get_custom_shmem_config(link_id, &shmem_config))
    {
        return RL_NULL;
    }
    /* shmem_config.buffer_count must be a power of two (2, 4, ...) */
    if (0U != (shmem_config.buffer_count & (shmem_config.buffer_count - 1U)))
    {
        return RL_NULL;
    }
    /* The buffer size must be a power of two (256, 512, ...) */
    if (0U != ((shmem_config.buffer_payload_size + 16UL) & ((shmem_config.buffer_payload_size + 16UL) - 1U)))
    {
        return RL_NULL;
    }
    if ((2U * (uint32_t)shmem_config.buffer_count) >
        ((RL_WORD_ALIGN_DOWN(shmem_length - 2U * shmem_config.vring_size)) /
         (uint32_t)(shmem_config.buffer_payload_size + 16UL)))
    {
        return RL_NULL;
    }
#else
    if ((2U * (uint32_t)RL_BUFFER_COUNT) >
        ((RL_WORD_ALIGN_DOWN(shmem_length - (uint32_t)RL_VRING_OVERHEAD)) / (uint32_t)RL_BUFFER_SIZE))
    {
        return RL_NULL;
    }
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    if (static_context == RL_NULL)
    {
        return RL_NULL;
    }
    rpmsg_lite_dev = static_context;
#else
    rpmsg_lite_dev = env_allocate_memory(sizeof(struct rpmsg_lite_instance));
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }
#endif

    env_memset(rpmsg_lite_dev, 0, sizeof(struct rpmsg_lite_instance));

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    status = env_init(&rpmsg_lite_dev->env, env_cfg);
#else
    status = env_init();
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(rpmsg_lite_dev); /* coco validated: not able to force the application to reach this line */
#endif
        return RL_NULL; /* coco validated: not able to force the application to reach this line */
    }
    rpmsg_lite_dev->link_id = link_id;

    /*
     * This device is the RPMsg MASTER, so it manages the
     * shared buffers. Create a shared memory pool to handle the buffers.
     */
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    rpmsg_lite_dev->sh_mem_base =
        (char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr + 2U * shmem_config.vring_size);
    rpmsg_lite_dev->sh_mem_remaining = (RL_WORD_ALIGN_DOWN(shmem_length - 2U * shmem_config.vring_size)) /
                                       (uint32_t)(shmem_config.buffer_payload_size + 16UL);
#else
    rpmsg_lite_dev->sh_mem_base = (char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr + (uint32_t)RL_VRING_OVERHEAD);
    rpmsg_lite_dev->sh_mem_remaining =
        (RL_WORD_ALIGN_DOWN(shmem_length - (uint32_t)RL_VRING_OVERHEAD)) / (uint32_t)RL_BUFFER_SIZE;
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    rpmsg_lite_dev->sh_mem_total = rpmsg_lite_dev->sh_mem_remaining;

    /* Initialize names and callbacks */
    vq_names[0] = "rx_vq";
    vq_names[1] = "tx_vq";
    callback[0] = rpmsg_lite_rx_callback;
    callback[1] = rpmsg_lite_tx_callback;
    rpmsg_lite_dev->vq_ops = &master_vq_ops;

    /* Create virtqueue for each vring. */
    for (idx = 0U; idx < 2U; idx++)
    {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
        ring_info.phy_addr  = (void *)(char *)((uintptr_t)(char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr) +
                                               (uint32_t)((idx == 0U) ? (0U) : (shmem_config.vring_size)));
        ring_info.align     = shmem_config.vring_align;
        ring_info.num_descs = shmem_config.buffer_count;
#else
        ring_info.phy_addr  = (void *)(char *)((uintptr_t)(char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr) +
                                               (uint32_t)((idx == 0U) ? (0U) : (VRING_SIZE)));
        ring_info.align     = VRING_ALIGN;
        ring_info.num_descs = RL_BUFFER_COUNT;
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

        env_memset((void *)ring_info.phy_addr, 0x00, (uint32_t)vring_size(ring_info.num_descs, ring_info.align));

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        status = virtqueue_create_static((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info,
                                         callback[idx], virtqueue_notify, &vqs[idx],
                                         (struct vq_static_context *)&rpmsg_lite_dev->vq_ctxt[idx]);
#else
        status = virtqueue_create((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info, callback[idx],
                                  virtqueue_notify, &vqs[idx]);
#endif /* RL_USE_STATIC_API */
        if (status == RL_SUCCESS)
        {
            /* Initialize vring control block in virtqueue. */
            vq_ring_init(vqs[idx]);

            /* Disable callbacks - will be enabled by the application
             * once initialization is completed.
             */
            virtqueue_disable_cb(vqs[idx]);
        }
        else
        {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
            /* Free all already allocated memory for virtqueues */
            for (uint32_t a = 0U; a < 2U; a++)
            {
                if (RL_NULL != vqs[a])
                {
                    virtqueue_free(vqs[a]);
                }
            }
            env_free_memory(rpmsg_lite_dev);
#endif
            return RL_NULL;
        }

        /* virtqueue has reference to the RPMsg Lite instance */
        vqs[idx]->priv = (void *)rpmsg_lite_dev;
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        vqs[idx]->env = rpmsg_lite_dev->env;
#endif
    }

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1, &rpmsg_lite_dev->lock_static_ctxt);
#else
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1);
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        /* Free all already allocated memory for virtqueues */
        for (uint32_t b = 0U; b < 2U; b++)
        {
            virtqueue_free(vqs[b]);
        }
        env_free_memory(rpmsg_lite_dev);
#endif
        return RL_NULL;
    }
    // FIXME: a better way to handle this - tx for master is rx for remote and vice versa.
    rpmsg_lite_dev->tvq = vqs[1];
    rpmsg_lite_dev->rvq = vqs[0];
    for (j = 0U; j < 2U; j++)
    {
        for (idx = 0U; ((idx < vqs[j]->vq_nentries) && (idx < rpmsg_lite_dev->sh_mem_total)); idx++)
        {
            /* Initialize TX virtqueue buffers for the remote device */
            buffer = (rpmsg_lite_dev->sh_mem_remaining > 0U) ?
                         (rpmsg_lite_dev->sh_mem_base +
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                          (uint32_t)(shmem_config.buffer_payload_size + 16UL) *
                              (rpmsg_lite_dev->sh_mem_total - rpmsg_lite_dev->sh_mem_remaining--)) :
#else
                          (uint32_t)RL_BUFFER_SIZE *
                              (rpmsg_lite_dev->sh_mem_total - rpmsg_lite_dev->sh_mem_remaining--)) :
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
                         (RL_NULL);

            RL_ASSERT(buffer != RL_NULL);

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
            env_memset(buffer, 0x00, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
            env_memset(buffer, 0x00, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            if (vqs[j] == rpmsg_lite_dev->rvq)
            {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                status =
                    virtqueue_fill_avail_buffers(vqs[j], buffer, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
                status = virtqueue_fill_avail_buffers(vqs[j], buffer, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            }
            else if (vqs[j] == rpmsg_lite_dev->tvq)
            {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                status =
                    virtqueue_fill_used_buffers(vqs[j], buffer, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
                status = virtqueue_fill_used_buffers(vqs[j], buffer, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            }
            else
            {
                /* coco begin validated: this branch is never met unless RAM is corrupted */
            }

            if (status != RL_SUCCESS)
            {
                /* Clean up! */
                env_delete_mutex(rpmsg_lite_dev->lock);
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
                for (uint32_t c = 0U; c < 2U; c++)
                {
                    virtqueue_free(vqs[c]);
                }
                env_free_memory(rpmsg_lite_dev);
#endif
                return RL_NULL;
            }
            /* coco end */
        }
    }
    /* Install ISRs */
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 1U;
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    (void)platform_init_interrupt(rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    (void)platform_init_interrupt(rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 1U;
    env_enable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif

    /*
     * Let the remote device know that the Master is ready for
     * communication.
     */
    virtqueue_kick(rpmsg_lite_dev->rvq);

    return rpmsg_lite_dev;
}
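
/*
 * Master bring-up sketch (dynamic API; the RPMSG_LITE_* macros are
 * platform-specific placeholders, not defined in this file):
 *
 *   struct rpmsg_lite_instance *my_instance =
 *       rpmsg_lite_master_init((void *)RPMSG_LITE_SHMEM_BASE,
 *                              RPMSG_LITE_SHMEM_SIZE,
 *                              RPMSG_LITE_LINK_ID, RL_NO_FLAGS);
 *
 * The master owns the buffer pool, so its link goes up immediately
 * (link_state = 1U above) and the final virtqueue_kick() announces
 * readiness to the remote side.
 */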
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   struct rpmsg_lite_instance *static_context)
#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   void *env_cfg)
#else
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr, uint32_t link_id, uint32_t init_flags)
#endif
{
    int32_t status;
    void (*callback[2])(struct virtqueue *vq);
    const char *vq_names[2];
    struct vring_alloc_info ring_info;
    struct virtqueue *vqs[2] = {0};
    uint32_t idx;
    struct rpmsg_lite_instance *rpmsg_lite_dev = RL_NULL;

    if (link_id > RL_PLATFORM_HIGHEST_LINK_ID)
    {
        return RL_NULL;
    }
    if (shmem_addr == RL_NULL)
    {
        return RL_NULL;
    }

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    /* Get the custom shmem configuration defined per each rpmsg_lite instance
       (i.e. per each link id) from the platform layer */
    rpmsg_platform_shmem_config_t shmem_config = {0};
    if (RL_SUCCESS != platform_get_custom_shmem_config(link_id, &shmem_config))
    {
        return RL_NULL;
    }
    /* shmem_config.buffer_count must be a power of two (2, 4, ...) */
    if (0U != (shmem_config.buffer_count & (shmem_config.buffer_count - 1U)))
    {
        return RL_NULL;
    }
    /* The buffer size must be a power of two (256, 512, ...) */
    if (0U != ((shmem_config.buffer_payload_size + 16UL) & ((shmem_config.buffer_payload_size + 16UL) - 1U)))
    {
        return RL_NULL;
    }
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    if (static_context == RL_NULL)
    {
        return RL_NULL;
    }
    rpmsg_lite_dev = static_context;
#else
    rpmsg_lite_dev = env_allocate_memory(sizeof(struct rpmsg_lite_instance));
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }
#endif

    env_memset(rpmsg_lite_dev, 0, sizeof(struct rpmsg_lite_instance));

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    status = env_init(&rpmsg_lite_dev->env, env_cfg);
#else
    status = env_init();
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(rpmsg_lite_dev); /* coco validated: not able to force the application to reach this line */
#endif
        return RL_NULL; /* coco validated: not able to force the application to reach this line */
    }
    rpmsg_lite_dev->link_id = link_id;
    vq_names[0] = "tx_vq"; /* swapped in case of remote */
    vq_names[1] = "rx_vq";
    callback[0] = rpmsg_lite_tx_callback;
    callback[1] = rpmsg_lite_rx_callback;
    rpmsg_lite_dev->vq_ops = &remote_vq_ops;

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    rpmsg_lite_dev->sh_mem_base =
        (char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr + 2U * shmem_config.vring_size);
#else
    rpmsg_lite_dev->sh_mem_base = (char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr + (uint32_t)RL_VRING_OVERHEAD);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    /* Create virtqueue for each vring. */
    for (idx = 0U; idx < 2U; idx++)
    {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
        ring_info.phy_addr  = (void *)(char *)((uintptr_t)(char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr) +
                                               (uint32_t)((idx == 0U) ? (0U) : (shmem_config.vring_size)));
        ring_info.align     = shmem_config.vring_align;
        ring_info.num_descs = shmem_config.buffer_count;
#else
        ring_info.phy_addr  = (void *)(char *)((uintptr_t)(char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr) +
                                               (uint32_t)((idx == 0U) ? (0U) : (VRING_SIZE)));
        ring_info.align     = VRING_ALIGN;
        ring_info.num_descs = RL_BUFFER_COUNT;
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        status = virtqueue_create_static((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info,
                                         callback[idx], virtqueue_notify, &vqs[idx],
                                         (struct vq_static_context *)&rpmsg_lite_dev->vq_ctxt[idx]);
#else
        status = virtqueue_create((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info, callback[idx],
                                  virtqueue_notify, &vqs[idx]);
#endif /* RL_USE_STATIC_API */
        if (status != RL_SUCCESS)
        {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
            /* Free all already allocated memory for virtqueues */
            for (uint32_t a = 0U; a < 2U; a++)
            {
                if (RL_NULL != vqs[a])
                {
                    virtqueue_free(vqs[a]);
                }
            }
            env_free_memory(rpmsg_lite_dev);
#endif
            return RL_NULL;
        }

        /* virtqueue has reference to the RPMsg Lite instance */
        vqs[idx]->priv = (void *)rpmsg_lite_dev;
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        vqs[idx]->env = rpmsg_lite_dev->env;
#endif
    }
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1, &rpmsg_lite_dev->lock_static_ctxt);
#else
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1);
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        /* Free all already allocated memory for virtqueues */
        for (uint32_t b = 0U; b < 2U; b++)
        {
            virtqueue_free(vqs[b]);
        }
        env_free_memory(rpmsg_lite_dev);
#endif
        return RL_NULL;
    }
    // FIXME: a better way to handle this - tx for master is rx for remote and vice versa.
    rpmsg_lite_dev->tvq = vqs[0];
    rpmsg_lite_dev->rvq = vqs[1];

    /* Install ISRs */
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 0;
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    (void)platform_init_interrupt(rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    (void)platform_init_interrupt(rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 0;
    env_enable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif

    return rpmsg_lite_dev;
}
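
/*
 * Remote bring-up sketch (dynamic API; placeholders as in the master example
 * above): the remote maps the same shared memory but does not populate the
 * buffer pool - it starts with link_state = 0 and waits for the master:
 *
 *   struct rpmsg_lite_instance *my_instance =
 *       rpmsg_lite_remote_init((void *)RPMSG_LITE_SHMEM_BASE,
 *                              RPMSG_LITE_LINK_ID, RL_NO_FLAGS);
 *   (void)rpmsg_lite_wait_for_link_up(my_instance, RL_BLOCK);
 */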
/*******************************************
              D E I N I T
********************************************/
int32_t rpmsg_lite_deinit(struct rpmsg_lite_instance *rpmsg_lite_dev)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (!((rpmsg_lite_dev->rvq != RL_NULL) && (rpmsg_lite_dev->tvq != RL_NULL) && (rpmsg_lite_dev->lock != RL_NULL)))
    {
        /* ERROR - trying to deinitialize an uninitialized RPMsg Lite instance? */
        RL_ASSERT((rpmsg_lite_dev->rvq != RL_NULL) && (rpmsg_lite_dev->tvq != RL_NULL) &&
                  (rpmsg_lite_dev->lock != RL_NULL));
        return RL_ERR_PARAM;
    }

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_deinit_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_deinit_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    (void)platform_deinit_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    (void)platform_deinit_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif

    rpmsg_lite_dev->link_state = 0;

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    virtqueue_free_static(rpmsg_lite_dev->rvq);
    virtqueue_free_static(rpmsg_lite_dev->tvq);
#else
    virtqueue_free(rpmsg_lite_dev->rvq);
    virtqueue_free(rpmsg_lite_dev->tvq);
#endif /* RL_USE_STATIC_API */
    rpmsg_lite_dev->rvq = RL_NULL;
    rpmsg_lite_dev->tvq = RL_NULL;

    env_delete_mutex(rpmsg_lite_dev->lock);

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    (void)env_deinit(rpmsg_lite_dev->env);
#else
    (void)env_deinit();
#endif

#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
    env_free_memory(rpmsg_lite_dev);
#endif /* RL_USE_STATIC_API */
    return RL_SUCCESS;
}
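
/*
 * Teardown sketch: destroy endpoints first, then deinitialize the instance.
 * With the dynamic API the instance memory is freed inside
 * rpmsg_lite_deinit(), so the pointer must not be used afterwards:
 *
 *   (void)rpmsg_lite_destroy_ept(my_instance, ept);
 *   (void)rpmsg_lite_deinit(my_instance);
 *   my_instance = RL_NULL;
 */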