lwp_shm.c
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-12     Jesven       first version
 * 2023-02-20     wangxiaoyao  adapt to mm
 * 2025-11-06     ibvqeibob    add Doxygen comments for lwp shared memory APIs
 */

#include <rthw.h>
#include <rtthread.h>

#ifdef ARCH_MM_MMU
#include <lwp.h>
#include <lwp_shm.h>
#include <lwp_mm.h>
#include <lwp_user_mm.h>
#include <mmu.h>
/**
 * @brief Kernel control block for a shared memory segment.
 *
 * Each lwp_shm_struct represents one shared memory region. It records
 * the physical address, size, reference count and key. The embedded
 * mem_obj is used together with the aspace/varea mechanism to handle
 * mapping and page faults for this segment.
 */
struct lwp_shm_struct
{
    struct rt_mem_obj mem_obj;  /**< Memory object interface used by aspace/varea */
    size_t addr;                /**< Physical address of the shared memory; used as next pointer in the free list when unused */
    size_t size;                /**< Size of the shared memory in bytes, page aligned */
    int    ref;                 /**< Reference count of mappings to this shared memory */
    size_t key;                 /**< User-visible key used to look up the shared memory */
};
static struct lwp_avl_struct *shm_tree_key;
static struct lwp_avl_struct *shm_tree_pa;

static int shm_free_list = -1;  /* head of the singly linked list of freed items */
static int shm_id_used   = 0;   /* next never-used index in the static array */

static struct lwp_shm_struct _shm_ary[RT_LWP_SHM_MAX_NR];
static const char *get_shm_name(rt_varea_t varea)
{
    return "user.shm";
}

static void on_shm_varea_open(struct rt_varea *varea)
{
    struct lwp_shm_struct *shm;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    shm->ref += 1;
}

static void on_shm_varea_close(struct rt_varea *varea)
{
    struct lwp_shm_struct *shm;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    shm->ref -= 1;
}
static void on_shm_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    struct lwp_shm_struct *shm;
    int err;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);

    /* map all shared page frames into user space at once */
    void *page = (void *)shm->addr;
    void *pg_paddr = (char *)page + PV_OFFSET;
    err = rt_varea_map_range(varea, varea->start, pg_paddr, shm->size);
    if (err == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
        msg->response.size = shm->size;
        msg->response.vaddr = page;
    }

    return;
}
/*
 * Try to allocate a 'struct lwp_shm_struct' from the free list or the
 * static array.
 */
static int _shm_id_alloc(void)
{
    int id = -1;

    if (shm_free_list != -1)    /* first try the free list */
    {
        id = shm_free_list;
        shm_free_list = (int)_shm_ary[shm_free_list].addr;  /* singly linked */
    }
    else if (shm_id_used < RT_LWP_SHM_MAX_NR)   /* then try the array */
    {
        id = shm_id_used;
        shm_id_used++;
    }
    return id;
}
/* Release the item in the static array back to the free list. */
static void shm_id_free(int id)
{
    /* link the freed item into the singly linked free list */
    _shm_ary[id].addr = (size_t)shm_free_list;
    shm_free_list = id;
}
/* Locate the shared memory through 'key' or create a new one. */
static int _lwp_shmget(size_t key, size_t size, int create)
{
    int id = -1;
    struct lwp_avl_struct *node_key = 0;
    struct lwp_avl_struct *node_pa = 0;
    void *page_addr = 0;
    uint32_t bit = 0;

    /* try to locate the item with the key in the binary tree */
    node_key = lwp_avl_find(key, shm_tree_key);
    if (node_key)
    {
        return (struct lwp_shm_struct *)node_key->data - _shm_ary;  /* the index */
    }

    /* if no such item exists and we are allowed to create one ... */
    if (create)
    {
        struct lwp_shm_struct *p;

        if (!size)
        {
            goto err;
        }

        id = _shm_id_alloc();
        if (id == -1)
        {
            goto err;
        }

        /* allocate pages, rounded up to a power of two, to cover the required size */
        bit = rt_page_bits(size);
        page_addr = rt_pages_alloc_ext(bit, PAGE_ANY_AVAILABLE);    /* virtual address */
        if (!page_addr)
        {
            goto err;
        }

        /* initialize the shared memory structure */
        p = _shm_ary + id;
        p->addr = (size_t)page_addr;
        p->size = (1UL << (bit + ARCH_PAGE_SHIFT));
        p->ref = 0;
        p->key = key;
        p->mem_obj.get_name = get_shm_name;
        p->mem_obj.on_page_fault = on_shm_page_fault;
        p->mem_obj.on_varea_open = on_shm_varea_open;
        p->mem_obj.on_varea_close = on_shm_varea_close;
        p->mem_obj.hint_free = NULL;

        /* then insert it into the balanced binary trees */
        node_key = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct) * 2);
        if (!node_key)
        {
            goto err;
        }
        node_key->avl_key = p->key;
        node_key->data = (void *)p;
        lwp_avl_insert(node_key, &shm_tree_key);
        node_pa = node_key + 1;
        node_pa->avl_key = p->addr;
        node_pa->data = (void *)p;
        lwp_avl_insert(node_pa, &shm_tree_pa);
    }
    return id;

err:
    if (id != -1)
    {
        shm_id_free(id);
    }
    if (page_addr)
    {
        rt_pages_free(page_addr, bit);
    }
    if (node_key)
    {
        rt_free(node_key);
    }
    return -1;
}
/**
 * @brief Get or create a shared memory segment by key.
 *
 * Under the memory management lock, this function looks up an existing
 * shared memory control block by key. If it does not exist and @p create
 * is non-zero, a new segment is created with the requested size, physical
 * pages are allocated and the segment is inserted into the internal index
 * trees.
 *
 * @param[in] key    Key used to identify the shared memory segment.
 * @param[in] size   Requested size in bytes; only effective when creating.
 * @param[in] create Non-zero to allow creation; zero to only search.
 *
 * @return On success, returns a non-negative shared memory id.
 *         On failure, returns -1.
 */
int lwp_shmget(size_t key, size_t size, int create)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shmget(key, size, create);
    rt_mm_unlock();
    return ret;
}
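
/*
 * Usage sketch (illustrative only, not taken from this file): a caller that
 * wants a one-page segment could look it up or create it by key. The key
 * value 0x42 and the size 4096 are assumptions made for the example.
 *
 *     int id = lwp_shmget(0x42, 4096, 1);    // find an existing segment or create one
 *     if (id >= 0)
 *     {
 *         // 'id' can now be passed to lwp_shmat() / lwp_shmrm()
 *     }
 */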
/* Locate the binary tree node_key corresponding to the shared-memory id. */
static struct lwp_avl_struct *shm_id_to_node(int id)
{
    struct lwp_avl_struct *node_key = 0;
    struct lwp_shm_struct *p = RT_NULL;

    /* check id */
    if (id < 0 || id >= RT_LWP_SHM_MAX_NR)
    {
        return RT_NULL;
    }

    p = _shm_ary + id;  /* the address of the shared-memory structure */
    node_key = lwp_avl_find(p->key, shm_tree_key);
    if (!node_key)
    {
        return RT_NULL;
    }
    if (node_key->data != (void *)p)
    {
        return RT_NULL;
    }
    return node_key;
}
/* Free the shared pages, the shared-memory structure and its binary tree nodes. */
static int _lwp_shmrm(int id)
{
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_avl_struct *node_pa = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;
    uint32_t bit = 0;

    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return -1;
    }

    p = (struct lwp_shm_struct *)node_key->data;
    if (p->ref)
    {
        return 0;
    }

    bit = rt_page_bits(p->size);
    rt_pages_free((void *)p->addr, bit);
    lwp_avl_remove(node_key, &shm_tree_key);
    node_pa = node_key + 1;
    lwp_avl_remove(node_pa, &shm_tree_pa);
    rt_free(node_key);
    shm_id_free(id);
    return 0;
}
/**
 * @brief Remove a shared memory segment by id.
 *
 * The internal control block is located by @p id. If the reference count
 * is zero, the physical pages, control block and AVL index nodes are freed.
 * If the segment is still referenced, no memory is actually released.
 *
 * @param[in] id Shared memory id returned by lwp_shmget().
 *
 * @return Returns 0 on success. If @p id is invalid or internal checks
 *         fail, -1 is returned. When the reference count is non-zero,
 *         0 is returned but the segment is not freed.
 */
int lwp_shmrm(int id)
{
    int ret = 0;

    ret = _lwp_shmrm(id);
    return ret;
}
/* Map the shared memory specified by 'id' to the specified virtual address. */
static void *_lwp_shmat(int id, void *shm_vaddr)
{
    int err;
    struct rt_lwp *lwp = RT_NULL;
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;
    void *va = shm_vaddr;

    /* The id is used to locate the node_key in the binary tree, and then get the
     * shared-memory structure linked to the node_key. We don't use the id to refer
     * to the shared-memory structure directly, because the binary tree is used
     * to verify the structure is really in use.
     */
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return RT_NULL;
    }
    p = (struct lwp_shm_struct *)node_key->data;    /* p = &_shm_ary[id] */

    /* map the shared memory into the address space of the current thread */
    lwp = lwp_self();
    if (!lwp)
    {
        return RT_NULL;
    }

    err = rt_aspace_map(lwp->aspace, &va, p->size, MMU_MAP_U_RWCB, MMF_PREFETCH,
                        &p->mem_obj, 0);
    if (err != RT_EOK)
    {
        va = RT_NULL;
    }
    return va;
}
/**
 * @brief Map a shared memory segment into the current LWP.
 *
 * The shared memory control block is located by @p id and mapped into the
 * user address space of the current LWP. If @p shm_vaddr is not RT_NULL,
 * the system tries to map the segment at the specified virtual address,
 * which must be page aligned.
 *
 * @param[in] id        Shared memory id returned by lwp_shmget().
 * @param[in] shm_vaddr Desired user virtual address; if RT_NULL, the
 *                      system chooses an address. When not RT_NULL it
 *                      must be page aligned.
 *
 * @return The mapped user virtual address on success, or RT_NULL on failure.
 */
void *lwp_shmat(int id, void *shm_vaddr)
{
    void *ret = RT_NULL;

    if (((size_t)shm_vaddr & ARCH_PAGE_MASK) != 0)
    {
        return RT_NULL;
    }

    ret = _lwp_shmat(id, shm_vaddr);
    return ret;
}
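
/*
 * Usage sketch (illustrative only, not taken from this file): attaching a
 * segment obtained from lwp_shmget(). The variable 'id' and the fixed
 * address 0x20000000 are assumptions for the example; a fixed address must
 * be page aligned and free in the calling LWP's address space.
 *
 *     void *va = lwp_shmat(id, RT_NULL);               // let the kernel pick an address
 *     void *fixed = lwp_shmat(id, (void *)0x20000000); // request a specific address
 *     if (va == RT_NULL)
 *     {
 *         // mapping failed, e.g. invalid id or no space in the address range
 *     }
 */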
static struct lwp_shm_struct *_lwp_shm_struct_get(struct rt_lwp *lwp, void *shm_vaddr)
{
    void *pa = RT_NULL;
    struct lwp_avl_struct *node_pa = RT_NULL;

    if (!lwp)
    {
        return RT_NULL;
    }
    pa = lwp_v2p(lwp, shm_vaddr);   /* physical memory */
    node_pa = lwp_avl_find((size_t)pa, shm_tree_pa);
    if (!node_pa)
    {
        return RT_NULL;
    }
    return (struct lwp_shm_struct *)node_pa->data;
}

static int _lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
{
    struct lwp_shm_struct *p = _lwp_shm_struct_get(lwp, shm_vaddr);

    if (p)
    {
        p->ref++;
        return p->ref;
    }
    return -1;
}
/**
 * @brief Increase the reference count of a shared memory segment.
 *
 * The shared memory control block is located according to the given
 * @p lwp and the user virtual address @p shm_vaddr. If found, its
 * reference count is increased by one.
 *
 * @param[in] lwp       LWP object to operate on.
 * @param[in] shm_vaddr User virtual address where the shared memory
 *                      is mapped in this LWP.
 *
 * @return The new reference count on success, or -1 on failure.
 */
int lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shm_ref_inc(lwp, shm_vaddr);
    rt_mm_unlock();
    return ret;
}
static int _lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
{
    struct lwp_shm_struct *p = _lwp_shm_struct_get(lwp, shm_vaddr);

    if (p && (p->ref > 0))
    {
        p->ref--;
        return p->ref;
    }
    return -1;
}

/**
 * @brief Decrease the reference count of a shared memory segment.
 *
 * The shared memory control block is located according to the given
 * @p lwp and the user virtual address @p shm_vaddr. If it exists and
 * the reference count is greater than zero, the count is decreased by one.
 *
 * @param[in] lwp       LWP object to operate on.
 * @param[in] shm_vaddr User virtual address where the shared memory
 *                      is mapped in this LWP.
 *
 * @return The new reference count on success, or -1 on failure.
 */
int lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shm_ref_dec(lwp, shm_vaddr);
    rt_mm_unlock();
    return ret;
}
/* Unmap the shared memory from the address space of the current thread. */
int _lwp_shmdt(void *shm_vaddr)
{
    struct rt_lwp *lwp = RT_NULL;
    int ret = 0;

    lwp = lwp_self();
    if (!lwp)
    {
        return -1;
    }

    ret = rt_aspace_unmap(lwp->aspace, shm_vaddr);
    if (ret != RT_EOK)
    {
        ret = -1;
    }
    return ret;
}

/**
 * @brief Unmap a shared memory segment from the current LWP.
 *
 * The mapping at @p shm_vaddr in the current LWP address space is
 * removed. Internal errors are translated into a generic error code.
 *
 * @param[in] shm_vaddr User virtual address of the shared memory mapping.
 *
 * @return Returns 0 on success, or -1 on failure.
 */
int lwp_shmdt(void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shmdt(shm_vaddr);
    rt_mm_unlock();
    return ret;
}
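
/*
 * Usage sketch (illustrative only, not taken from this file): a typical
 * teardown pairs the unmap with a removal attempt. 'va' and 'id' are the
 * values returned by the attach/get calls above and are assumptions here.
 *
 *     lwp_shmdt(va);   // unmap; the varea close hook decrements the reference count
 *     lwp_shmrm(id);   // frees the pages only once the reference count reaches zero
 */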
/* Get the kernel virtual address of a shared memory segment. */
void *_lwp_shminfo(int id)
{
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;

    /* the shared memory is in use only if it exists in the binary tree */
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return RT_NULL;
    }
    p = (struct lwp_shm_struct *)node_key->data;    /* p = &_shm_ary[id] */
    return (void *)((char *)p->addr - PV_OFFSET);   /* get the virtual address */
}
/**
 * @brief Get the kernel virtual address of a shared memory segment.
 *
 * The internal control block is located by @p id and the kernel
 * virtual address corresponding to that shared memory is returned.
 *
 * @param[in] id Shared memory id returned by lwp_shmget().
 *
 * @return Kernel virtual address of the shared memory on success,
 *         or RT_NULL on failure.
 */
void *lwp_shminfo(int id)
{
    void *vaddr = RT_NULL;

    rt_mm_lock();
    vaddr = _lwp_shminfo(id);
    rt_mm_unlock();
    return vaddr;
}
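
/*
 * Usage sketch (illustrative only, not taken from this file): kernel code
 * that needs to read or fill a segment can work through the kernel virtual
 * address. 'id' and 'size' (the size requested at creation) are assumptions
 * for the example.
 *
 *     void *kva = lwp_shminfo(id);
 *     if (kva)
 *     {
 *         rt_memset(kva, 0, size);   // clear the segment from kernel context
 *     }
 */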
#ifdef RT_USING_FINSH
static int _shm_info(struct lwp_avl_struct *node_key, void *data)
{
    int id = 0;
    struct lwp_shm_struct *p = (struct lwp_shm_struct *)node_key->data;

    id = p - _shm_ary;
    rt_kprintf("0x%08x 0x%08x 0x%08x %8d\n", p->key, p->addr, p->size, id);
    return 0;
}

/**
 * @brief Print information of all shared memory segments.
 *
 * This function prints the key, physical address, size and id of each
 * shared memory segment to the console. It is exported as the Finsh/Msh
 * command @c list_shm for debugging and inspection.
 */
void list_shm(void)
{
    rt_kprintf("       key      paddr       size       id\n");
    rt_kprintf("---------- ---------- ---------- --------\n");
    rt_mm_lock();
    lwp_avl_traversal(shm_tree_key, _shm_info, NULL);
    rt_mm_unlock();
}
MSH_CMD_EXPORT(list_shm, show shared memory info);
#endif

#endif