/* lwp_pid.c */
  1. /*
  2. * Copyright (c) 2006-2025 RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2019-10-16 zhangjun first version
  9. * 2021-02-20 lizhirui fix warning
  10. * 2023-06-26 shell clear ref to parent on waitpid()
  11. * Remove recycling of lwp on waitpid() and leave it to defunct routine
  12. * 2023-07-27 shell Move the detach of children process on parent exit to lwp_terminate.
  13. * Make lwp_from_pid locked by caller to avoid possible use-after-free
  14. * error
  15. * 2023-10-27 shell Format codes of sys_exit(). Fix the data racing where lock is missed
  16. * Add reference on pid/tid, so the resource is not freed while using.
  17. * Add support for waitpid(options=WNOHANG)
  18. * 2023-11-16 xqyjlj Fix the case where pid is 0
  19. * 2023-11-17 xqyjlj add process group and session support
  20. * 2023-11-24 shell Support of waitpid(options=WNOTRACED|WCONTINUED);
  21. * Reimplement the waitpid with a wait queue method, and fixup problem
  22. * with waitpid(pid=-1)/waitpid(pid=-pgid)/waitpid(pid=0) that only one
  23. * process can be traced while waiter suspend
  24. * 2024-01-25 shell porting to new sched API
  25. */
  26. /* includes scheduler related API */
  27. #define __RT_IPC_SOURCE__
  28. /* for waitpid, we are compatible to GNU extension */
  29. #define _GNU_SOURCE
  30. #define DBG_TAG "lwp.pid"
  31. #define DBG_LVL DBG_INFO
  32. #include <rtdbg.h>
  33. #include "lwp_internal.h"
  34. #include <rthw.h>
  35. #include <rtthread.h>
  36. #include <dfs_file.h>
  37. #include <unistd.h>
  38. #include <stdio.h> /* rename() */
  39. #include <stdlib.h>
  40. #include <sys/stat.h>
  41. #include <sys/statfs.h> /* statfs() */
  42. #include <stdatomic.h>
  43. #ifdef ARCH_MM_MMU
  44. #include "lwp_user_mm.h"
  45. #endif
  46. #ifdef RT_USING_DFS_PROCFS
  47. #include "proc.h"
  48. #include "procfs.h"
  49. #endif
#define PID_MAX 10000

/* compile-time assertion: the char array has a legal (positive) size only when (x) holds */
#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];    /* static backing storage for PID tree nodes */
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;  /* free list of recycled nodes, chained via avl_right */
static int lwp_pid_ary_alloced = 0;                         /* nodes handed out from lwp_pid_ary so far */
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;       /* AVL tree of PIDs currently in use */
static pid_t current_pid = 0;                               /* last PID allocated; next search starts just above it */
static struct rt_mutex pid_mtx;                             /* protects all PID bookkeeping above */
static struct rt_wqueue _pid_emptyq;                        /* waiters blocked until all PIDs are released */
  62. /**
  63. * @brief Initialize PID management structures
  64. *
  65. * @return int Always returns 0.
  66. */
  67. int lwp_pid_init(void)
  68. {
  69. rt_wqueue_init(&_pid_emptyq);
  70. rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
  71. return 0;
  72. }
  73. /**
  74. * @brief Wait for an empty PID slot to become available
  75. *
  76. * @param[in] wait_flags Wait mode flags (RT_INTERRUPTIBLE, RT_KILLABLE or RT_UNINTERRUPTIBLE)
  77. * @param[in] to Timeout value in ticks (RT_WAITING_FOREVER for no timeout)
  78. *
  79. * @return int Error code (0 on success, negative on error)
  80. */
  81. int lwp_pid_wait_for_empty(int wait_flags, rt_tick_t to)
  82. {
  83. int error;
  84. if (wait_flags == RT_INTERRUPTIBLE)
  85. {
  86. error = rt_wqueue_wait_interruptible(&_pid_emptyq, 0, to);
  87. }
  88. else
  89. {
  90. error = rt_wqueue_wait_killable(&_pid_emptyq, 0, to);
  91. }
  92. return error;
  93. }
/**
 * @brief Acquire the PID management mutex lock
 *
 * @note This is a blocking call that will wait indefinitely for the lock.
 *       A forever, non-interruptible take cannot legitimately fail, hence
 *       the assertion rather than error propagation.
 */
void lwp_pid_lock_take(void)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never failed */
    RT_ASSERT(rc == RT_EOK);
    RT_UNUSED(rc);
}
  107. /**
  108. * @brief Release the PID management mutex lock
  109. *
  110. * @note This function should be called after lwp_pid_lock_take()
  111. */
  112. void lwp_pid_lock_release(void)
  113. {
  114. /* should never failed */
  115. if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
  116. RT_ASSERT(0);
  117. }
/**
 * @brief Parameter structure for PID iteration callback
 *
 * @note Bundles the user callback and its context so both can be passed
 *       through lwp_avl_traversal()'s single void* argument.
 */
struct pid_foreach_param
{
    /**
     * @brief Callback function to execute for each PID
     * @param[in] pid The process ID being processed
     * @param[in,out] data User-provided context data
     * @return int Operation status (0 to continue, non-zero to stop)
     */
    int (*cb)(pid_t pid, void *data);
    void *data; /**< User-provided context data */
};
  135. /**
  136. * @brief Callback function for PID iteration
  137. *
  138. * @param[in] node AVL tree node containing the PID
  139. * @param[in] data User-provided parameter structure
  140. *
  141. * @return int Operation status (0 to continue, non-zero to stop)
  142. */
  143. static int _before_cb(struct lwp_avl_struct *node, void *data)
  144. {
  145. struct pid_foreach_param *param = data;
  146. pid_t pid = node->avl_key;
  147. return param->cb(pid, param->data);
  148. }
  149. /**
  150. * @brief Iterate over all process IDs
  151. *
  152. * @param[in] cb Callback function to execute for each PID
  153. * @param[in] data User-provided context data
  154. *
  155. * @return int Error code (0 on success, negative on error)
  156. */
  157. int lwp_pid_for_each(int (*cb)(pid_t pid, void *data), void *data)
  158. {
  159. int error;
  160. struct pid_foreach_param buf =
  161. {
  162. .cb = cb,
  163. .data = data,
  164. };
  165. lwp_pid_lock_take();
  166. error = lwp_avl_traversal(lwp_pid_root, _before_cb, &buf);
  167. lwp_pid_lock_release();
  168. return error;
  169. }
  170. /**
  171. * @brief Get the PID array
  172. *
  173. * @return struct lwp_avl_struct* Pointer to the PID array
  174. */
  175. struct lwp_avl_struct *lwp_get_pid_ary(void)
  176. {
  177. return lwp_pid_ary;
  178. }
/**
 * @brief Allocates a new PID while holding the PID management lock
 *
 * @return pid_t The newly allocated PID, or 0 if allocation failed
 *
 * @note A tree node is obtained from either:
 *       1. The free list (lwp_pid_free_head), or
 *       2. The static array (lwp_pid_ary) while slots remain.
 *
 *       An unused PID value is then searched in two passes:
 *       1. From current_pid+1 up to PID_MAX, then
 *       2. Wrapping around from 1 up to current_pid.
 *
 *       The node is inserted into the PID AVL tree (lwp_pid_root) and
 *       current_pid is advanced to the new PID.
 */
static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    /* prefer recycling a node from the free list (chained via avl_right) */
    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        /* otherwise carve a fresh node out of the static array */
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        /* pass 1: search upward from the most recently allocated PID */
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            /* pass 2: wrap around and search from 1 */
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        /* a free PID must exist: at most RT_LWP_MAX_NR nodes are live and
         * RT_LWP_MAX_NR < PID_MAX is enforced by PID_CT_ASSERT above */
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}
  238. /**
  239. * @brief Release a PID back to the free list while holding the PID lock
  240. *
  241. * @param[in] pid The process ID to release (must not be 0)
  242. *
  243. * @note This function removes the specified PID from the active PID tree and
  244. * adds it to the free list. The operation is performed atomically while
  245. * holding the PID management lock.
  246. */
  247. static void lwp_pid_put_locked(pid_t pid)
  248. {
  249. struct lwp_avl_struct *p;
  250. if (pid == 0)
  251. {
  252. return;
  253. }
  254. p = lwp_avl_find(pid, lwp_pid_root);
  255. if (p)
  256. {
  257. p->data = RT_NULL;
  258. lwp_avl_remove(p, &lwp_pid_root);
  259. p->avl_right = lwp_pid_free_head;
  260. lwp_pid_free_head = p;
  261. }
  262. }
  263. #ifdef RT_USING_DFS_PROCFS
  264. /**
  265. * @brief Free the proc dentry for the given LWP
  266. *
  267. * @param[in] lwp The LWP whose proc dentry is to be freed
  268. */
  269. rt_inline void _free_proc_dentry(rt_lwp_t lwp)
  270. {
  271. char pid_str[64] = {0};
  272. rt_snprintf(pid_str, 64, "%d", lwp->pid);
  273. pid_str[63] = 0;
  274. proc_remove_dentry(pid_str, 0);
  275. }
  276. #else
  277. #define _free_proc_dentry(lwp)
  278. #endif
/**
 * @brief Release a process ID and clean up associated resources
 *
 * @param[in,out] lwp The lightweight process whose PID should be released
 *
 * @note If releasing this PID empties the tree, the PID mutex is
 *       deliberately NOT released: keeping it held refuses any further PID
 *       allocation while waiters on _pid_emptyq are woken. In every other
 *       case the lock is released normally.
 */
void lwp_pid_put(struct rt_lwp *lwp)
{
    /* drop the "/proc/<pid>" entry first, while lwp->pid is still valid */
    _free_proc_dentry(lwp);

    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    if (lwp_pid_root == AVL_EMPTY)
    {
        rt_wqueue_wakeup_all(&_pid_emptyq, RT_NULL);
        /* refuse any new pid allocation now */
    }
    else
    {
        lwp_pid_lock_release();
    }

    /* reset pid field */
    lwp->pid = 0;
    /* clear reference (the one taken by lwp_pid_set_lwp_locked) */
    lwp_ref_dec(lwp);
}
  303. /**
  304. * @brief Set the LWP for a given PID while holding the PID lock
  305. *
  306. * @param[in] pid The process ID to set the LWP for
  307. * @param[in] lwp The LWP to associate with the PID
  308. *
  309. * @note This function associates the specified LWP with the given PID in the
  310. * PID AVL tree. It increments the LWP's reference count and updates the
  311. * proc filesystem entry if the PID is non-zero.
  312. */
  313. static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
  314. {
  315. struct lwp_avl_struct *p;
  316. p = lwp_avl_find(pid, lwp_pid_root);
  317. if (p)
  318. {
  319. p->data = lwp;
  320. lwp_ref_inc(lwp);
  321. #ifdef RT_USING_DFS_PROCFS
  322. if (pid)
  323. {
  324. proc_pid(pid);
  325. }
  326. #endif
  327. }
  328. }
  329. /**
  330. * @brief Close all open files for the given LWP
  331. *
  332. * @param[in] lwp The LWP whose files should be closed
  333. *
  334. * @note This function iterates through all file descriptors in the LWP's file
  335. * descriptor table, closing each open file. It is typically called when
  336. * an LWP is being destroyed or when the LWP is switching to a new thread.
  337. */
  338. static void __exit_files(struct rt_lwp *lwp)
  339. {
  340. int fd = lwp->fdt.maxfd - 1;
  341. while (fd >= 0)
  342. {
  343. struct dfs_file *d;
  344. d = lwp->fdt.fds[fd];
  345. if (d)
  346. {
  347. dfs_file_close(d);
  348. fdt_fd_release(&lwp->fdt, fd);
  349. }
  350. fd--;
  351. }
  352. }
/**
 * @brief Initialize the user object lock for a lightweight process
 *
 * @param[in,out] lwp The lightweight process structure whose object lock needs initialization
 */
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    /* protects lwp->object_root against concurrent add/delete/clear/dup */
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}
/**
 * @brief Destroy the user object lock for a lightweight process
 *
 * @param[in,out] lwp The lightweight process structure whose object lock needs destruction
 *
 * @note Counterpart of lwp_user_object_lock_init(); called during teardown.
 */
void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}
  371. /**
  372. * @brief Lock the user object lock for a lightweight process
  373. *
  374. * @param[in,out] lwp The lightweight process structure whose object lock needs locking
  375. */
  376. void lwp_user_object_lock(struct rt_lwp *lwp)
  377. {
  378. if (lwp)
  379. {
  380. rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
  381. }
  382. else
  383. {
  384. RT_ASSERT(0);
  385. }
  386. }
  387. /**
  388. * @brief Unlock the user object lock for a lightweight process
  389. *
  390. * @param[in,out] lwp The lightweight process structure whose object lock needs unlocking
  391. */
  392. void lwp_user_object_unlock(struct rt_lwp *lwp)
  393. {
  394. if (lwp)
  395. {
  396. rt_mutex_release(&lwp->object_mutex);
  397. }
  398. else
  399. {
  400. RT_ASSERT(0);
  401. }
  402. }
  403. /**
  404. * @brief Add an object to the user object list of a lightweight process
  405. *
  406. * @param[in,out] lwp The lightweight process structure whose object list needs updating
  407. * @param[in] object The object to be added to the LWP's object list
  408. *
  409. * @return int Returns 0 on success, or -1 on failure
  410. */
  411. int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
  412. {
  413. int ret = -1;
  414. if (lwp && object)
  415. {
  416. lwp_user_object_lock(lwp);
  417. if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
  418. {
  419. struct lwp_avl_struct *node;
  420. node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
  421. if (node)
  422. {
  423. rt_atomic_add(&object->lwp_ref_count, 1);
  424. node->avl_key = (avl_key_t)object;
  425. lwp_avl_insert(node, &lwp->object_root);
  426. ret = 0;
  427. }
  428. }
  429. lwp_user_object_unlock(lwp);
  430. }
  431. return ret;
  432. }
/**
 * @brief Delete a node from the user object list of a lightweight process
 *
 * @param[in,out] lwp The lightweight process structure whose object list needs updating
 * @param[in] node The node to be deleted from the LWP's object list
 *
 * @return rt_err_t Returns 0 on success, or -1 on failure
 *
 * @note Caller must hold the process object lock. When the last process
 *       reference to the object drops, the kernel object itself is deleted
 *       according to its type.
 */
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    /* NOTE(review): plain decrement here, while lwp_user_object_add uses
     * rt_atomic_add — confirm whether an atomic op is needed when the same
     * object is shared between processes via lwp_user_object_dup */
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* last reference gone: remove from kernel object list by type */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        /* other processes still reference the object; nothing to destroy */
        ret = 0;
    }
    /* the tracking node itself is always removed and freed */
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}
  490. /**
  491. * @brief Delete an object from the user object list of a lightweight process
  492. *
  493. * @param[in,out] lwp The lightweight process structure whose object list needs updating
  494. * @param[in] object The object to be deleted from the LWP's object list
  495. *
  496. * @return rt_err_t Returns 0 on success, or -1 on failure
  497. */
  498. rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
  499. {
  500. rt_err_t ret = -1;
  501. if (lwp && object)
  502. {
  503. struct lwp_avl_struct *node;
  504. lwp_user_object_lock(lwp);
  505. node = lwp_avl_find((avl_key_t)object, lwp->object_root);
  506. ret = _object_node_delete(lwp, node);
  507. lwp_user_object_unlock(lwp);
  508. }
  509. return ret;
  510. }
  511. /**
  512. * @brief Clear all objects from the user object list of a lightweight process
  513. *
  514. * @param[in,out] lwp The lightweight process structure whose object list needs clearing
  515. */
  516. void lwp_user_object_clear(struct rt_lwp *lwp)
  517. {
  518. struct lwp_avl_struct *node;
  519. lwp_user_object_lock(lwp);
  520. while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
  521. {
  522. _object_node_delete(lwp, node);
  523. }
  524. lwp_user_object_unlock(lwp);
  525. }
  526. /**
  527. * @brief Callback function for duplicating objects in AVL tree traversal
  528. *
  529. * @param[in] node The AVL tree node containing the object to duplicate
  530. * @param[in,out] arg The destination lightweight process (cast to struct rt_lwp *)
  531. * @return int Always returns 0 to continue traversal
  532. *
  533. * @note This function is used as a callback during AVL tree traversal
  534. * to duplicate objects from one process to another
  535. */
  536. static int _object_dup(struct lwp_avl_struct *node, void *arg)
  537. {
  538. rt_object_t object;
  539. struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;
  540. object = (rt_object_t)node->avl_key;
  541. lwp_user_object_add(dst_lwp, object);
  542. return 0;
  543. }
/**
 * @brief Duplicate all objects from one lightweight process to another
 *
 * @param[in,out] dst_lwp The destination lightweight process structure
 * @param[in] src_lwp The source lightweight process structure
 *
 * @note Traverses the source tree under the source's object lock, adding
 *       each object to the destination (which bumps the object's reference
 *       count). Used e.g. when forking a process.
 */
void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
  559. /**
  560. * @brief Create a new lightweight process (LWP)
  561. *
  562. * @param[in] flags Creation flags that control LWP behavior
  563. * - LWP_CREATE_FLAG_NOTRACE_EXEC: Don't trace exec operations
  564. * - LWP_CREATE_FLAG_ALLOC_PID: Allocate a PID for the LWP
  565. * - LWP_CREATE_FLAG_INIT_USPACE: Initialize user space
  566. *
  567. * @return Pointer to newly created LWP structure on success
  568. * - RT_NULL on failure (pid allocation failed or user space init failed)
  569. */
  570. rt_lwp_t lwp_create(rt_base_t flags)
  571. {
  572. pid_t pid;
  573. rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));
  574. if (new_lwp)
  575. {
  576. /* minimal setup of lwp object */
  577. new_lwp->ref = 1;
  578. #ifdef RT_USING_SMP
  579. new_lwp->bind_cpu = RT_CPUS_NR;
  580. #endif
  581. new_lwp->exe_file = RT_NULL;
  582. rt_list_init(&new_lwp->t_grp);
  583. rt_list_init(&new_lwp->pgrp_node);
  584. rt_list_init(&new_lwp->timer);
  585. lwp_user_object_lock_init(new_lwp);
  586. rt_wqueue_init(&new_lwp->wait_queue);
  587. rt_wqueue_init(&new_lwp->waitpid_waiters);
  588. lwp_signal_init(&new_lwp->signal);
  589. rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);
  590. if (flags & LWP_CREATE_FLAG_NOTRACE_EXEC)
  591. new_lwp->did_exec = RT_TRUE;
  592. /* lwp with pid */
  593. if (flags & LWP_CREATE_FLAG_ALLOC_PID)
  594. {
  595. lwp_pid_lock_take();
  596. pid = lwp_pid_get_locked();
  597. if (pid == 0)
  598. {
  599. lwp_user_object_lock_destroy(new_lwp);
  600. rt_free(new_lwp);
  601. new_lwp = RT_NULL;
  602. LOG_E("%s: pid slot fulled", __func__);
  603. }
  604. else
  605. {
  606. new_lwp->pid = pid;
  607. lwp_pid_set_lwp_locked(pid, new_lwp);
  608. }
  609. lwp_pid_lock_release();
  610. }
  611. rt_memset(&new_lwp->rt_rusage,0, sizeof(new_lwp->rt_rusage));
  612. if (flags & LWP_CREATE_FLAG_INIT_USPACE)
  613. {
  614. rt_err_t error = lwp_user_space_init(new_lwp, 0);
  615. if (error)
  616. {
  617. lwp_pid_put(new_lwp);
  618. lwp_user_object_lock_destroy(new_lwp);
  619. rt_free(new_lwp);
  620. new_lwp = RT_NULL;
  621. LOG_E("%s: failed to initialize user space", __func__);
  622. }
  623. }
  624. }
  625. LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
  626. return new_lwp;
  627. }
/**
 * @brief Free all resources associated with a lightweight process (LWP)
 *
 * @param[in,out] lwp Lightweight process to be freed
 *
 * @note when reference is 0, a lwp can be released
 */
void lwp_free(struct rt_lwp* lwp)
{
    rt_processgroup_t group = RT_NULL;

    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when reference is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader compete with lwp_free, since
     *   all the reference is clear)
     */
    LOG_D("lwp free: %p", lwp);

    rt_free(lwp->exe_file);
    /* detach from its process group before tearing anything else down */
    group = lwp_pgrp_find(lwp_pgid_get_byprocess(lwp));
    if (group)
        lwp_pgrp_remove(group, lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        /* NOTE(review): assigns RT_NULL to what looks like a length field —
         * confirm this is intended rather than `= 0` */
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    /* drop every tracked kernel object, then its guarding mutex */
    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section; ownership of the memory depends on the MM config */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section (only dynamically placed text is owned here) */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    /* nobody may still hold the lwp lock at this point */
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * pid must have release before enter lwp_free()
     * otherwise this is a data racing
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}
/**
 * @brief Handle thread exit cleanup for a lightweight process
 *
 * @param[in,out] lwp Lightweight process containing the thread
 * @param[in,out] thread Thread to be exited
 *
 * @note This function performs the following operations:
 *       - Accounts the thread's system/user time into the process rusage
 *       - Removes thread from process sibling list
 *       - Handles futex robust list cleanup
 *       - Releases thread ID (TID)
 *       - Deletes the thread
 *       - Never returns (loops after the final reschedule)
 */
rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    LWP_LOCK(lwp);
    /* fold the dying thread's CPU time into the process totals;
     * ticks are split into whole seconds plus a microsecond remainder */
    lwp->rt_rusage.ru_stime.tv_sec += thread->system_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_stime.tv_usec += thread->system_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    lwp->rt_rusage.ru_utime.tv_sec += thread->user_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_utime.tv_usec += thread->user_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);

    lwp_futex_exit_robust_list(thread);

    /**
     * Note: the tid tree always hold a reference to thread, hence the tid must
     * be release before cleanup of thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    rt_thread_delete(thread);
    rt_schedule();
    /* unreachable: the scheduler never switches back to a deleted thread */
    while (1) ;
}
  743. /**
  744. * @brief Clear child thread ID notification for parent process
  745. *
  746. * @param[in,out] thread Thread whose child tid needs to be cleared
  747. *
  748. * @note This function performs the following operations:
  749. * - Checks if clear_child_tid pointer is set
  750. * - Writes 0 to user-space memory location if set
  751. * - Wakes any futex waiters on that location
  752. * - Clears the thread's clear_child_tid pointer
  753. */
  754. rt_inline void _clear_child_tid(rt_thread_t thread)
  755. {
  756. if (thread->clear_child_tid)
  757. {
  758. int t = 0;
  759. int *clear_child_tid = thread->clear_child_tid;
  760. thread->clear_child_tid = RT_NULL;
  761. lwp_put_to_user(clear_child_tid, &t, sizeof t);
  762. sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
  763. }
  764. }
  765. /**
  766. * @brief Terminates a lightweight process and cleans up its resources
  767. *
  768. * @param[in,out] lwp The lightweight process to terminate
  769. * @param[in] status The exit status to set for the process
  770. *
  771. * @note This function handles both MMU and non-MMU architectures differently.
  772. * For MMU architectures, it clears child TID, sets exit status, and terminates.
  773. * For non-MMU, it terminates the main thread and all subthreads.
  774. */
  775. void lwp_exit(rt_lwp_t lwp, lwp_status_t status)
  776. {
  777. rt_thread_t thread;
  778. if (!lwp)
  779. {
  780. LOG_W("%s: lwp should not be null", __func__);
  781. return ;
  782. }
  783. thread = rt_thread_self();
  784. RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
  785. LOG_D("process(lwp.pid=%d) exit", lwp->pid);
  786. #ifdef ARCH_MM_MMU
  787. _clear_child_tid(thread);
  788. LWP_LOCK(lwp);
  789. /**
  790. * Brief: only one thread should calls exit_group(),
  791. * but we can not ensured that during run-time
  792. */
  793. lwp->lwp_status = status;
  794. LWP_UNLOCK(lwp);
  795. lwp_terminate(lwp);
  796. #else
  797. main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
  798. if (main_thread == tid)
  799. {
  800. rt_thread_t sub_thread;
  801. rt_list_t *list;
  802. lwp_terminate(lwp);
  803. /* delete all subthread */
  804. while ((list = tid->sibling.prev) != &lwp->t_grp)
  805. {
  806. sub_thread = rt_list_entry(list, struct rt_thread, sibling);
  807. rt_list_remove(&sub_thread->sibling);
  808. rt_thread_delete(sub_thread);
  809. }
  810. lwp->lwp_ret = value;
  811. }
  812. #endif /* ARCH_MM_MMU */
  813. _thread_exit(lwp, thread);
  814. }
  815. /**
  816. * @brief Handles thread exit for a lightweight process
  817. *
  818. * @param[in,out] thread The thread that is exiting
  819. * @param[in] status The exit status to set for the thread
  820. *
  821. * @note For MMU architectures, this function checks if the exiting thread is the
  822. * header thread (main thread). If so, it treats it as a process exit.
  823. */
void lwp_thread_exit(rt_thread_t thread, int status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    /* may only be invoked from the exiting thread's own context */
    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /* t_grp.prev holds the header (main) thread of the process */
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    /* second test: thread is also the first node, i.e. the only thread left */
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        /**
         * if thread exit, treated as process exit normally.
         * This is reasonable since trap event is exited through lwp_exit()
         */
        lwp->lwp_status = LWP_CREATE_STAT_EXIT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    /* does not return: detaches the thread and reschedules */
    _thread_exit(lwp, thread);
}
  853. /**
  854. * @brief Increments the reference count of a lightweight process
  855. *
  856. * @param[in,out] lwp The lightweight process whose reference count is to be incremented
  857. *
  858. * @return int The updated reference count after incrementing
  859. *
  860. * @note the reference is not for synchronization, but for the release of resource. the synchronization is done through lwp & pid lock.
  861. */
  862. int lwp_ref_inc(struct rt_lwp *lwp)
  863. {
  864. int ref;
  865. ref = rt_atomic_add(&lwp->ref, 1);
  866. LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);
  867. return ref;
  868. }
  869. /**
  870. * @brief Decrements the reference count of a lightweight process (LWP)
  871. *
  872. * @param[in,out] lwp The lightweight process whose reference count to decrement
  873. *
  874. * @return int The reference count before decrementing
  875. *
  876. * @note This function atomically decrements the LWP's reference count.
  877. * When the count reaches 1 (meaning this was the last reference):
  878. * - For debug builds, sends a message to GDB server channel
  879. * - For non-MMU architectures with shared memory support, frees shared memory
  880. * - Calls lwp_free() to release all LWP resources
  881. */
int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    /* ref holds the counter value BEFORE the decrement (cf. lwp_ref_inc) */
    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        /* the count just dropped to zero: tear the process down */
        struct rt_channel_msg msg;
        if (lwp->debug)
        {
            /* kick the gdb server channel with an empty message
             * (NOTE(review): presumably so the debugger detaches — confirm) */
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        /* non-MMU builds keep per-process shared memory that must go too */
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }

    return ref;
}
  909. /**
  910. * @brief Retrieves a lightweight process (LWP) by its PID while holding the lock
  911. *
  912. * @param[in] pid The process ID to look up
  913. *
  914. * @return struct rt_lwp* Pointer to the LWP structure if found, RT_NULL otherwise
  915. *
  916. * @note This function performs a raw lookup in the PID AVL tree while assuming
  917. * the caller already holds the necessary locks. It's a lower-level version
  918. * of lwp_from_pid() that doesn't handle locking internally.
  919. */
  920. struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid)
  921. {
  922. struct lwp_avl_struct *p;
  923. struct rt_lwp *lwp = RT_NULL;
  924. p = lwp_avl_find(pid, lwp_pid_root);
  925. if (p)
  926. {
  927. lwp = (struct rt_lwp *)p->data;
  928. }
  929. return lwp;
  930. }
  931. /**
  932. * @brief Retrieves a lightweight process (LWP) by PID with lock handling
  933. *
  934. * @param[in] pid The process ID to look up (0 means current process)
  935. *
  936. * @return struct rt_lwp* Pointer to the LWP structure if found, current LWP if pid=0
  937. *
  938. * @note This is a convenience wrapper that:
  939. * - If pid is non-zero, calls lwp_from_pid_raw_locked()
  940. * - If pid is zero, returns the current LWP via lwp_self()
  941. */
  942. struct rt_lwp* lwp_from_pid_locked(pid_t pid)
  943. {
  944. struct rt_lwp* lwp;
  945. lwp = pid ? lwp_from_pid_raw_locked(pid) : lwp_self();
  946. return lwp;
  947. }
  948. /**
  949. * @brief Converts a lightweight process (LWP) to its PID
  950. *
  951. * @param[in] lwp The LWP structure to convert
  952. *
  953. * @return pid_t The PID of the LWP, or 0 if lwp is NULL
  954. */
  955. pid_t lwp_to_pid(struct rt_lwp* lwp)
  956. {
  957. if (!lwp)
  958. {
  959. return 0;
  960. }
  961. return lwp->pid;
  962. }
  963. /**
  964. * @brief Convert process ID to process name
  965. *
  966. * @param[in] pid Process ID to look up
  967. *
  968. * @return char* Pointer to the process name (without path) if found, or RT_NULL if not found
  969. */
  970. char* lwp_pid2name(int32_t pid)
  971. {
  972. struct rt_lwp *lwp;
  973. char* process_name = RT_NULL;
  974. lwp_pid_lock_take();
  975. lwp = lwp_from_pid_locked(pid);
  976. if (lwp)
  977. {
  978. process_name = strrchr(lwp->cmd, '/');
  979. process_name = process_name? process_name + 1: lwp->cmd;
  980. }
  981. lwp_pid_lock_release();
  982. return process_name;
  983. }
  984. /**
  985. * @brief Convert process name to process ID
  986. *
  987. * @param[in] name Process name to look up (without path)
  988. *
  989. * @return pid_t Process ID if found, or 0 if not found
  990. *
  991. * @note The function only returns PIDs for processes whose main thread
  992. * is not in CLOSED state.
  993. */
  994. pid_t lwp_name2pid(const char *name)
  995. {
  996. int idx;
  997. pid_t pid = 0;
  998. rt_thread_t main_thread;
  999. char* process_name = RT_NULL;
  1000. rt_sched_lock_level_t slvl;
  1001. lwp_pid_lock_take();
  1002. for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
  1003. {
  1004. /* 0 is reserved */
  1005. struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;
  1006. if (lwp)
  1007. {
  1008. process_name = strrchr(lwp->exe_file, '/');
  1009. process_name = process_name? process_name + 1: lwp->cmd;
  1010. if (!rt_strncmp(name, process_name, RT_NAME_MAX))
  1011. {
  1012. main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
  1013. rt_sched_lock(&slvl);
  1014. if (!(rt_sched_thread_get_stat(main_thread) == RT_THREAD_CLOSE))
  1015. {
  1016. pid = lwp->pid;
  1017. }
  1018. rt_sched_unlock(slvl);
  1019. }
  1020. }
  1021. }
  1022. lwp_pid_lock_release();
  1023. return pid;
  1024. }
  1025. /**
  1026. * @brief Get the process ID of the current lightweight process (LWP)
  1027. *
  1028. * @return pid_t Process ID of the current LWP
  1029. */
  1030. int lwp_getpid(void)
  1031. {
  1032. rt_lwp_t lwp = lwp_self();
  1033. return lwp ? lwp->pid : 1;
  1034. /* return ((struct rt_lwp *)rt_thread_self()->lwp)->pid; */
  1035. }
  1036. /**
  1037. * @brief Update resource usage statistics from child to parent process
  1038. *
  1039. * @param[in] child Child process containing resource usage data
  1040. * @param[in] self_lwp Current lightweight process (parent)
  1041. * @param[in,out] uru Pointer to user-space rusage structure to update
  1042. *
  1043. * @note This function:
  1044. * - Copies system and user time from child process
  1045. * - Only updates if uru pointer is not NULL
  1046. * - Uses lwp_data_put to safely write to user-space memory
  1047. */
  1048. rt_inline void _update_ru(struct rt_lwp *child, struct rt_lwp *self_lwp, struct rusage *uru)
  1049. {
  1050. struct rusage rt_rusage;
  1051. if (uru != RT_NULL)
  1052. {
  1053. rt_rusage.ru_stime.tv_sec = child->rt_rusage.ru_stime.tv_sec;
  1054. rt_rusage.ru_stime.tv_usec = child->rt_rusage.ru_stime.tv_usec;
  1055. rt_rusage.ru_utime.tv_sec = child->rt_rusage.ru_utime.tv_sec;
  1056. rt_rusage.ru_utime.tv_usec = child->rt_rusage.ru_utime.tv_usec;
  1057. lwp_data_put(self_lwp, uru, &rt_rusage, sizeof(*uru));
  1058. }
  1059. }
  1060. /**
  1061. * @brief Collects statistics and reaps a terminated child process
  1062. *
  1063. * @param[in] child The child process to collect statistics from
  1064. * @param[in] cur_thr The current thread context
  1065. * @param[in,out] self_lwp The parent process (current LWP)
  1066. * @param[out] ustatus Pointer to store child's exit status (optional)
  1067. * @param[in] options Wait options (e.g., WNOWAIT)
  1068. * @param[in,out] uru Pointer to resource usage structure to update (optional)
  1069. *
  1070. * @return rt_err_t Returns RT_EOK on success
  1071. *
  1072. * @note Updates resource usage statistics and optionally reaps the child process
  1073. */
static rt_err_t _stats_and_reap_child(rt_lwp_t child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *ustatus,
                                      int options, struct rusage *uru)
{
    /* snapshot the status word before the child may be released below
     * (cur_thr is currently unused; kept for symmetry with the other
     * wait helpers) */
    int lwp_stat = child->lwp_status;

    /* report statistical data to process */
    _update_ru(child, self_lwp, uru);

    if (child->terminated && !(options & WNOWAIT))
    {
        /** Reap the child process if it's exited */
        LOG_D("func %s: child detached", __func__);
        /* drop the pid-table reference first, then unlink from the parent */
        lwp_pid_put(child);
        lwp_children_unregister(self_lwp, child);
    }

    /* copy the snapshotted status out to the waiter's user buffer */
    if (ustatus)
        lwp_data_put(self_lwp, ustatus, &lwp_stat, sizeof(*ustatus));

    return RT_EOK;
}
  1092. #define HAS_CHILD_BUT_NO_EVT (-1024)
  1093. /**
  1094. * @brief Queries process state change events from a child process
  1095. *
  1096. * @param[in] child The child process to query
  1097. * @param[in] cur_thr The current thread context
  1098. * @param[in] self_lwp The parent process (current LWP)
  1099. * @param[in] options Wait options (e.g., WSTOPPED)
  1100. * @param[out] status Pointer to store child's status (optional)
  1101. *
  1102. * @return sysret_t Returns child PID if event found, or HAS_CHILD_BUT_NO_EVT
  1103. *
  1104. * @note Checks for termination or stopped state changes in child process
  1105. */
  1106. static sysret_t _query_event_from_lwp(rt_lwp_t child, rt_thread_t cur_thr, rt_lwp_t self_lwp,
  1107. int options, int *status)
  1108. {
  1109. sysret_t rc;
  1110. LWP_LOCK(child);
  1111. if (child->terminated)
  1112. {
  1113. rc = child->pid;
  1114. }
  1115. else if ((options & WSTOPPED) && child->jobctl_stopped && !child->wait_reap_stp)
  1116. {
  1117. child->wait_reap_stp = 1;
  1118. rc = child->pid;
  1119. }
  1120. else
  1121. {
  1122. rc = HAS_CHILD_BUT_NO_EVT;
  1123. }
  1124. LWP_UNLOCK(child);
  1125. LOG_D("%s(child_pid=%d ('%s'), stopped=%d) => %d", __func__, child->pid, child->cmd, child->jobctl_stopped, rc);
  1126. return rc;
  1127. }
  1128. /**
  1129. * @brief Verifies and reaps a child process if conditions are met
  1130. *
  1131. * @param[in] cur_thr Current thread context
  1132. * @param[in] self_lwp Parent process (current LWP)
  1133. * @param[in] wait_pid PID of child process to verify
  1134. * @param[in] options Wait options (e.g., WNOHANG)
  1135. * @param[out] ustatus Pointer to store child's exit status (optional)
  1136. * @param[in,out] uru Pointer to resource usage structure (optional)
  1137. *
  1138. * @return pid_t Returns child PID if valid and event found, error code otherwise
  1139. *
  1140. * @note Verifies child-parent relationship and checks for termination/stopped state
  1141. */
static pid_t _verify_child_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                    pid_t wait_pid, int options, int *ustatus,
                                    struct rusage *uru)
{
    sysret_t rc;
    struct rt_lwp *child;

    /* check if pid is reference to a valid child */
    lwp_pid_lock_take();
    child = lwp_from_pid_locked(wait_pid);
    if (!child)
        rc = -EINVAL;           /* no process with that pid */
    else if (child->parent != self_lwp)
        rc = -ESRCH;            /* exists, but is not our child */
    else
        rc = wait_pid;
    lwp_pid_lock_release();

    if (rc > 0)
    {
        /* valid child: see whether it already has a reportable event */
        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
        {
            /* event found: copy stats/status out and possibly reap */
            _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
        }
    }
    return rc;
}
  1168. /**
  1169. * @brief Reaps any child process with given pair_pgid that has terminated or stopped
  1170. *
  1171. * @param[in] cur_thr Current thread context
  1172. * @param[in] self_lwp Parent process (current LWP)
  1173. * @param[in] pair_pgid Process group ID to match (0 for any)
  1174. * @param[in] options Wait options (e.g., WNOHANG)
  1175. * @param[out] ustatus Pointer to store child's exit status (optional)
  1176. * @param[in,out] uru Pointer to resource usage structure (optional)
  1177. *
  1178. * @return pid_t Returns child PID if found and event occurred, error code otherwise
  1179. *
  1180. * @note Iterates through child processes to find one that matches given pair_pgid
  1181. */
  1182. static pid_t _reap_any_child_pid(rt_thread_t cur_thr, rt_lwp_t self_lwp, pid_t pair_pgid,
  1183. int options, int *ustatus, struct rusage *uru)
  1184. {
  1185. sysret_t rc = -ECHILD;
  1186. struct rt_lwp *child;
  1187. LWP_LOCK(self_lwp);
  1188. child = self_lwp->first_child;
  1189. /* find a exited child if any */
  1190. while (child)
  1191. {
  1192. if (pair_pgid && child->pgid != pair_pgid)
  1193. continue;
  1194. rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
  1195. if (rc > 0)
  1196. break;
  1197. child = child->sibling;
  1198. }
  1199. LWP_UNLOCK(self_lwp);
  1200. if (rc > 0)
  1201. {
  1202. _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
  1203. }
  1204. return rc;
  1205. }
  1206. /**
  1207. * @brief Wakes up processes waiting for a child process status change
  1208. *
  1209. * @param[in] parent Parent process to notify
  1210. * @param[in] self_lwp Child process that triggered the wakeup
  1211. *
  1212. * @return rt_err_t Returns RT_EOK on success
  1213. *
  1214. * @note Uses wait queue to notify parent process about child status changes
  1215. */
rt_err_t lwp_waitpid_kick(rt_lwp_t parent, rt_lwp_t self_lwp)
{
    /* waker provide the message mainly through its lwp_status */
    /* self_lwp is passed as the wakeup key so _waitq_filter() can match it */
    rt_wqueue_wakeup(&parent->waitpid_waiters, self_lwp);
    return RT_EOK;
}
  1222. /**
  1223. * @brief Waitpid handle structure for process status change notifications
  1224. *
  1225. * @note This structure is used to manage wait queue entries for processes waiting
  1226. * for child process status changes.
  1227. */
struct waitpid_handle {
    struct rt_wqueue_node wq_node; /**< Wait queue node for process status change notifications */
    int options;                   /**< Wait options (e.g., WNOHANG, WUNTRACED) */
    rt_lwp_t waker_lwp;            /**< LWP that triggered the wakeup; stays null until an event is accepted by _waitq_filter() */
};
  1233. /**
  1234. * @brief Filter function for wait queue to determine if a process status change event should be accepted
  1235. *
  1236. * @param[in] wait_node Wait queue node containing filter criteria
  1237. * @param[in] key Pointer to the lightweight process (waker_lwp) triggering the event
  1238. *
  1239. * @return int 0 if event should be accepted (matches criteria), 1 if event should be discarded
  1240. *
  1241. * @note The function handles three cases for process matching:
  1242. * - Positive destiny: Exact PID match
  1243. * - destiny == -1: Any child process of waiter
  1244. * - destiny == 0/-pgid: Process group matching
  1245. */
  1246. static int _waitq_filter(struct rt_wqueue_node *wait_node, void *key)
  1247. {
  1248. int can_accept_evt = 0;
  1249. rt_thread_t waiter = wait_node->polling_thread;
  1250. pid_t destiny = (pid_t)wait_node->key;
  1251. rt_lwp_t waker_lwp = key;
  1252. struct waitpid_handle *handle;
  1253. rt_ubase_t options;
  1254. handle = rt_container_of(wait_node, struct waitpid_handle, wq_node);
  1255. RT_ASSERT(waiter != RT_NULL);
  1256. options = handle->options;
  1257. /* filter out if waker is not the one */
  1258. if (destiny > 0)
  1259. {
  1260. /**
  1261. * in waitpid immediately return routine, we already do the check
  1262. * that pid is one of the child process of waiting thread
  1263. */
  1264. can_accept_evt = waker_lwp->pid == destiny;
  1265. }
  1266. else if (destiny == -1)
  1267. {
  1268. can_accept_evt = waker_lwp->parent == waiter->lwp;
  1269. }
  1270. else
  1271. {
  1272. /* destiny == 0 || destiny == -pgid */
  1273. pid_t waiter_pgid;
  1274. if (destiny == 0)
  1275. {
  1276. waiter_pgid = lwp_pgid_get_byprocess(waiter->lwp);
  1277. }
  1278. else
  1279. {
  1280. waiter_pgid = -destiny;
  1281. }
  1282. can_accept_evt = waiter_pgid == lwp_pgid_get_byprocess(waker_lwp);
  1283. }
  1284. /* filter out if event is not desired */
  1285. if (can_accept_evt)
  1286. {
  1287. if ((options & WEXITED) && waker_lwp->terminated)
  1288. can_accept_evt = 1;
  1289. else if ((options & WSTOPPED) && WIFSTOPPED(waker_lwp->lwp_status))
  1290. can_accept_evt = 1;
  1291. else if ((options & WCONTINUED) && WIFCONTINUED(waker_lwp->lwp_status))
  1292. can_accept_evt = 1;
  1293. else
  1294. can_accept_evt = 0;
  1295. }
  1296. /* setup message for waiter if accepted */
  1297. if (can_accept_evt)
  1298. handle->waker_lwp = waker_lwp;
  1299. /* 0 if event is accepted, otherwise discard */
  1300. return !can_accept_evt;
  1301. }
  1302. /**
  1303. * @brief Wait for a child process status change event
  1304. *
  1305. * @param[in] cur_thr Current thread context that will be suspended
  1306. * @param[in] self_lwp Lightweight process (parent) waiting for the event
  1307. * @param[in,out] handle Waitpid handle containing filter criteria and wait queue node
  1308. * @param[in] destiny Process ID or process group to wait for (-1 for any child, -pgid for process group)
  1309. *
  1310. * @return rt_err_t RT_EOK on success, negative error code on failure
  1311. *
  1312. * @note This function suspends the current thread to wait for a child process status change
  1313. * event that matches the specified criteria (process ID or process group).
  1314. */
static rt_err_t _wait_for_event(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                struct waitpid_handle *handle, pid_t destiny)
{
    rt_err_t ret;

    /* current context checking */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    /* the node key carries the pid/pgid selector consumed by _waitq_filter() */
    handle->wq_node.polling_thread = cur_thr;
    handle->wq_node.key = destiny;
    handle->wq_node.wakeup = _waitq_filter;
    handle->wq_node.wqueue = &self_lwp->waitpid_waiters;
    rt_list_init(&handle->wq_node.list);

    cur_thr->error = RT_EOK;

    LOG_D("%s(self_lwp=%d) wait for event", __func__, self_lwp->pid);

    /* suspend, then enqueue, inside one critical section, so a concurrent
     * wakeup cannot fire between the two steps and be lost */
    rt_enter_critical();
    ret = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
    if (ret == RT_EOK)
    {
        rt_wqueue_add(handle->wq_node.wqueue, &handle->wq_node);
        rt_exit_critical();

        rt_schedule();

        /* woken up: the wakeup reason was stored in the thread error code */
        ret = cur_thr->error;

        /**
         * cur_thr error is a positive value, but some legacy implementation
         * use a negative one. So we check to avoid errors
         */
        ret = ret > 0 ? -ret : ret;
        /**
         * we dont rely on this actually, but we cleanup it since wakeup API
         * set this up durint operation, and this will cause some messy condition
         */
        handle->wq_node.wqueue->flag = RT_WQ_FLAG_CLEAN;
        rt_wqueue_remove(&handle->wq_node);
    }
    else
    {
        /* failed to suspend, return immediately with failure */
        rt_exit_critical();
    }
    return ret;
}
  1355. /**
  1356. * @brief Wait for and reap a child process status change
  1357. *
  1358. * @param[in] cur_thr Current thread context
  1359. * @param[in] self_lwp Lightweight process (parent) waiting for the child
  1360. * @param[in] pid Process ID to wait for (-1 for any child, -pgid for process group)
  1361. * @param[in] options Wait options (WNOHANG, WUNTRACED, etc.)
  1362. * @param[out] ustatus Pointer to store child exit status
  1363. * @param[in,out] uru Pointer to resource usage structure to update
  1364. *
  1365. * @return sysret_t PID of the child process that changed status, or error code
  1366. *
  1367. * @note The function:
  1368. * - Uses _wait_for_event to wait for status changes
  1369. * - Calls _stats_and_reap_child if a matching child is found
  1370. */
  1371. static sysret_t _wait_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp, const pid_t pid,
  1372. int options, int *ustatus, struct rusage *uru)
  1373. {
  1374. sysret_t rc;
  1375. struct waitpid_handle handle;
  1376. rt_lwp_t waker;
  1377. /* wait for SIGCHLD or other async events */
  1378. handle.options = options;
  1379. handle.waker_lwp = 0;
  1380. rc = _wait_for_event(cur_thr, self_lwp, &handle, pid);
  1381. waker = handle.waker_lwp;
  1382. if (waker != RT_NULL)
  1383. {
  1384. rc = waker->pid;
  1385. /* check out if any process exited */
  1386. LOG_D("%s: woken up by lwp=%d", __func__, waker->pid);
  1387. _stats_and_reap_child(waker, cur_thr, self_lwp, ustatus, options, uru);
  1388. }
  1389. /**
  1390. * else if (rc != RT_EOK)
  1391. * unable to do a suspend, or wakeup unexpectedly
  1392. * -> then returned a failure
  1393. */
  1394. return rc;
  1395. }
  1396. /**
  1397. * @brief Wait for process termination and return status
  1398. *
  1399. * @param[in] pid Process ID to wait for:
  1400. * >0 - specific child process
  1401. * -1 - any child process
  1402. * -pgid - any child in process group
  1403. * 0 - any child in caller's process group
  1404. * @param[out] status Pointer to store child exit status (optional)
  1405. * @param[in] options Wait options (WNOHANG, WUNTRACED, etc.)
  1406. * @param[in,out] ru Pointer to resource usage structure (optional)
  1407. *
  1408. * @return pid_t PID of the child that changed state, or:
  1409. * -1 on error
  1410. * 0 if WNOHANG and no child status available
  1411. *
  1412. * @note The function handles three cases:
  1413. * - Specific PID wait (pid > 0)
  1414. * - Any child wait (pid == -1)
  1415. * - Process group wait (pid == 0 or pid < -1)
  1416. */
pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru)
{
    pid_t rc = -1;
    struct rt_thread *cur_thr;
    struct rt_lwp *self_lwp;

    cur_thr = rt_thread_self();
    self_lwp = lwp_self();

    if (!cur_thr || !self_lwp)
    {
        /* no process context to wait from */
        rc = -EINVAL;
    }
    else
    {
        /* check if able to reap desired child immediately */
        if (pid > 0)
        {
            /* if pid is child then try to reap it */
            rc = _verify_child_and_reap(cur_thr, self_lwp, pid, options, status, ru);
        }
        else if (pid == -1)
        {
            /* any terminated child */
            rc = _reap_any_child_pid(cur_thr, self_lwp, 0, options, status, ru);
        }
        else
        {
            /**
             * (pid < -1 || pid == 0)
             * any terminated child with matched pgid
             */
            pid_t pair_pgid;
            if (pid == 0)
            {
                /* pid 0: match the caller's own process group */
                pair_pgid = lwp_pgid_get_byprocess(self_lwp);
            }
            else
            {
                pair_pgid = -pid;
            }
            rc = _reap_any_child_pid(cur_thr, self_lwp, pair_pgid, options, status, ru);
        }

        if (rc == HAS_CHILD_BUT_NO_EVT)
        {
            if (!(options & WNOHANG))
            {
                /* otherwise, arrange a suspend and wait for async event */
                options |= WEXITED;
                rc = _wait_and_reap(cur_thr, self_lwp, pid, options, status, ru);
            }
            else
            {
                /**
                 * POSIX.1: If waitpid() was invoked with WNOHANG set in options,
                 * it has at least one child process specified by pid for which
                 * status is not available, and status is not available for any
                 * process specified by pid, 0 is returned
                 */
                rc = 0;
            }
        }
        else
        {
            /* here rc is a child pid (> 0) or an error (< 0); never 0 */
            RT_ASSERT(rc != 0);
        }
    }

    /* NOTE(review): `status` is dereferenced directly here although it is
     * treated as a user buffer elsewhere — assumes the pointer is accessible
     * from kernel context; confirm */
    LOG_D("waitpid() => %d, *status=0x%x", rc, status ? *status:0);
    return rc;
}
  1485. /**
  1486. * @brief Waits for a child process to terminate
  1487. *
  1488. * @param[in] pid The process ID to wait for
  1489. * @param[out] status Pointer to store child exit status (optional)
  1490. * @param[in] options Wait options (e.g., WNOHANG, WUNTRACED)
  1491. *
  1492. * @return pid_t The process ID of the child whose state changed,
  1493. * -1 on error, or 0 if WNOHANG was specified and no child was available
  1494. *
  1495. * @note This is a wrapper function that calls lwp_waitpid with NULL for the resource usage parameter
  1496. */
pid_t waitpid(pid_t pid, int *status, int options)
{
    /* POSIX-style wrapper: identical to lwp_waitpid() without rusage */
    return lwp_waitpid(pid, status, options, RT_NULL);
}
  1501. #ifdef RT_USING_FINSH
  1502. /**
  1503. * @brief Prints a line of dashes for visual separation
  1504. *
  1505. * @param[in] len Number of dashes to print
  1506. */
  1507. static void object_split(int len)
  1508. {
  1509. while (len--)
  1510. {
  1511. rt_kprintf("-");
  1512. }
  1513. }
  1514. /**
  1515. * @brief Prints detailed information about a thread
  1516. *
  1517. * @param[in] thread Pointer to the thread structure to print information about
  1518. * @param[in] maxlen Maximum length for thread name display
  1519. *
  1520. * @note This function prints:
  1521. * - CPU core (SMP only) and priority
  1522. * - Thread state (ready, suspended, init, close, running)
  1523. * - Stack information (usage percentage, size, etc.)
  1524. * - Remaining tick count and error code
  1525. * - Thread name
  1526. */
static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    /* column 1 (SMP only): the cpu the thread last ran on, or N/A */
    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
        rt_kprintf("%3d %3d ", RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
    else
        rt_kprintf("N/A %3d ", RT_SCHED_PRIV(thread).current_priority);
#else
    rt_kprintf("%3d ", RT_SCHED_PRIV(thread).current_priority);
#endif /*RT_USING_SMP*/

    /* decode the scheduler state bits into a fixed-width label */
    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY) rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT) rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE) rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    /* scan down from the stack top for the first touched byte
     * (presumably '#' is the stack fill pattern — confirm against the
     * kernel's stack-init code) */
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#')ptr--;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    /* downward-growing stack: scan up from the base for the first used byte */
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#')ptr++;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
               / thread->stack_size,
               RT_SCHED_PRIV(thread).remaining_tick,
               thread->error);
#endif
    /* NOTE(review): in the upward-stack branch the format above already ends
     * with '\n', so this name lands on its own line — confirm intended */
    rt_kprintf(" %-.*s\n",rt_strlen(thread->parent.name), thread->parent.name);
}
  1567. /**
  1568. * @brief Lists all processes and threads in the system
  1569. *
  1570. * @return long Returns 0 on success
  1571. *
  1572. * @note This function:
  1573. * - Prints a header with process/thread information columns
  1574. * - Lists all kernel threads (without LWP association)
  1575. * - Lists all user processes (LWPs) with their threads
  1576. * - For each thread, displays:
  1577. * - PID/TID (process/thread IDs)
  1578. * - Priority, status, stack information
  1579. * - Remaining tick count and error code
  1580. * - Thread name/command
  1581. */
  1582. long list_process(void)
  1583. {
  1584. int index;
  1585. int maxlen;
  1586. rt_ubase_t level;
  1587. struct rt_thread *thread;
  1588. struct rt_list_node *node, *list;
  1589. const char *item_title = "thread";
  1590. int count = 0;
  1591. struct rt_thread **threads;
  1592. maxlen = RT_NAME_MAX;
  1593. #ifdef RT_USING_SMP
  1594. rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error %-*.s\n", 4, "PID", 4, "TID", maxlen, item_title, maxlen, "cmd");
  1595. object_split(4);rt_kprintf(" ");object_split(4);rt_kprintf(" ");object_split(maxlen);rt_kprintf(" ");
  1596. rt_kprintf( "--- --- ------- ---------- ---------- -------- ---------- -----");rt_kprintf(" ");object_split(maxlen);rt_kprintf("\n");
  1597. #else
  1598. rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", 4, "TID", maxlen, item_title, maxlen, "cmd");
  1599. object_split(4);rt_kprintf(" ");object_split(4);rt_kprintf(" ");object_split(maxlen);rt_kprintf(" ");
  1600. rt_kprintf( "--- ------- ---------- ---------- -------- ---------- -----");rt_kprintf(" ");object_split(maxlen);rt_kprintf("\n");
  1601. #endif /*RT_USING_SMP*/
  1602. count = rt_object_get_length(RT_Object_Class_Thread);
  1603. if (count > 0)
  1604. {
  1605. /* get thread pointers */
  1606. threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
  1607. if (threads)
  1608. {
  1609. index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
  1610. if (index > 0)
  1611. {
  1612. for (index = 0; index <count; index++)
  1613. {
  1614. struct rt_thread th;
  1615. thread = threads[index];
  1616. level = rt_spin_lock_irqsave(&thread->spinlock);
  1617. if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
  1618. {
  1619. rt_spin_unlock_irqrestore(&thread->spinlock, level);
  1620. continue;
  1621. }
  1622. rt_memcpy(&th, thread, sizeof(struct rt_thread));
  1623. rt_spin_unlock_irqrestore(&thread->spinlock, level);
  1624. if (th.lwp == RT_NULL)
  1625. {
  1626. rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
  1627. print_thread_info(&th, maxlen);
  1628. }
  1629. }
  1630. }
  1631. rt_free(threads);
  1632. }
  1633. }
  1634. for (index = 0; index < RT_LWP_MAX_NR; index++)
  1635. {
  1636. struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;
  1637. if (lwp)
  1638. {
  1639. list = &lwp->t_grp;
  1640. for (node = list->next; node != list; node = node->next)
  1641. {
  1642. thread = rt_list_entry(node, struct rt_thread, sibling);
  1643. rt_kprintf("%4d %4d %-*.*s ", lwp_to_pid(lwp), thread->tid, maxlen, RT_NAME_MAX, lwp->cmd);
  1644. print_thread_info(thread, maxlen);
  1645. }
  1646. }
  1647. }
  1648. return 0;
  1649. }
  1650. MSH_CMD_EXPORT(list_process, list process);
  1651. /**
  1652. * @brief Command handler for killing processes
  1653. *
  1654. * @param[in] argc Argument count
  1655. * @param[in] argv Argument vector (contains PID and optional signal)
  1656. *
  1657. * @note Usage:
  1658. * - kill <pid>
  1659. * - kill <pid> -s <signal>
  1660. * Default signal is SIGKILL (9)
  1661. */
  1662. static void cmd_kill(int argc, char** argv)
  1663. {
  1664. int pid;
  1665. int sig = SIGKILL;
  1666. if (argc < 2)
  1667. {
  1668. rt_kprintf("kill pid or kill pid -s signal\n");
  1669. return;
  1670. }
  1671. pid = atoi(argv[1]);
  1672. if (argc >= 4)
  1673. {
  1674. if (argv[2][0] == '-' && argv[2][1] == 's')
  1675. {
  1676. sig = atoi(argv[3]);
  1677. }
  1678. }
  1679. lwp_pid_lock_take();
  1680. lwp_signal_kill(lwp_from_pid_raw_locked(pid), sig, SI_USER, 0);
  1681. lwp_pid_lock_release();
  1682. }
  1683. MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);
  1684. /**
  1685. * @brief Kills all processes matching the given name
  1686. *
  1687. * @param[in] argc Argument count (must be >= 2)
  1688. * @param[in] argv Argument vector containing process name to kill
  1689. *
  1690. * @note Sends SIGKILL signal to all processes with the specified name.
  1691. * Requires at least 2 arguments (command name + process name)
  1692. */
  1693. static void cmd_killall(int argc, char** argv)
  1694. {
  1695. int pid;
  1696. if (argc < 2)
  1697. {
  1698. rt_kprintf("killall processes_name\n");
  1699. return;
  1700. }
  1701. while((pid = lwp_name2pid(argv[1])) > 0)
  1702. {
  1703. lwp_pid_lock_take();
  1704. lwp_signal_kill(lwp_from_pid_raw_locked(pid), SIGKILL, SI_USER, 0);
  1705. lwp_pid_lock_release();
  1706. rt_thread_mdelay(100);
  1707. }
  1708. }
  1709. MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
  1710. #endif
  1711. /**
  1712. * @brief Checks if the current thread has received an exit request
  1713. *
  1714. * @return int Returns:
  1715. * - 0 if no exit request or not an LWP thread
  1716. * - 1 if exit request was triggered and set to IN_PROCESS
  1717. *
  1718. * @note Verifies if the current lightweight process thread has been requested to exit
  1719. * by checking the exit_request atomic flag.
  1720. */
  1721. int lwp_check_exit_request(void)
  1722. {
  1723. rt_thread_t thread = rt_thread_self();
  1724. rt_size_t expected = LWP_EXIT_REQUEST_TRIGGERED;
  1725. if (!thread->lwp)
  1726. {
  1727. return 0;
  1728. }
  1729. return atomic_compare_exchange_strong(&thread->exit_request, &expected,
  1730. LWP_EXIT_REQUEST_IN_PROCESS);
  1731. }
  1732. static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
  1733. static void _resr_cleanup(struct rt_lwp *lwp);
  1734. /**
  1735. * @brief Terminates a lightweight process (LWP)
  1736. *
  1737. * @param[in,out] lwp Pointer to the lightweight process structure to terminate
  1738. *
  1739. * @note Safely terminates an LWP by marking it as terminated, waiting for sibling threads,
  1740. * and cleaning up resources.
  1741. */
  1742. void lwp_terminate(struct rt_lwp *lwp)
  1743. {
  1744. if (!lwp)
  1745. {
  1746. /* kernel thread not support */
  1747. return;
  1748. }
  1749. LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);
  1750. LWP_LOCK(lwp);
  1751. if (!lwp->terminated)
  1752. {
  1753. /* stop the receiving of signals */
  1754. lwp->terminated = RT_TRUE;
  1755. LWP_UNLOCK(lwp);
  1756. _wait_sibling_exit(lwp, rt_thread_self());
  1757. _resr_cleanup(lwp);
  1758. }
  1759. else
  1760. {
  1761. LWP_UNLOCK(lwp);
  1762. }
  1763. }
/**
 * @brief Waits for sibling threads to exit during process termination
 *
 * @param[in] lwp Pointer to the lightweight process structure
 * @param[in] curr_thread Current thread context making the termination request
 *
 * @note Details for this function:
 * - Broadcast Exit Request: CAS each sibling's exit_request from NONE to
 *   TRIGGERED so lwp_check_exit_request() fires in those threads.
 * - Wake Suspended Threads: suspended siblings are woken with error set to
 *   RT_EINTR so they can observe the request.
 * - Wait for Termination: polls (10 ms period) until every sibling ahead of
 *   curr_thread in the t_grp list has reached INIT state.
 * - Cleanup Terminated Threads: once all siblings are in INIT state, they are
 *   unlinked from the sibling list, their tids released, and their TCBs deleted.
 */
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_sched_lock_level_t slvl;
    rt_list_t *list;
    rt_thread_t thread;
    rt_size_t expected = LWP_EXIT_REQUEST_NONE;

    /* broadcast exit request for sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);
        /* NOTE(review): on CAS failure `expected` is overwritten with the
         * observed value and is NOT reset before the next iteration, so a
         * single thread with a non-NONE state changes what later siblings
         * are compared against — looks unintended; confirm against upstream */
        atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                       LWP_EXIT_REQUEST_TRIGGERED);
        rt_sched_lock(&slvl);
        /* dont release, otherwise thread may have been freed */
        if (rt_sched_thread_is_suspended(thread))
        {
            /* interrupt the wait so the sibling can see the exit request */
            thread->error = RT_EINTR;
            rt_sched_unlock(slvl);
            rt_thread_wakeup(thread);
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;
        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        /* done when curr_thread is the only node left in the t_grp list */
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_sched_lock_level_t slvl;
            rt_thread_t sub_thread;
            rt_list_t *list;    /* intentionally shadows the outer `list` */
            int all_subthread_in_init = 1;

            /* check all subthread is in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                rt_sched_lock(&slvl);
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                {
                    rt_sched_unlock(slvl);
                    all_subthread_in_init = 0;
                    break;
                }
                else
                {
                    rt_sched_unlock(slvl);
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthread */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the thread
                     *   control block, it must ensure no one else can access
                     *   thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        /* back off briefly before re-checking sibling states */
        rt_thread_mdelay(10);
    }
}
  1866. /**
  1867. * @brief Notifies parent process about child process termination
  1868. *
  1869. * @param[in,out] lwp The child lightweight process structure
  1870. *
  1871. * @note This function sends SIGCHLD signal to parent process with termination details.
  1872. * It handles both signaled (killed/dumped) and normal exit cases.
  1873. */
  1874. static void _notify_parent(rt_lwp_t lwp)
  1875. {
  1876. int si_code;
  1877. int signo_or_exitcode;
  1878. lwp_siginfo_ext_t ext;
  1879. lwp_status_t lwp_status = lwp->lwp_status;
  1880. rt_lwp_t parent = lwp->parent;
  1881. if (WIFSIGNALED(lwp_status))
  1882. {
  1883. si_code = (lwp_status & LWP_COREDUMP_FLAG) ? CLD_DUMPED : CLD_KILLED;
  1884. signo_or_exitcode = WTERMSIG(lwp_status);
  1885. }
  1886. else
  1887. {
  1888. si_code = CLD_EXITED;
  1889. signo_or_exitcode = WEXITSTATUS(lwp->lwp_status);
  1890. }
  1891. lwp_waitpid_kick(parent, lwp);
  1892. ext = rt_malloc(sizeof(struct lwp_siginfo));
  1893. if (ext)
  1894. {
  1895. rt_thread_t cur_thr = rt_thread_self();
  1896. ext->sigchld.status = signo_or_exitcode;
  1897. ext->sigchld.stime = cur_thr->system_time;
  1898. ext->sigchld.utime = cur_thr->user_time;
  1899. }
  1900. lwp_signal_kill(parent, SIGCHLD, si_code, ext);
  1901. }
/**
 * @brief Clean up resources when a lightweight process (LWP) terminates
 *
 * @param[in,out] lwp The lightweight process structure to clean up
 *
 * @note This function handles the cleanup of various resources associated
 * with an LWP when it terminates, including:
 * - Job control cleanup
 * - Signal detachment
 * - Child process detachment (terminated children are released; live
 *   children become orphans)
 * - Parent notification via SIGCHLD, unless the parent opted out with
 *   the no-child-wait disposition
 * - File descriptor table cleanup
 * - PID resource release (only when no parent will reap this process)
 */
static void _resr_cleanup(struct rt_lwp *lwp)
{
    /* set when no parent will recycle us, so we must release our own pid */
    int need_cleanup_pid = RT_FALSE;

    lwp_jobctrl_on_exit(lwp);

    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        /* pop the head of the singly-linked child list */
        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node is release */
        LWP_UNLOCK(lwp);
        LWP_LOCK(child);
        if (child->terminated)
        {
            /* child already exited and was waiting on us to reap it */
            lwp_pid_put(child);
        }
        else
        {
            child->sibling = RT_NULL;
            /* info: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }
        LWP_UNLOCK(child);
        /* drop the parent<->child cross references taken at fork time */
        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        /* re-acquire before testing the loop condition again */
        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    /**
     * @brief Wakeup parent if it's waiting for this lwp, otherwise a signal
     *        will be sent to parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent &&
        !lwp_sigismember(&lwp->parent->signal.sig_action_nocldwait, SIGCHLD))
    {
        /* if successfully race to setup lwp->terminated before parent detach */
        LWP_UNLOCK(lwp);

        /**
         * Note: children cannot detach itself and must wait for parent to take
         * care of it
         */
        _notify_parent(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);

        /**
         * if process is orphan, it doesn't have parent to do the recycling.
         * Otherwise, its parent had setup a flag to mask out recycling event
         */
        need_cleanup_pid = RT_TRUE;
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);
        /* free the table outside the lock */
        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }

    if (need_cleanup_pid)
    {
        lwp_pid_put(lwp);
    }
}
  2001. /**
  2002. * @brief Set CPU affinity for a thread
  2003. *
  2004. * @param[in] tid The thread ID to set affinity for
  2005. * @param[in] cpu The target CPU core number
  2006. *
  2007. * @return 0 on success, -1 on failure (invalid thread ID)
  2008. *
  2009. * @note This function binds a thread to a specific CPU core in SMP systems.
  2010. * It handles thread reference counting and returns operation status.
  2011. */
  2012. static int _lwp_setaffinity(int tid, int cpu)
  2013. {
  2014. rt_thread_t thread;
  2015. int ret = -1;
  2016. thread = lwp_tid_get_thread_and_inc_ref(tid);
  2017. if (thread)
  2018. {
  2019. #ifdef RT_USING_SMP
  2020. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_ubase_t)cpu);
  2021. #endif
  2022. ret = 0;
  2023. }
  2024. lwp_tid_dec_ref(thread);
  2025. return ret;
  2026. }
  2027. /**
  2028. * @brief Sets CPU affinity for a thread
  2029. *
  2030. * @param[in] tid The thread ID to set affinity for
  2031. * @param[in] cpu The target CPU core number (0 to RT_CPUS_NR-1)
  2032. *
  2033. * @return int 0 on success, -1 on failure
  2034. *
  2035. * @note wrapper function for _lwp_setaffinity
  2036. */
  2037. int lwp_setaffinity(int tid, int cpu)
  2038. {
  2039. int ret;
  2040. #ifdef RT_USING_SMP
  2041. if (cpu < 0 || cpu > RT_CPUS_NR)
  2042. {
  2043. cpu = RT_CPUS_NR;
  2044. }
  2045. #endif
  2046. ret = _lwp_setaffinity(tid, cpu);
  2047. return ret;
  2048. }
  2049. #ifdef RT_USING_SMP
  2050. /**
  2051. * @brief Command handler for CPU binding operation
  2052. *
  2053. * @param[in] argc Number of command arguments
  2054. * @param[in] argv Array of command argument strings
  2055. *
  2056. * @note Requires exactly 2 arguments: pid (process ID) and cpu (CPU core number)
  2057. */
  2058. static void cmd_cpu_bind(int argc, char** argv)
  2059. {
  2060. int pid;
  2061. int cpu;
  2062. if (argc < 3)
  2063. {
  2064. rt_kprintf("Useage: cpu_bind pid cpu\n");
  2065. return;
  2066. }
  2067. pid = atoi(argv[1]);
  2068. cpu = atoi(argv[2]);
  2069. lwp_setaffinity((pid_t)pid, cpu);
  2070. }
  2071. MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
  2072. #endif