lwp.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix complie error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. * 2023-02-20 wangxiaoyao inv icache before new app startup
  13. * 2023-02-20 wangxiaoyao fix bug on foreground app switch
  14. * 2023-10-16 Shell Support a new backtrace framework
  15. * 2023-11-17 xqyjlj add process group and session support
  16. * 2023-11-30 Shell add lwp_startup()
  17. */
  18. #define DBG_TAG "lwp"
  19. #define DBG_LVL DBG_INFO
  20. #include <rtdbg.h>
  21. #include <rthw.h>
  22. #include <rtthread.h>
  23. #include <dfs_file.h>
  24. #include <unistd.h>
  25. #include <stdio.h> /* rename() */
  26. #include <fcntl.h>
  27. #include <sys/stat.h>
  28. #include <sys/statfs.h> /* statfs() */
  29. #include <lwp_elf.h>
  30. #ifndef RT_USING_DFS
  31. #error "lwp need file system(RT_USING_DFS)"
  32. #endif
  33. #include "lwp_internal.h"
  34. #include "lwp_arch.h"
  35. #include "lwp_arch_comm.h"
  36. #include "lwp_signal.h"
  37. #include "lwp_dbg.h"
  38. #include <terminal/terminal.h>
  39. #ifdef ARCH_MM_MMU
  40. #include <lwp_user_mm.h>
  41. #endif /* end of ARCH_MM_MMU */
  42. #ifndef O_DIRECTORY
  43. #define O_DIRECTORY 0x200000
  44. #endif
  45. #ifndef O_BINARY
  46. #define O_BINARY 0x10000
  47. #endif
  48. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  49. #ifdef DFS_USING_WORKDIR
  50. extern char working_directory[];
  51. #endif
  52. static int lwp_component_init(void)
  53. {
  54. int rc;
  55. if ((rc = lwp_tid_init()) != RT_EOK)
  56. {
  57. LOG_E("%s: lwp_component_init() failed", __func__);
  58. }
  59. else if ((rc = lwp_pid_init()) != RT_EOK)
  60. {
  61. LOG_E("%s: lwp_pid_init() failed", __func__);
  62. }
  63. else if ((rc = rt_channel_component_init()) != RT_EOK)
  64. {
  65. LOG_E("%s: rt_channel_component_init failed", __func__);
  66. }
  67. else if ((rc = lwp_futex_init()) != RT_EOK)
  68. {
  69. LOG_E("%s: lwp_futex_init() failed", __func__);
  70. }
  71. return rc;
  72. }
  73. INIT_COMPONENT_EXPORT(lwp_component_init);
/**
 * Weak hook polled by lwp_startup() while counting down before launching
 * init: a non-zero return makes lwp_startup() skip starting process 0
 * (see the LWP_DEBUG_INIT countdown loop). The default implementation
 * never requests a stop.
 */
rt_weak int lwp_startup_debug_request(void)
{
    return 0;
}
/* number of polls of lwp_startup_debug_request() before init is started */
#define LATENCY_TIMES (3)
/* delay between two polls, in milliseconds */
#define LATENCY_IN_MSEC (128)
/* default environment entry handed to process 0 */
#define LWP_CONSOLE_PATH "CONSOLE=/dev/console"
/* candidate locations probed, in order, for the init executable */
const char *init_search_path[] = {
    "/sbin/init",
    "/bin/init",
};
/**
 * Startup process 0 and do the essential works
 * This is the "Hello World" point of RT-Smart
 *
 * Probes init_search_path[] for an init binary and execs it as pid 1
 * (process 0's first child). Returns 0 on success, a negative error
 * otherwise; on failure the system falls back to "legacy" (kernel
 * shell only) mode. Registered as an application-level init hook.
 */
static int lwp_startup(void)
{
    int error;

    const char *init_path;
    /* argv[0] is filled in with the path found; "&" backgrounds init */
    char *argv[] = {0, "&"};
    char *envp[] = {LWP_CONSOLE_PATH, 0};

#ifdef LWP_DEBUG_INIT
    int command;
    int countdown = LATENCY_TIMES;
    /* give the developer a short window to abort the init launch */
    while (countdown)
    {
        command = lwp_startup_debug_request();
        if (command)
        {
            /* a stop was requested: skip starting init entirely */
            return 0;
        }
        rt_kprintf("Press any key to stop init process startup ... %d\n", countdown);
        countdown -= 1;
        rt_thread_mdelay(LATENCY_IN_MSEC);
    }
    rt_kprintf("Starting init ...\n");
#endif /* LWP_DEBUG_INIT */

    for (size_t i = 0; i < sizeof(init_search_path)/sizeof(init_search_path[0]); i++)
    {
        struct stat s;
        init_path = init_search_path[i];
        error = stat(init_path, &s);
        if (error == 0)
        {
            argv[0] = (void *)init_path;
            /* lwp_execve() returns the new pid on success */
            error = lwp_execve((void *)init_path, 0, sizeof(argv)/sizeof(argv[0]), argv, envp);
            if (error < 0)
            {
                LOG_E("%s: failed to startup process 0 (init)\n"
                      "Switching to legacy mode...", __func__);
            }
            else if (error != 1)
            {
                /* init must be pid 1; something else grabbed it first */
                LOG_E("%s: pid 1 is already allocated", __func__);
                error = -EBUSY;
            }
            else
            {
                /* NOTE(review): lwp_from_pid_locked() suggests the pid lock
                 * should be held by the caller — confirm locking contract */
                rt_lwp_t p = lwp_from_pid_locked(1);
                /* shield init from fatal signals */
                p->sig_protected = 1;
                error = 0;
            }
            /* first existing candidate decides the outcome */
            break;
        }
    }

    if (error)
    {
        LOG_D("%s: init program not found\n"
              "Switching to legacy mode...", __func__);
    }
    return error;
}
INIT_APP_EXPORT(lwp_startup);
  147. void lwp_setcwd(char *buf)
  148. {
  149. struct rt_lwp *lwp = RT_NULL;
  150. if(strlen(buf) >= DFS_PATH_MAX)
  151. {
  152. rt_kprintf("buf too long!\n");
  153. return ;
  154. }
  155. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  156. if (lwp)
  157. {
  158. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX - 1);
  159. }
  160. else
  161. {
  162. rt_strncpy(working_directory, buf, DFS_PATH_MAX - 1);
  163. }
  164. return ;
  165. }
  166. char *lwp_getcwd(void)
  167. {
  168. char *dir_buf = RT_NULL;
  169. struct rt_lwp *lwp = RT_NULL;
  170. rt_thread_t thread = rt_thread_self();
  171. if (thread)
  172. {
  173. lwp = (struct rt_lwp *)thread->lwp;
  174. }
  175. if (lwp)
  176. {
  177. if(lwp->working_directory[0] != '/')
  178. {
  179. dir_buf = &working_directory[0];
  180. }
  181. else
  182. {
  183. dir_buf = &lwp->working_directory[0];
  184. }
  185. }
  186. else
  187. dir_buf = &working_directory[0];
  188. return dir_buf;
  189. }
/**
 * RT-Thread light-weight process
 */
/* Record `sp` as the kernel-mode stack pointer of the current thread. */
void lwp_set_kernel_sp(uint32_t *sp)
{
    rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
}
/* Return the kernel-mode stack pointer of the current thread.
 *
 * Without an MMU this must account for a pending context switch: when
 * rt_thread_switch_interrupt_flag is set, the "current" thread is the
 * one being switched away from, reachable via rt_interrupt_from_thread.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;

    if (rt_thread_switch_interrupt_flag)
    {
        /* a switch is in flight: take the sp of the outgoing thread */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  216. #ifdef ARCH_MM_MMU
/* Build the initial user stack page for a new process: argc, argv[],
 * envp[] and the ELF auxiliary vector, laid out as
 *   argc, argv[0..argc-1], NULL, envp[...], NULL, aux[], NULL, strings
 * in a single page mapped at the top of the user stack.
 *
 * The page is written through its kernel alias (args_k, obtained via
 * lwp_v2p) while every pointer *stored into* the page is a user-space
 * address (args/str), since the process will read them after exec.
 *
 * Returns a kernel-visible pointer to the aux vector inside the page,
 * or RT_NULL when the arguments do not fit in one page or the mapping
 * fails. On success lwp->args records the user-space base address.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;
    char *str_k;
    char **new_argve;
    int i;
    int len;
    size_t *args_k;
    struct process_aux *aux;
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    size_t zero = 0;

    /* account for argument strings and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    /* account for environment strings and their pointer slots;
     * afterwards i holds the number of envp entries */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }

    /* for aux */
    size += sizeof(struct process_aux);

    /* everything must fit into the single page mapped below */
    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    /* map one page at the top of the user stack region */
    args = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
    if (args == RT_NULL || lwp_data_put(lwp, args, &zero, sizeof(zero)) != sizeof(zero))
    {
        return RT_NULL;
    }

    /* kernel-visible alias of the freshly mapped user page */
    args_k = (size_t *)lwp_v2p(lwp, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] */
    /* string area begins after all pointer slots; str is the user view,
     * str_k the kernel view of the same bytes */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));

    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    /* copy argv strings; stored pointers are user addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;
    i++;

    new_argve[i] = 0;
    if (envp)
    {
        int j;

        /* copy envp strings right after argv's NULL terminator */
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    /* make the page visible to the user side before exec */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
  300. #else
/* Non-MMU variant: build argc/argv/envp (and aux) in a kernel heap
 * buffer instead of a mapped user page; lwp->args points at it and
 * (non-MMU) lwp->args_length records its size for later rt_free.
 *
 * NOTE(review): this definition is compiled only in the #else branch
 * of ARCH_MM_MMU, so the inner #ifdef ARCH_MM_MMU sections below are
 * dead code kept for symmetry with the MMU version — confirm.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* account for argument strings */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    /* account for environment strings; i ends as the envp count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    /* strings start after the argc slot and all pointer slots */
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */

    new_argve = (char **)&args[1];
    args[0] = argc;

    /* copy argv strings into the tail of the buffer */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;
    i++;

    new_argve[i] = 0;
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }

#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;

    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  387. #endif
  388. #ifdef ARCH_MM_MMU
/*
 * Bounds-check helpers used throughout load_elf(). Both expect the
 * enclosing function to declare a local `int result` and provide an
 * `_exit:` cleanup label.
 */
/* Bail out when a file offset lies beyond the file length. */
#define check_off(voff, vlen) \
    do \
    { \
        if (voff > vlen) \
        { \
            result = -RT_ERROR; \
            goto _exit; \
        } \
    } while (0)

/* Bail out when fewer bytes than requested were read. */
#define check_read(vrlen, vrlen_want) \
    do \
    { \
        if (vrlen < vrlen_want) \
        { \
            result = -RT_ERROR; \
            goto _exit; \
        } \
    } while (0)
  407. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  408. {
  409. size_t read_block = 0;
  410. while (nmemb)
  411. {
  412. size_t count;
  413. count = read(fd, ptr, size * nmemb) / size;
  414. if (count < nmemb)
  415. {
  416. LOG_E("ERROR: file size error!");
  417. break;
  418. }
  419. ptr = (void *)((uint8_t *)ptr + (count * size));
  420. nmemb -= count;
  421. read_block += count;
  422. }
  423. return read_block;
  424. }
/* Symbol-table entry as consumed by arch_elf_reloc().
 * NOTE(review): this field order matches Elf32_Sym; the standard
 * Elf64_Sym orders fields differently (name, info, other, shndx,
 * value, size) — confirm the 64-bit reloc path expects this layout. */
typedef struct
{
    Elf_Word st_name;       /* symbol name (string table offset) */
    Elf_Addr st_value;      /* symbol value / address */
    Elf_Word st_size;       /* size of the symbol's object */
    unsigned char st_info;  /* type and binding */
    unsigned char st_other; /* visibility */
    Elf_Half st_shndx;      /* index of the defining section */
} Elf_sym;
  434. #ifdef ARCH_MM_MMU
  435. struct map_range
  436. {
  437. void *start;
  438. size_t size;
  439. };
  440. static void expand_map_range(struct map_range *m, void *start, size_t size)
  441. {
  442. if (!m->start)
  443. {
  444. m->start = start;
  445. m->size = size;
  446. }
  447. else
  448. {
  449. void *end = (void *)((char*)start + size);
  450. void *mend = (void *)((char*)m->start + m->size);
  451. if (m->start > start)
  452. {
  453. m->start = start;
  454. }
  455. if (mend < end)
  456. {
  457. mend = end;
  458. }
  459. m->size = (char *)mend - (char *)m->start;
  460. }
  461. }
  462. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  463. {
  464. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  465. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  466. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  467. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  468. if (m1->size)
  469. {
  470. if (m1_start < (void *)USER_LOAD_VADDR)
  471. {
  472. return -1;
  473. }
  474. if (m1_start > (void *)USER_STACK_VSTART)
  475. {
  476. return -1;
  477. }
  478. if (m1_end < (void *)USER_LOAD_VADDR)
  479. {
  480. return -1;
  481. }
  482. if (m1_end > (void *)USER_STACK_VSTART)
  483. {
  484. return -1;
  485. }
  486. }
  487. if (m2->size)
  488. {
  489. if (m2_start < (void *)USER_LOAD_VADDR)
  490. {
  491. return -1;
  492. }
  493. if (m2_start > (void *)USER_STACK_VSTART)
  494. {
  495. return -1;
  496. }
  497. if (m2_end < (void *)USER_LOAD_VADDR)
  498. {
  499. return -1;
  500. }
  501. if (m2_end > (void *)USER_STACK_VSTART)
  502. {
  503. return -1;
  504. }
  505. }
  506. if ((m1->size != 0) && (m2->size != 0))
  507. {
  508. if (m1_start < m2_start)
  509. {
  510. if (m1_end > m2_start)
  511. {
  512. return -1;
  513. }
  514. }
  515. else /* m2_start <= m1_start */
  516. {
  517. if (m2_end > m1_start)
  518. {
  519. return -1;
  520. }
  521. }
  522. }
  523. return 0;
  524. }
  525. #endif
/* Load an ELF image from `fd` (of length `len`) into process `lwp`.
 *
 * Validates the ELF header, maps (MMU) or allocates (non-MMU) the
 * text/data ranges, copies PT_LOAD segments, zero-fills BSS and, for
 * ET_DYN images, performs GOT/rel.dyn relocation.
 *
 * @param load_addr  non-NULL for a fixed-address load; it then becomes
 *                   the load bias, otherwise the loader picks one.
 * @param aux        aux vector (from lwp_argscopy) filled in with
 *                   AT_PAGESZ/AT_RANDOM/AT_PHDR/AT_PHNUM/AT_PHENT.
 * @return RT_EOK on success, 1 when the image needs the dynamic linker
 *         (PT_DYNAMIC present, MMU builds), negative error otherwise.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;            /* load bias added to ELF vaddrs */
    char *p_section_str = 0;        /* section-header string table */
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef ARCH_MM_MMU
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;                  /* kernel alias of a user page */
#endif

    /* --- ELF header validation --- */
    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }

    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }

    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);

    /* ELF class (e_ident[EI_CLASS]) must match the kernel's word size */
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif

    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }

    if ((eheader.e_type != ET_DYN)
#ifdef ARCH_MM_MMU
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }

    /* --- images with PT_DYNAMIC are deferred to the dynamic linker --- */
#ifdef ARCH_MM_MMU
    {
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);

            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    /* an explicit entry must be one of the two well-known load bases */
    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }

    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef ARCH_MM_MMU
        /* the headers and the 16-byte AT_RANDOM buffer share one page */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        pa = lwp_v2p(lwp, va);
        /* write through the kernel alias of the mapped page */
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef ARCH_MM_MMU
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        aux->item[2].key = AT_RANDOM;
        {
            /* 16-byte "random" seed; seeded from the tick counter only */
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef ARCH_MM_MMU
            uint8_t *krandom;

            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)lwp_v2p(lwp, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }
        aux->item[3].key = AT_PHDR;
#ifdef ARCH_MM_MMU
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    /* --- establish the load bias and map/allocate the image memory --- */
    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef ARCH_MM_MMU
    else
    {
        /* map user */
        /* collect the extents of all SHF_ALLOC sections: read-only
         * PROGBITS into user_area[0] (text), everything else into
         * user_area[1] (data/bss) */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }

        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }

        if (user_area[0].start == NULL)
        {
            /* DYN: image is linked at vaddr 0, rebase to the load window */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }

        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }

        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* non-MMU: compute the total span of allocatable sections and
         * heap-allocate one contiguous block for the whole image */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }

        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    /* --- copy PT_LOAD segments and zero-fill their BSS tails --- */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);

        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }

            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef ARCH_MM_MMU
            {
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                /* NOTE(review): biased by load_addr here but by load_off
                 * elsewhere — confirm intent for the DYN/MMU case */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                /* copy page by page through the kernel alias of each page */
                while (size)
                {
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            if (pheader.p_filesz < pheader.p_memsz)
            {
                /* zero the BSS portion (memsz beyond filesz) */
#ifdef ARCH_MM_MMU
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);
        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        /* locate .got, .rel.dyn and .dynsym by name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef ARCH_MM_MMU
        arch_elf_reloc(lwp->aspace, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        /* without an MMU the text was written through its final address:
         * flush dcache and invalidate icache before executing it */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  965. #endif /* ARCH_MM_MMU */
  966. rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  967. {
  968. uint8_t *ptr;
  969. int ret = -1;
  970. int len;
  971. int fd = -1;
  972. /* check file name */
  973. RT_ASSERT(filename != RT_NULL);
  974. /* check lwp control block */
  975. RT_ASSERT(lwp != RT_NULL);
  976. /* copy file name to process name */
  977. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  978. if (load_addr != RT_NULL)
  979. {
  980. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  981. ptr = load_addr;
  982. }
  983. else
  984. {
  985. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  986. ptr = RT_NULL;
  987. }
  988. fd = open(filename, O_BINARY | O_RDONLY, 0);
  989. if (fd < 0)
  990. {
  991. LOG_E("ERROR: Can't open elf file %s!", filename);
  992. goto out;
  993. }
  994. len = lseek(fd, 0, SEEK_END);
  995. if (len < 0)
  996. {
  997. LOG_E("ERROR: File %s size error!", filename);
  998. goto out;
  999. }
  1000. lseek(fd, 0, SEEK_SET);
  1001. ret = load_elf(fd, len, lwp, ptr, aux);
  1002. if ((ret != RT_EOK) && (ret != 1))
  1003. {
  1004. LOG_E("lwp load ret = %d", ret);
  1005. }
  1006. out:
  1007. if (fd > 0)
  1008. {
  1009. close(fd);
  1010. }
  1011. return ret;
  1012. }
  1013. /* lwp-thread clean up routine */
  1014. void lwp_cleanup(struct rt_thread *tid)
  1015. {
  1016. struct rt_lwp *lwp;
  1017. if (tid == NULL)
  1018. {
  1019. LOG_I("%s: invalid parameter tid == NULL", __func__);
  1020. return;
  1021. }
  1022. else
  1023. LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);
  1024. /**
  1025. * Brief: lwp thread cleanup
  1026. *
  1027. * Note: Critical Section
  1028. * - thread control block (RW. It's ensured that no one else can access tcb
  1029. * other than itself)
  1030. */
  1031. lwp = (struct rt_lwp *)tid->lwp;
  1032. lwp_thread_signal_detach(&tid->signal);
  1033. /* tty will be release in lwp_ref_dec() if ref is cleared */
  1034. lwp_ref_dec(lwp);
  1035. return;
  1036. }
  1037. static void lwp_execve_setup_stdio(struct rt_lwp *lwp)
  1038. {
  1039. struct dfs_fdtable *lwp_fdt;
  1040. struct dfs_file *cons_file;
  1041. int cons_fd;
  1042. lwp_fdt = &lwp->fdt;
  1043. /* open console */
  1044. cons_fd = open("/dev/console", O_RDWR);
  1045. if (cons_fd < 0)
  1046. {
  1047. LOG_E("%s: Cannot open console tty", __func__);
  1048. return ;
  1049. }
  1050. LOG_D("%s: open console as fd %d", __func__, cons_fd);
  1051. /* init 4 fds */
  1052. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  1053. if (lwp_fdt->fds)
  1054. {
  1055. cons_file = fd_get(cons_fd);
  1056. lwp_fdt->maxfd = 4;
  1057. fdt_fd_associate_file(lwp_fdt, 0, cons_file);
  1058. fdt_fd_associate_file(lwp_fdt, 1, cons_file);
  1059. fdt_fd_associate_file(lwp_fdt, 2, cons_file);
  1060. }
  1061. close(cons_fd);
  1062. return;
  1063. }
/* Entry trampoline for the first thread of a new process: installs the
 * cleanup handler, plants a debug breakpoint if requested, then drops to
 * user mode and never returns. */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        /* save the first instruction and replace it with the debugger's
         * breakpoint instruction so the process stops at its entry point */
        lwp->bak_first_inst = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        /* flush dcache so the patched instruction reaches memory, then
         * invalidate icache so the core refetches it */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }

    /**
     * without ASID support, it will be a special case when trying to run application
     * and exit multiple times and a same page frame allocated to it bound to
     * different text segment. Then we are in a situation where icache contains
     * out-of-dated data and must be handle by the running core itself.
     * with ASID support, this should be a rare case that ASID & page frame both
     * identical to previous running application.
     *
     * For a new application loaded into memory, icache are seen as empty. And there
     * should be nothing in the icache entry to match. So this icache invalidation
     * operation should have barely influence.
     */
    rt_hw_icache_invalidate_all();

#ifdef ARCH_MM_MMU
    /* MMU build: user stack at the top of the user stack region */
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, (char *)tid->stack_addr + tid->stack_size);
#else
    /* no-MMU build: user stack grows from the end of the data segment */
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  1098. struct rt_lwp *lwp_self(void)
  1099. {
  1100. rt_thread_t tid;
  1101. tid = rt_thread_self();
  1102. if (tid)
  1103. {
  1104. return (struct rt_lwp *)tid->lwp;
  1105. }
  1106. return RT_NULL;
  1107. }
/* Link `child` into `parent`'s children list (LIFO at the head) and take
 * cross references: parent and child each hold a reference to the other
 * until lwp_children_unregister() drops them. Always returns 0. */
rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child)
{
    /* lwp add to children link */
    LWP_LOCK(parent);
    child->sibling = parent->first_child;
    parent->first_child = child;
    child->parent = parent;
    LWP_UNLOCK(parent);
    LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);

    /* parent holds reference to child */
    lwp_ref_inc(parent);
    /* child holds reference to parent */
    lwp_ref_inc(child);
    return 0;
}
/* Unlink `child` from `parent`'s children list and drop the cross
 * references taken by lwp_children_register(). Asserts (does not fail
 * gracefully) if `child` is not actually on the list. Always returns 0. */
rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child)
{
    struct rt_lwp **lwp_node;

    LWP_LOCK(parent);
    /* detach from children link: walk via pointer-to-pointer so the head
     * and interior cases need no special handling */
    lwp_node = &parent->first_child;
    while (*lwp_node != child)
    {
        /* hitting the list end here means the child was never registered */
        RT_ASSERT(*lwp_node != RT_NULL);
        lwp_node = &(*lwp_node)->sibling;
    }
    (*lwp_node) = child->sibling;
    child->parent = RT_NULL;
    LWP_UNLOCK(parent);
    LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);

    /* release the references taken at registration time */
    lwp_ref_dec(child);
    lwp_ref_dec(parent);
    return 0;
}
/**
 * @brief Create a new process from an executable file and start it.
 *
 * Allocates the lwp, a thread id and (with MMU) a user address space,
 * copies argv/envp into the new process, loads the image, sets up stdio,
 * process group / session membership, and finally starts the main thread.
 *
 * @param filename path of the executable (must be accessible with X_OK)
 * @param debug    non-zero to start the process stopped under the debugger
 * @param argc/argv/envp standard argument/environment vectors
 *
 * @return pid of the new process on success, negative errno-style code on
 *         failure.
 */
pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
{
    int result;
    struct rt_lwp *lwp;
    char *thread_name;
    struct process_aux *aux;
    int tid = 0;

    if (filename == RT_NULL)
    {
        return -EINVAL;
    }

    /* reject files we are not allowed to execute */
    if (access(filename, X_OK) != 0)
    {
        return -EACCES;
    }

    lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID | LWP_CREATE_FLAG_NOTRACE_EXEC);
    if (lwp == RT_NULL)
    {
        dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
        return -ENOMEM;
    }
    LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));

    if ((tid = lwp_tid_get()) == 0)
    {
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }
#ifdef ARCH_MM_MMU
    if (lwp_user_space_init(lwp, 0) != 0)
    {
        lwp_tid_put(tid);
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }
#endif
    /* copy argv/envp into the new process's address space */
    if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
    {
        lwp_tid_put(tid);
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }

    result = lwp_load(filename, lwp, RT_NULL, 0, aux);
#ifdef ARCH_MM_MMU
    if (result == 1)
    {
        /* dynamically linked image: unmap the probe page and run the
         * dynamic linker (ld.so) over the executable instead */
        lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
        result = load_ldso(lwp, filename, argv, envp);
    }
#endif /* ARCH_MM_MMU */

    if (result == RT_EOK)
    {
        rt_thread_t thread = RT_NULL;
        rt_uint32_t priority = 25, tick = 200;

        lwp_execve_setup_stdio(lwp);
        /* obtain the base name */
        thread_name = strrchr(filename, '/');
        thread_name = thread_name ? thread_name + 1 : filename;
#ifndef ARCH_MM_MMU
        /* no-MMU: the app header embedded in the image may override
         * scheduling parameters */
        struct lwp_app_head *app_head = lwp->text_entry;
        if (app_head->priority)
        {
            priority = app_head->priority;
        }
        if (app_head->tick)
        {
            tick = app_head->tick;
        }
#endif /* not defined ARCH_MM_MMU */
        thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
                                  LWP_TASK_STACK_SIZE, priority, tick);
        if (thread != RT_NULL)
        {
            struct rt_lwp *self_lwp;
            rt_session_t session;
            rt_processgroup_t group;

            thread->tid = tid;
            lwp_tid_set_thread(tid, thread);
            LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
                  (rt_size_t)thread->stack_addr + thread->stack_size);
            self_lwp = lwp_self();
            /* when create init, self_lwp == null */
            if (self_lwp == RT_NULL && lwp_to_pid(lwp) != 1)
            {
                /* no current process: parent the new process to init (pid 1).
                 * NOTE(review): the "_and_lock" suffix suggests a lock/ref is
                 * taken here that is not visibly released in this function —
                 * TODO confirm against lwp_from_pid_and_lock()'s contract */
                self_lwp = lwp_from_pid_and_lock(1);
            }
            if (self_lwp)
            {
                /* lwp add to children link */
                lwp_children_register(self_lwp, lwp);
            }

            session = RT_NULL;
            group = RT_NULL;
            /* every new process gets its own process group; the first
             * process (no parent) also starts a new session, others join
             * the parent's session.
             * NOTE(review): session returned by create/find is not
             * NULL-checked before lwp_session_insert() — verify callees
             * tolerate RT_NULL */
            group = lwp_pgrp_create(lwp);
            if (group)
            {
                lwp_pgrp_insert(group, lwp);
                if (self_lwp == RT_NULL)
                {
                    session = lwp_session_create(lwp);
                    lwp_session_insert(session, group);
                }
                else
                {
                    session = lwp_session_find(lwp_sid_get_byprocess(self_lwp));
                    lwp_session_insert(session, group);
                }
            }

            thread->lwp = lwp;
#ifndef ARCH_MM_MMU
            /* no-MMU: compute the user stack location from app header offsets */
            struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
            thread->user_stack = app_head->stack_offset ?
                              (void *)(app_head->stack_offset -
                                       app_head->data_offset +
                                       (uint32_t)lwp->data_entry) : RT_NULL;
            thread->user_stack_size = app_head->stack_size;
            /* init data area */
            rt_memset(lwp->data_entry, 0, lwp->data_size);
            /* init user stack */
            rt_memset(thread->user_stack, '#', thread->user_stack_size);
#endif /* not defined ARCH_MM_MMU */
            rt_list_insert_after(&lwp->t_grp, &thread->sibling);

            lwp->did_exec = RT_TRUE;

            if (debug && rt_dbg_ops)
            {
                /* debugged processes are pinned to CPU 0 */
                lwp->debug = debug;
                rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
            }
            rt_thread_startup(thread);
            return lwp_to_pid(lwp);
        }
    }

    /* failure path: release the tid and the creation reference */
    lwp_tid_put(tid);
    lwp_ref_dec(lwp);

    return -RT_ERROR;
}
#ifdef RT_USING_MUSLLIBC
/* musl libc owns the environment array */
extern char **__environ;
#else
/* no libc environment available: provide an empty one */
char **__environ = 0;
#endif

/* Convenience wrapper around lwp_execve() that passes the current
 * environment and forces the OS environment variable. */
pid_t exec(char *filename, int debug, int argc, char **argv)
{
    setenv("OS", "RT-Thread", 1);
    return lwp_execve(filename, debug, argc, argv, __environ);
}
  1288. #ifdef ARCH_MM_MMU
  1289. void lwp_user_setting_save(rt_thread_t thread)
  1290. {
  1291. if (thread)
  1292. {
  1293. thread->thread_idr = arch_get_tidr();
  1294. }
  1295. }
  1296. void lwp_user_setting_restore(rt_thread_t thread)
  1297. {
  1298. if (!thread)
  1299. {
  1300. return;
  1301. }
  1302. #if !defined(ARCH_RISCV64)
  1303. /* tidr will be set in RESTORE_ALL in risc-v */
  1304. arch_set_tidr(thread->thread_idr);
  1305. #endif
  1306. if (rt_dbg_ops)
  1307. {
  1308. struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
  1309. if (l != 0)
  1310. {
  1311. rt_hw_set_process_id((size_t)l->pid);
  1312. }
  1313. else
  1314. {
  1315. rt_hw_set_process_id(0);
  1316. }
  1317. if (l && l->debug)
  1318. {
  1319. uint32_t step_type = 0;
  1320. step_type = dbg_step_type();
  1321. if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
  1322. {
  1323. dbg_activate_step();
  1324. }
  1325. else
  1326. {
  1327. dbg_deactivate_step();
  1328. }
  1329. }
  1330. }
  1331. }
  1332. #endif /* ARCH_MM_MMU */
  1333. void lwp_uthread_ctx_save(void *ctx)
  1334. {
  1335. rt_thread_t thread;
  1336. thread = rt_thread_self();
  1337. thread->user_ctx.ctx = ctx;
  1338. }
  1339. void lwp_uthread_ctx_restore(void)
  1340. {
  1341. rt_thread_t thread;
  1342. thread = rt_thread_self();
  1343. thread->user_ctx.ctx = RT_NULL;
  1344. }
  1345. rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *frame)
  1346. {
  1347. rt_err_t rc = -RT_ERROR;
  1348. long nesting = 0;
  1349. char **argv;
  1350. rt_lwp_t lwp;
  1351. if (uthread && uthread->lwp && rt_scheduler_is_available())
  1352. {
  1353. lwp = uthread->lwp;
  1354. argv = lwp_get_command_line_args(lwp);
  1355. if (argv)
  1356. {
  1357. rt_kprintf("please use: addr2line -e %s -a -f", argv[0]);
  1358. lwp_free_command_line_args(argv);
  1359. }
  1360. else
  1361. {
  1362. rt_kprintf("please use: addr2line -e %s -a -f", lwp->cmd);
  1363. }
  1364. while (nesting < RT_BACKTRACE_LEVEL_MAX_NR)
  1365. {
  1366. rt_kprintf(" 0x%lx", frame->pc);
  1367. if (rt_hw_backtrace_frame_unwind(uthread, frame))
  1368. {
  1369. break;
  1370. }
  1371. nesting++;
  1372. }
  1373. rt_kprintf("\n");
  1374. rc = RT_EOK;
  1375. }
  1376. return rc;
  1377. }
/* Per-tick CPU time accounting for the current thread (and, with SMP,
 * the per-CPU statistics). Called from the tick interrupt. */
void rt_update_process_times(void)
{
    struct rt_thread *thread;
#ifdef RT_USING_SMP
    struct rt_cpu* pcpu;

    pcpu = rt_cpu_self();
#endif
    thread = rt_thread_self();

    /* NOTE(review): the branch polarity looks inverted — !IS_USER_MODE
     * credits user_time and the else branch credits system_time. Whether
     * this is correct depends on IS_USER_MODE()'s definition (not visible
     * here); verify against the macro before changing. */
    if (!IS_USER_MODE(thread))
    {
        thread->user_time += 1;
#ifdef RT_USING_SMP
        pcpu->cpu_stat.user += 1;
#endif
    }
    else
    {
        thread->system_time += 1;
#ifdef RT_USING_SMP
        /* ticks spent in the idle thread are counted as idle, not system */
        if (thread == pcpu->idle_thread)
        {
            pcpu->cpu_stat.idle += 1;
        }
        else
        {
            pcpu->cpu_stat.system += 1;
        }
#endif
    }
}