lwp.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix complie error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. * 2023-02-20 wangxiaoyao inv icache before new app startup
  13. * 2023-02-20 wangxiaoyao fix bug on foreground app switch
  14. * 2023-10-16 Shell Support a new backtrace framework
  15. */
  16. #define DBG_TAG "LWP"
  17. #define DBG_LVL DBG_WARNING
  18. #include <rtdbg.h>
  19. #include <rthw.h>
  20. #include <rtthread.h>
  21. #include <dfs_file.h>
  22. #include <unistd.h>
  23. #include <stdio.h> /* rename() */
  24. #include <fcntl.h>
  25. #include <sys/stat.h>
  26. #include <sys/statfs.h> /* statfs() */
  27. #include <lwp_elf.h>
  28. #ifndef RT_USING_DFS
  29. #error "lwp need file system(RT_USING_DFS)"
  30. #endif
  31. #include "lwp.h"
  32. #include "lwp_arch.h"
  33. #include "lwp_arch_comm.h"
  34. #include "lwp_signal.h"
  35. #include "lwp_dbg.h"
  36. #include "console.h"
  37. #ifdef ARCH_MM_MMU
  38. #include <lwp_user_mm.h>
  39. #endif /* end of ARCH_MM_MMU */
/* Fallback definitions for toolchains whose <fcntl.h> lacks these flags */
#ifndef O_DIRECTORY
#define O_DIRECTORY 0x200000
#endif
#ifndef O_BINARY
#define O_BINARY 0x10000
#endif

/* "\x7fELF" magic used to recognize ELF images in load_elf() */
static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};

#ifdef DFS_USING_WORKDIR
/* global working directory maintained by DFS; fallback when a thread has
 * no process context (see lwp_getcwd/lwp_setcwd) */
extern char working_directory[];
#endif

/* saved stdin terminal settings; old_stdin_termios is exposed through
 * get_old_termios() */
static struct termios stdin_termios, old_stdin_termios;

/* forward declaration: starts the dynamic linker for dynamically linked
 * executables (load_elf() returns 1 in that case) */
int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  52. struct termios *get_old_termios(void)
  53. {
  54. return &old_stdin_termios;
  55. }
  56. void lwp_setcwd(char *buf)
  57. {
  58. struct rt_lwp *lwp = RT_NULL;
  59. if(strlen(buf) >= DFS_PATH_MAX)
  60. {
  61. rt_kprintf("buf too long!\n");
  62. return ;
  63. }
  64. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  65. if (lwp)
  66. {
  67. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  68. }
  69. else
  70. {
  71. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  72. }
  73. return ;
  74. }
  75. char *lwp_getcwd(void)
  76. {
  77. char *dir_buf = RT_NULL;
  78. struct rt_lwp *lwp = RT_NULL;
  79. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  80. if (lwp)
  81. {
  82. if(lwp->working_directory[0] != '/')
  83. {
  84. dir_buf = &working_directory[0];
  85. }
  86. else
  87. {
  88. dir_buf = &lwp->working_directory[0];
  89. }
  90. }
  91. else
  92. dir_buf = &working_directory[0];
  93. return dir_buf;
  94. }
/**
 * RT-Thread light-weight process
 */

/* Record the kernel-mode stack pointer of the current thread. */
void lwp_set_kernel_sp(uint32_t *sp)
{
    rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
}
/**
 * Return the kernel stack pointer to use for the current execution context.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    /* with an MMU, the thread's saved sp is used directly */
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        /* a context switch is pending: take the kernel sp of the thread we
         * are switching away from (rt_interrupt_from_thread holds the
         * address of that thread's sp field, hence the container_of) */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  121. #ifdef ARCH_MM_MMU
/**
 * Copy argc/argv/envp plus the auxiliary vector onto the top of the new
 * process' user stack (MMU build).
 *
 * The pointer table written into the stack page holds USER virtual
 * addresses (str), while the actual byte copies go through the kernel
 * alias of the same physical page (str_k / args_k).
 *
 * @return pointer (kernel alias) to the aux vector inside the stack page,
 *         or RT_NULL when the arguments do not fit in one page or the
 *         mapping fails.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;
    char *str_k;
    char **new_argve;
    int i;
    int len;
    size_t *args_k;
    struct process_aux *aux;
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    size_t zero = 0;

    /* account for argv strings and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    /* account for envp; afterwards i holds the number of env entries */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }
    /* for aux */
    size += sizeof(struct process_aux);

    /* everything must fit in the single page mapped at the stack top */
    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    /* map one writable page at the end of the user stack region */
    args = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
    if (args == RT_NULL || lwp_data_put(lwp, args, &zero, sizeof(zero)) != sizeof(zero))
    {
        return RT_NULL;
    }

    /* args is a user VA; args_k is the kernel-visible alias of the frame */
    args_k = (size_t *)lwp_v2p(lwp, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] -- strings packed afterwards */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));

    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    /* argv: table entries get user addresses, data written via kernel alias */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;
    i++;

    new_argve[i] = 0;
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;

    /* aux vector lives right after the envp NULL terminator */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    /* flush so the user-side mapping observes the freshly written data */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
  205. #else
/**
 * Copy argc/argv/envp (and, when built with ARCH_MM_MMU, the aux vector)
 * into a kernel heap buffer for the new process (non-MMU build).
 *
 * @return pointer to the aux area inside the buffer, or RT_NULL on OOM.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* account for argv strings and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    /* account for envp; afterwards i holds the number of env entries */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 -- strings packed after the tables */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */

    new_argve = (char **)&args[1];
    args[0] = argc;

    /* copy argv strings; the table entries point at the copies */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;
    i++;

    new_argve[i] = 0;
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }

#ifdef ARCH_MM_MMU
    /* aux vector lives right after the envp NULL terminator */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
#else
    lwp->args = args;
    lwp->args_length = size; /* remembered so the buffer can be freed later */

    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  292. #endif
  293. #ifdef ARCH_MM_MMU
/* Abort ELF parsing (set result = -RT_ERROR and goto _exit) when a file
 * offset lies beyond the file length. Requires a local `result` variable
 * and an `_exit` label in the calling function. */
#define check_off(voff, vlen) \
    do \
    { \
        if (voff > vlen) \
        { \
            result = -RT_ERROR; \
            goto _exit; \
        } \
    } while (0)

/* Abort ELF parsing when fewer bytes were read than requested (short read
 * or I/O failure). Same `result`/`_exit` requirements as check_off(). */
#define check_read(vrlen, vrlen_want) \
    do \
    { \
        if (vrlen < vrlen_want) \
        { \
            result = -RT_ERROR; \
            goto _exit; \
        } \
    } while (0)
  312. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  313. {
  314. size_t read_block = 0;
  315. while (nmemb)
  316. {
  317. size_t count;
  318. count = read(fd, ptr, size * nmemb) / size;
  319. if (count < nmemb)
  320. {
  321. LOG_E("ERROR: file size error!");
  322. break;
  323. }
  324. ptr = (void *)((uint8_t *)ptr + (count * size));
  325. nmemb -= count;
  326. read_block += count;
  327. }
  328. return read_block;
  329. }
/* ELF symbol table entry, read from .dynsym and handed to arch_elf_reloc().
 * NOTE(review): this field order matches Elf32_Sym; Elf64_Sym places
 * st_info/st_other/st_shndx before st_value -- confirm the Elf_* typedefs
 * in lwp_elf.h keep this layout correct on 64-bit targets. */
typedef struct
{
    Elf_Word st_name;       /* index into the symbol string table */
    Elf_Addr st_value;      /* symbol value/address */
    Elf_Word st_size;       /* symbol size in bytes */
    unsigned char st_info;  /* type and binding attributes */
    unsigned char st_other; /* visibility */
    Elf_Half st_shndx;      /* defining section header index */
} Elf_sym;
  339. #ifdef ARCH_MM_MMU
  340. struct map_range
  341. {
  342. void *start;
  343. size_t size;
  344. };
  345. static void expand_map_range(struct map_range *m, void *start, size_t size)
  346. {
  347. if (!m->start)
  348. {
  349. m->start = start;
  350. m->size = size;
  351. }
  352. else
  353. {
  354. void *end = (void *)((char*)start + size);
  355. void *mend = (void *)((char*)m->start + m->size);
  356. if (m->start > start)
  357. {
  358. m->start = start;
  359. }
  360. if (mend < end)
  361. {
  362. mend = end;
  363. }
  364. m->size = (char *)mend - (char *)m->start;
  365. }
  366. }
  367. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  368. {
  369. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  370. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  371. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  372. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  373. if (m1->size)
  374. {
  375. if (m1_start < (void *)USER_LOAD_VADDR)
  376. {
  377. return -1;
  378. }
  379. if (m1_start > (void *)USER_STACK_VSTART)
  380. {
  381. return -1;
  382. }
  383. if (m1_end < (void *)USER_LOAD_VADDR)
  384. {
  385. return -1;
  386. }
  387. if (m1_end > (void *)USER_STACK_VSTART)
  388. {
  389. return -1;
  390. }
  391. }
  392. if (m2->size)
  393. {
  394. if (m2_start < (void *)USER_LOAD_VADDR)
  395. {
  396. return -1;
  397. }
  398. if (m2_start > (void *)USER_STACK_VSTART)
  399. {
  400. return -1;
  401. }
  402. if (m2_end < (void *)USER_LOAD_VADDR)
  403. {
  404. return -1;
  405. }
  406. if (m2_end > (void *)USER_STACK_VSTART)
  407. {
  408. return -1;
  409. }
  410. }
  411. if ((m1->size != 0) && (m2->size != 0))
  412. {
  413. if (m1_start < m2_start)
  414. {
  415. if (m1_end > m2_start)
  416. {
  417. return -1;
  418. }
  419. }
  420. else /* m2_start <= m1_start */
  421. {
  422. if (m2_end > m1_start)
  423. {
  424. return -1;
  425. }
  426. }
  427. }
  428. return 0;
  429. }
  430. #endif
/**
 * Load an ELF image from fd into the lwp's address space.
 *
 * @param fd        open descriptor of the ELF file
 * @param len       total file length in bytes
 * @param lwp       target process control block
 * @param load_addr fixed load base, or NULL to let the loader place/map
 *                  the image itself
 * @param aux       auxiliary vector to populate (AT_PAGESZ, AT_RANDOM,
 *                  AT_PHDR, AT_PHNUM, AT_PHENT)
 *
 * @return RT_EOK on success; 1 when the image carries PT_DYNAMIC and must
 *         be started through the dynamic linker; negative error otherwise.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef ARCH_MM_MMU
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
#endif

    /* the file must at least hold a full ELF header */
    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }

    /* verify the \x7fELF magic */
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }

    /* read the ELF header (off is still 0 here) */
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);

    /* EI_CLASS must match the word size this kernel was built for */
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif

    /* EI_VERSION must be EV_CURRENT (1) */
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }

    /* only PIE (ET_DYN) images and, with an MMU, fixed-address ET_EXEC */
    if ((eheader.e_type != ET_DYN)
#ifdef ARCH_MM_MMU
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }

#ifdef ARCH_MM_MMU
    {
        /* a PT_DYNAMIC segment means the image needs the dynamic linker
         * instead of being loaded directly here */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    /* a non-zero entry point must be one of the two well-known bases */
    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }

    { /* load aux: copy the program headers where the C runtime can see
       * them and fill in the auxiliary vector */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef ARCH_MM_MMU
        /* the phdrs plus the 16-byte AT_RANDOM block must fit one page */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        /* map one page near the top of the user address space for them */
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        pa = lwp_v2p(lwp, va);
        /* kernel-visible alias of the freshly mapped user page */
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef ARCH_MM_MMU
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif

        /* AT_RANDOM: 16 bytes reserved, seeded from the tick counter
         * (note: not cryptographically random) */
        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef ARCH_MM_MMU
            uint8_t *krandom;
            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)lwp_v2p(lwp, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }

        aux->item[3].key = AT_PHDR;
#ifdef ARCH_MM_MMU
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef ARCH_MM_MMU
    else
    {
        /* map user: scan section headers for the extents of the read-only
         * (text) and writable/NOBITS (data) areas, then map both */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }
        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }
        if (user_area[0].start == NULL)
        {
            /* DYN: the image is position independent, rebase both areas to
             * the default user load address */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }
        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }
        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                /* text (i == 0) is mapped with the execute attribute */
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* no MMU: allocate one contiguous block covering every SHF_ALLOC
         * PROGBITS/NOBITS section and load the image there */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }
        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    /* copy every PT_LOAD segment into place and zero the .bss tail */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);
        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }
            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef ARCH_MM_MMU
            {
                /* copy page by page through the kernel alias of each
                 * destination frame */
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;
                /* NOTE(review): destination is based on load_addr, not
                 * load_off -- these differ on the ET_DYN path where
                 * load_addr is NULL but load_off was rebased; confirm
                 * this is intended */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            /* zero-fill [p_filesz, p_memsz) -- the .bss portion */
            if (pheader.p_filesz < pheader.p_memsz)
            {
#ifdef ARCH_MM_MMU
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;
                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate: for ET_DYN images find .got/.rel.dyn/.dynsym by name and
     * apply the architecture-specific relocations */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);
        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef ARCH_MM_MMU
        arch_elf_reloc(lwp->aspace, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        /* make the freshly written text visible to the instruction stream */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    /* shared cleanup for both success and the check_* failure paths */
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  870. #endif /* ARCH_MM_MMU */
  871. rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  872. {
  873. uint8_t *ptr;
  874. int ret = -1;
  875. int len;
  876. int fd = -1;
  877. /* check file name */
  878. RT_ASSERT(filename != RT_NULL);
  879. /* check lwp control block */
  880. RT_ASSERT(lwp != RT_NULL);
  881. /* copy file name to process name */
  882. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  883. if (load_addr != RT_NULL)
  884. {
  885. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  886. ptr = load_addr;
  887. }
  888. else
  889. {
  890. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  891. ptr = RT_NULL;
  892. }
  893. fd = open(filename, O_BINARY | O_RDONLY, 0);
  894. if (fd < 0)
  895. {
  896. LOG_E("ERROR: Can't open elf file %s!", filename);
  897. goto out;
  898. }
  899. len = lseek(fd, 0, SEEK_END);
  900. if (len < 0)
  901. {
  902. LOG_E("ERROR: File %s size error!", filename);
  903. goto out;
  904. }
  905. lseek(fd, 0, SEEK_SET);
  906. ret = load_elf(fd, len, lwp, ptr, aux);
  907. if ((ret != RT_EOK) && (ret != 1))
  908. {
  909. LOG_E("lwp load ret = %d", ret);
  910. }
  911. out:
  912. if (fd > 0)
  913. {
  914. close(fd);
  915. }
  916. return ret;
  917. }
/* lwp thread clean up */
/**
 * Thread cleanup hook run when an lwp thread exits: recycles the tid,
 * unlinks the thread from its process and drops one process reference.
 */
void lwp_cleanup(struct rt_thread *tid)
{
    rt_base_t level;
    struct rt_lwp *lwp;

    if (tid == NULL)
    {
        LOG_I("%s: invalid parameter tid == NULL", __func__);
        return;
    }
    else
        LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);

    /* interrupts disabled while unlinking from the process' thread list */
    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp *)tid->lwp;
    /* lwp thread cleanup */
    lwp_tid_put(tid->tid);
    rt_list_remove(&tid->sibling);
    lwp_thread_signal_detach(&tid->signal);
    rt_hw_interrupt_enable(level);

    /* tty will be release in lwp_ref_dec() if ref is cleared */
    lwp_ref_dec(lwp);
    return;
}
  941. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  942. {
  943. struct dfs_file *d;
  944. struct dfs_fdtable *lwp_fdt;
  945. lwp_fdt = &lwp->fdt;
  946. /* init 4 fds */
  947. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  948. if (lwp_fdt->fds)
  949. {
  950. lwp_fdt->maxfd = 4;
  951. d = fd_get(0);
  952. fd_associate(lwp_fdt, 0, d);
  953. d = fd_get(1);
  954. fd_associate(lwp_fdt, 1, d);
  955. d = fd_get(2);
  956. fd_associate(lwp_fdt, 2, d);
  957. }
  958. return;
  959. }
/* Kernel-side entry trampoline for the first thread of a new process:
 * installs the cleanup handler, optionally plants a debugger breakpoint on
 * the first instruction, then drops to user mode. Never returns. */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        /* save the original first instruction, then patch in the debugger's
         * break instruction so the process traps immediately at its entry */
        lwp->bak_first_inst = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        /* push the patched word out of dcache and discard stale icache lines */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }

    /**
     * without ASID support, it will be a special case when trying to run application
     * and exit multiple times and a same page frame allocated to it bound to
     * different text segment. Then we are in a situation where icache contains
     * out-of-dated data and must be handle by the running core itself.
     * with ASID support, this should be a rare case that ASID & page frame both
     * identical to previous running application.
     *
     * For a new application loaded into memory, icache are seen as empty. And there
     * should be nothing in the icache entry to match. So this icache invalidation
     * operation should have barely influence.
     */
    rt_hw_icache_invalidate_all();

#ifdef ARCH_MM_MMU
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, (char *)tid->stack_addr + tid->stack_size);
#else
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  994. struct rt_lwp *lwp_self(void)
  995. {
  996. rt_thread_t tid;
  997. tid = rt_thread_self();
  998. if (tid)
  999. {
  1000. return (struct rt_lwp *)tid->lwp;
  1001. }
  1002. return RT_NULL;
  1003. }
  1004. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  1005. {
  1006. int result;
  1007. rt_base_t level;
  1008. struct rt_lwp *lwp;
  1009. char *thread_name;
  1010. char *argv_last = argv[argc - 1];
  1011. int bg = 0;
  1012. struct process_aux *aux;
  1013. int tid = 0;
  1014. int ret;
  1015. if (filename == RT_NULL)
  1016. {
  1017. return -RT_ERROR;
  1018. }
  1019. if (access(filename, X_OK) != 0)
  1020. {
  1021. return -EACCES;
  1022. }
  1023. lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID);
  1024. if (lwp == RT_NULL)
  1025. {
  1026. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  1027. return -RT_ENOMEM;
  1028. }
  1029. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1030. if ((tid = lwp_tid_get()) == 0)
  1031. {
  1032. lwp_ref_dec(lwp);
  1033. return -ENOMEM;
  1034. }
  1035. #ifdef ARCH_MM_MMU
  1036. if (lwp_user_space_init(lwp, 0) != 0)
  1037. {
  1038. lwp_tid_put(tid);
  1039. lwp_ref_dec(lwp);
  1040. return -ENOMEM;
  1041. }
  1042. #endif
  1043. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1044. {
  1045. argc--;
  1046. bg = 1;
  1047. }
  1048. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1049. {
  1050. lwp_tid_put(tid);
  1051. lwp_ref_dec(lwp);
  1052. return -ENOMEM;
  1053. }
  1054. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1055. #ifdef ARCH_MM_MMU
  1056. if (result == 1)
  1057. {
  1058. /* dynmaic */
  1059. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1060. result = load_ldso(lwp, filename, argv, envp);
  1061. }
  1062. #endif /* ARCH_MM_MMU */
  1063. if (result == RT_EOK)
  1064. {
  1065. rt_thread_t thread = RT_NULL;
  1066. rt_uint32_t priority = 25, tick = 200;
  1067. lwp_copy_stdio_fdt(lwp);
  1068. /* obtain the base name */
  1069. thread_name = strrchr(filename, '/');
  1070. thread_name = thread_name ? thread_name + 1 : filename;
  1071. #ifndef ARCH_MM_MMU
  1072. struct lwp_app_head *app_head = lwp->text_entry;
  1073. if (app_head->priority)
  1074. {
  1075. priority = app_head->priority;
  1076. }
  1077. if (app_head->tick)
  1078. {
  1079. tick = app_head->tick;
  1080. }
  1081. #endif /* not defined ARCH_MM_MMU */
  1082. thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
  1083. LWP_TASK_STACK_SIZE, priority, tick);
  1084. if (thread != RT_NULL)
  1085. {
  1086. struct rt_lwp *self_lwp;
  1087. thread->tid = tid;
  1088. lwp_tid_set_thread(tid, thread);
  1089. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1090. (rt_size_t)thread->stack_addr + thread->stack_size);
  1091. level = rt_hw_interrupt_disable();
  1092. self_lwp = lwp_self();
  1093. if (self_lwp)
  1094. {
  1095. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1096. lwp->__pgrp = tid;
  1097. lwp->session = self_lwp->session;
  1098. /* lwp add to children link */
  1099. lwp->sibling = self_lwp->first_child;
  1100. self_lwp->first_child = lwp;
  1101. lwp->parent = self_lwp;
  1102. }
  1103. else
  1104. {
  1105. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1106. lwp->__pgrp = tid;
  1107. }
  1108. if (!bg)
  1109. {
  1110. if (lwp->session == -1)
  1111. {
  1112. struct tty_struct *tty = RT_NULL;
  1113. struct rt_lwp *old_lwp;
  1114. tty = (struct tty_struct *)console_tty_get();
  1115. old_lwp = tty->foreground;
  1116. if (old_lwp)
  1117. {
  1118. rt_mutex_take(&tty->lock, RT_WAITING_FOREVER);
  1119. ret = tty_push(&tty->head, old_lwp);
  1120. rt_mutex_release(&tty->lock);
  1121. if (ret < 0)
  1122. {
  1123. lwp_tid_put(tid);
  1124. lwp_ref_dec(lwp);
  1125. LOG_E("malloc fail!\n");
  1126. return -ENOMEM;
  1127. }
  1128. }
  1129. lwp->tty = tty;
  1130. lwp->tty->pgrp = lwp->__pgrp;
  1131. lwp->tty->session = lwp->session;
  1132. lwp->tty->foreground = lwp;
  1133. tcgetattr(1, &stdin_termios);
  1134. old_stdin_termios = stdin_termios;
  1135. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1136. tcsetattr(1, 0, &stdin_termios);
  1137. }
  1138. else
  1139. {
  1140. if (self_lwp != RT_NULL)
  1141. {
  1142. rt_mutex_take(&self_lwp->tty->lock, RT_WAITING_FOREVER);
  1143. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1144. rt_mutex_release(&self_lwp->tty->lock);
  1145. if (ret < 0)
  1146. {
  1147. lwp_tid_put(tid);
  1148. lwp_ref_dec(lwp);
  1149. LOG_E("malloc fail!\n");
  1150. return -ENOMEM;
  1151. }
  1152. lwp->tty = self_lwp->tty;
  1153. lwp->tty->pgrp = lwp->__pgrp;
  1154. lwp->tty->session = lwp->session;
  1155. lwp->tty->foreground = lwp;
  1156. }
  1157. else
  1158. {
  1159. lwp->tty = RT_NULL;
  1160. }
  1161. }
  1162. }
  1163. else
  1164. {
  1165. lwp->background = RT_TRUE;
  1166. }
  1167. thread->lwp = lwp;
  1168. #ifndef ARCH_MM_MMU
  1169. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1170. thread->user_stack = app_head->stack_offset ?
  1171. (void *)(app_head->stack_offset -
  1172. app_head->data_offset +
  1173. (uint32_t)lwp->data_entry) : RT_NULL;
  1174. thread->user_stack_size = app_head->stack_size;
  1175. /* init data area */
  1176. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1177. /* init user stack */
  1178. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1179. #endif /* not defined ARCH_MM_MMU */
  1180. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1181. if (debug && rt_dbg_ops)
  1182. {
  1183. lwp->debug = debug;
  1184. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1185. }
  1186. rt_hw_interrupt_enable(level);
  1187. rt_thread_startup(thread);
  1188. return lwp_to_pid(lwp);
  1189. }
  1190. }
  1191. lwp_tid_put(tid);
  1192. lwp_ref_dec(lwp);
  1193. return -RT_ERROR;
  1194. }
  1195. #ifdef RT_USING_MUSLLIBC
  1196. extern char **__environ;
  1197. #else
  1198. char **__environ = 0;
  1199. #endif
  1200. pid_t exec(char *filename, int debug, int argc, char **argv)
  1201. {
  1202. setenv("OS", "RT-Thread", 1);
  1203. return lwp_execve(filename, debug, argc, argv, __environ);
  1204. }
  1205. #ifdef ARCH_MM_MMU
  1206. void lwp_user_setting_save(rt_thread_t thread)
  1207. {
  1208. if (thread)
  1209. {
  1210. thread->thread_idr = arch_get_tidr();
  1211. }
  1212. }
  1213. void lwp_user_setting_restore(rt_thread_t thread)
  1214. {
  1215. if (!thread)
  1216. {
  1217. return;
  1218. }
  1219. #if !defined(ARCH_RISCV64)
  1220. /* tidr will be set in RESTORE_ALL in risc-v */
  1221. arch_set_tidr(thread->thread_idr);
  1222. #endif
  1223. if (rt_dbg_ops)
  1224. {
  1225. struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
  1226. if (l != 0)
  1227. {
  1228. rt_hw_set_process_id((size_t)l->pid);
  1229. }
  1230. else
  1231. {
  1232. rt_hw_set_process_id(0);
  1233. }
  1234. if (l && l->debug)
  1235. {
  1236. uint32_t step_type = 0;
  1237. step_type = dbg_step_type();
  1238. if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
  1239. {
  1240. dbg_activate_step();
  1241. }
  1242. else
  1243. {
  1244. dbg_deactivate_step();
  1245. }
  1246. }
  1247. }
  1248. }
  1249. #endif /* ARCH_MM_MMU */
  1250. void lwp_uthread_ctx_save(void *ctx)
  1251. {
  1252. rt_thread_t thread;
  1253. thread = rt_thread_self();
  1254. thread->user_ctx.ctx = ctx;
  1255. }
  1256. void lwp_uthread_ctx_restore(void)
  1257. {
  1258. rt_thread_t thread;
  1259. thread = rt_thread_self();
  1260. thread->user_ctx.ctx = RT_NULL;
  1261. }
  1262. rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *frame)
  1263. {
  1264. rt_err_t rc = -RT_ERROR;
  1265. long nesting = 0;
  1266. char **argv;
  1267. rt_lwp_t lwp;
  1268. if (uthread->lwp)
  1269. {
  1270. lwp = uthread->lwp;
  1271. argv = lwp_get_command_line_args(lwp);
  1272. if (argv)
  1273. {
  1274. LOG_RAW("please use: addr2line -e %s -a -f", argv[0]);
  1275. lwp_free_command_line_args(argv);
  1276. }
  1277. else
  1278. {
  1279. LOG_RAW("please use: addr2line -e %s -a -f", lwp->cmd);
  1280. }
  1281. while (nesting < RT_BACKTRACE_LEVEL_MAX_NR)
  1282. {
  1283. LOG_RAW(" 0x%lx", frame->pc);
  1284. if (rt_hw_backtrace_frame_unwind(uthread, frame))
  1285. {
  1286. break;
  1287. }
  1288. nesting++;
  1289. }
  1290. LOG_RAW("\n");
  1291. rc = RT_EOK;
  1292. }
  1293. return rc;
  1294. }
  1295. void rt_update_process_times(void)
  1296. {
  1297. struct rt_thread *thread;
  1298. #ifdef RT_USING_SMP
  1299. struct rt_cpu* pcpu;
  1300. pcpu = rt_cpu_self();
  1301. #endif
  1302. thread = rt_thread_self();
  1303. if (!IS_USER_MODE(thread))
  1304. {
  1305. thread->user_time += 1;
  1306. #ifdef RT_USING_SMP
  1307. pcpu->cpu_stat.user += 1;
  1308. #endif
  1309. }
  1310. else
  1311. {
  1312. thread->system_time += 1;
  1313. #ifdef RT_USING_SMP
  1314. if (thread == pcpu->idle_thread)
  1315. {
  1316. pcpu->cpu_stat.idle += 1;
  1317. }
  1318. else
  1319. {
  1320. pcpu->cpu_stat.system += 1;
  1321. }
  1322. #endif
  1323. }
  1324. }