/*
 * File      : module.c
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2006 - 2011, RT-Thread Development Team
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rt-thread.org/license/LICENSE
 *
 * Change Logs:
 * Date           Author       Notes
 * 2010-01-09     Bernard      first version
 * 2010-04-09     yi.qiu       implement based on first version
 * 2010-10-23     yi.qiu       implement module memory allocator
 * 2011-05-25     yi.qiu       implement module hook function
 * 2011-06-23     yi.qiu       rewrite module memory allocator
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtm.h>

#include "string.h"
#include "kservice.h"

#ifdef RT_USING_MODULE
#include "module.h"

#define elf_module  ((Elf32_Ehdr *)module_ptr)
#define shdr        ((Elf32_Shdr *)((rt_uint8_t *)module_ptr + elf_module->e_shoff))
#define phdr        ((Elf32_Phdr *)((rt_uint8_t *)module_ptr + elf_module->e_phoff))

#define IS_PROG(s)    (s.sh_type == SHT_PROGBITS)
#define IS_NOPROG(s)  (s.sh_type == SHT_NOBITS)
#define IS_REL(s)     (s.sh_type == SHT_REL)
#define IS_RELA(s)    (s.sh_type == SHT_RELA)
#define IS_ALLOC(s)   (s.sh_flags == SHF_ALLOC)
#define IS_AX(s)      ((s.sh_flags & SHF_ALLOC) && (s.sh_flags & SHF_EXECINSTR))
#define IS_AW(s)      ((s.sh_flags & SHF_ALLOC) && (s.sh_flags & SHF_WRITE))

#define PAGE_COUNT_MAX  256

/* module memory allocator */
struct rt_mem_head
{
    rt_size_t size;                /* size of memory block */
    struct rt_mem_head *next;      /* next valid memory block */
};

struct rt_page_info
{
    rt_uint32_t *page_ptr;
    rt_uint32_t npage;
};
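
/*
 * Overview of the allocator implemented below: free blocks are linked in an
 * address-ordered list per module (mem_list) and their sizes are counted in
 * units of sizeof(struct rt_mem_head); backing pages taken from the kernel
 * page allocator are recorded in the module's page_array/page_cnt so that any
 * leftover pages can be reclaimed in rt_module_unload().
 */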

#ifdef RT_USING_SLAB
static void *rt_module_malloc_page(rt_size_t npages);
static void rt_module_free_page(rt_module_t module, void *page_ptr, rt_size_t npages);
#endif

static rt_module_t rt_current_module = RT_NULL;
static struct rt_semaphore mod_sem;
static struct rt_module_symtab *_rt_module_symtab_begin = RT_NULL, *_rt_module_symtab_end = RT_NULL;
rt_list_t rt_module_symbol_list;

static char *_strip_name(const char *string)
{
    int i = 0, p = 0, q = 0;
    const char *str = string;
    char *dest = RT_NULL;

    while (*str != '\n' && *str != '\0')
    {
        if (*str == '/') p = i + 1;
        if (*str == '.') q = i;

        str ++; i ++;
    }

    if (p < q)
    {
        int len = q - p;
        dest = (char *)rt_malloc(len + 1);
        rt_strncpy(dest, &string[p], len);
        dest[len] = '\0';
    }

    return dest;
}
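
/*
 * Illustrative behaviour of _strip_name(): for "/mnt/sd/hello.mo" it returns a
 * freshly allocated "hello"; when the string contains no '.' after the last
 * '/', it returns RT_NULL. (The path shown here is only an example.)
 */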

/**
 * @ingroup SystemInit
 *
 * This function will initialize the system module environment.
 *
 */
void rt_system_module_init(void)
{
#ifdef __GNUC__
    extern int __rtmsymtab_start;
    extern int __rtmsymtab_end;

    _rt_module_symtab_begin = (struct rt_module_symtab *)&__rtmsymtab_start;
    _rt_module_symtab_end   = (struct rt_module_symtab *)&__rtmsymtab_end;
#elif defined (__CC_ARM)
    extern int RTMSymTab$$Base;
    extern int RTMSymTab$$Limit;

    _rt_module_symtab_begin = (struct rt_module_symtab *)&RTMSymTab$$Base;
    _rt_module_symtab_end   = (struct rt_module_symtab *)&RTMSymTab$$Limit;
#endif

    rt_list_init(&rt_module_symbol_list);

    /* initialize heap semaphore */
    rt_sem_init(&mod_sem, "module", 1, RT_IPC_FLAG_FIFO);

    /* init current module */
    rt_current_module = RT_NULL;
}

static rt_uint32_t rt_module_symbol_find(const char *sym_str)
{
    /* find in kernel symbol table */
    struct rt_module_symtab *index;

    for (index = _rt_module_symtab_begin; index != _rt_module_symtab_end; index ++)
    {
        if (rt_strcmp(index->name, sym_str) == 0)
            return (rt_uint32_t)index->addr;
    }

    return 0;
}

/**
 * This function will return the current module object.
 *
 * @return the current module object
 *
 */
rt_module_t rt_module_self(void)
{
    /* return current module */
    return rt_current_module;
}

/**
 * This function will set the current module object.
 *
 * @return RT_EOK
 */
rt_err_t rt_module_set(rt_module_t module)
{
    /* set current module */
    rt_current_module = module;

    return RT_EOK;
}

static int rt_module_arm_relocate(struct rt_module *module, Elf32_Rel *rel, Elf32_Addr sym_val)
{
    Elf32_Addr *where, tmp;
    Elf32_Sword addend, offset;
    rt_uint32_t upper, lower, sign, j1, j2;

    where = (Elf32_Addr *)((rt_uint8_t *)module->module_space + rel->r_offset);
    switch (ELF32_R_TYPE(rel->r_info))
    {
    case R_ARM_NONE:
        break;

    case R_ARM_ABS32:
        *where += (Elf32_Addr)sym_val;
        RT_DEBUG_LOG(RT_DEBUG_MODULE, ("R_ARM_ABS32: %x -> %x\n", where, *where));
        break;

    case R_ARM_PC24:
    case R_ARM_PLT32:
    case R_ARM_CALL:
    case R_ARM_JUMP24:
        addend = *where & 0x00ffffff;
        if (addend & 0x00800000)
            addend |= 0xff000000;
        tmp = sym_val - (Elf32_Addr)where + (addend << 2);
        tmp >>= 2;
        *where = (*where & 0xff000000) | (tmp & 0x00ffffff);
        RT_DEBUG_LOG(RT_DEBUG_MODULE, ("R_ARM_PC24: %x -> %x\n", where, *where));
        break;

    case R_ARM_REL32:
        *where += sym_val - (Elf32_Addr)where;
        RT_DEBUG_LOG(RT_DEBUG_MODULE,
            ("R_ARM_REL32: %x -> %x, sym %x, offset %x\n", where, *where, sym_val, rel->r_offset));
        break;

    case R_ARM_V4BX:
        *where &= 0xf000000f;
        *where |= 0x01a0f000;
        break;

    case R_ARM_GLOB_DAT:
    case R_ARM_JUMP_SLOT:
        *where = (Elf32_Addr)sym_val;
        RT_DEBUG_LOG(RT_DEBUG_MODULE,
            ("R_ARM_JUMP_SLOT: 0x%x -> 0x%x 0x%x\n", where, *where, sym_val));
        break;

#if 0 /* To do */
    case R_ARM_GOT_BREL:
        temp = (Elf32_Addr)sym_val;
        *where = (Elf32_Addr)&temp;
        RT_DEBUG_LOG(RT_DEBUG_MODULE,
            ("R_ARM_GOT_BREL: 0x%x -> 0x%x 0x%x\n", where, *where, sym_val));
        break;
#endif

    case R_ARM_RELATIVE:
        *where += (Elf32_Addr)sym_val;
        //RT_DEBUG_LOG(RT_DEBUG_MODULE,
        //    ("R_ARM_RELATIVE: 0x%x -> 0x%x 0x%x\n", where, *where, sym_val));
        break;

    case R_ARM_THM_CALL:
    case R_ARM_THM_JUMP24:
        upper = *(rt_uint16_t *)where;
        lower = *(rt_uint16_t *)((Elf32_Addr)where + 2);

        sign = (upper >> 10) & 1;
        j1 = (lower >> 13) & 1;
        j2 = (lower >> 11) & 1;
        offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
            ((~(j2 ^ sign) & 1) << 22) |
            ((upper & 0x03ff) << 12) |
            ((lower & 0x07ff) << 1);
        if (offset & 0x01000000)
            offset -= 0x02000000;
        offset += sym_val - (Elf32_Addr)where;

        if (!(offset & 1) || offset <= (rt_int32_t)0xff000000 ||
            offset >= (rt_int32_t)0x01000000)
        {
            rt_kprintf("only Thumb addresses allowed\n");
            return -1;
        }

        sign = (offset >> 24) & 1;
        j1 = sign ^ (~(offset >> 23) & 1);
        j2 = sign ^ (~(offset >> 22) & 1);
        *(rt_uint16_t *)where = (rt_uint16_t)((upper & 0xf800) | (sign << 10) |
            ((offset >> 12) & 0x03ff));
        *(rt_uint16_t *)((Elf32_Addr)where + 2) = (rt_uint16_t)((lower & 0xd000) |
            (j1 << 13) | (j2 << 11) |
            ((offset >> 1) & 0x07ff));
        upper = *(rt_uint16_t *)where;
        lower = *(rt_uint16_t *)((Elf32_Addr)where + 2);
        break;

    default:
        return -1;
    }

    return 0;
}
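
/*
 * Note on the R_ARM_THM_CALL/R_ARM_THM_JUMP24 case above: the branch offset of
 * a Thumb-2 BL/B.W instruction is split over two halfwords as the S, J1, J2,
 * imm10 and imm11 fields; the code decodes them into a signed byte offset,
 * rebases it on sym_val, checks that the target is still a Thumb address in
 * branch range, and re-encodes the fields in place.
 */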

static void rt_module_init_object_container(struct rt_module *module)
{
    RT_ASSERT(module != RT_NULL);

    /* initialize object container - thread */
    rt_list_init(&(module->module_object[RT_Object_Class_Thread].object_list));
    module->module_object[RT_Object_Class_Thread].object_size = sizeof(struct rt_thread);
    module->module_object[RT_Object_Class_Thread].type = RT_Object_Class_Thread;

#ifdef RT_USING_SEMAPHORE
    /* initialize object container - semaphore */
    rt_list_init(&(module->module_object[RT_Object_Class_Semaphore].object_list));
    module->module_object[RT_Object_Class_Semaphore].object_size = sizeof(struct rt_semaphore);
    module->module_object[RT_Object_Class_Semaphore].type = RT_Object_Class_Semaphore;
#endif

#ifdef RT_USING_MUTEX
    /* initialize object container - mutex */
    rt_list_init(&(module->module_object[RT_Object_Class_Mutex].object_list));
    module->module_object[RT_Object_Class_Mutex].object_size = sizeof(struct rt_mutex);
    module->module_object[RT_Object_Class_Mutex].type = RT_Object_Class_Mutex;
#endif

#ifdef RT_USING_EVENT
    /* initialize object container - event */
    rt_list_init(&(module->module_object[RT_Object_Class_Event].object_list));
    module->module_object[RT_Object_Class_Event].object_size = sizeof(struct rt_event);
    module->module_object[RT_Object_Class_Event].type = RT_Object_Class_Event;
#endif

#ifdef RT_USING_MAILBOX
    /* initialize object container - mailbox */
    rt_list_init(&(module->module_object[RT_Object_Class_MailBox].object_list));
    module->module_object[RT_Object_Class_MailBox].object_size = sizeof(struct rt_mailbox);
    module->module_object[RT_Object_Class_MailBox].type = RT_Object_Class_MailBox;
#endif

#ifdef RT_USING_MESSAGEQUEUE
    /* initialize object container - message queue */
    rt_list_init(&(module->module_object[RT_Object_Class_MessageQueue].object_list));
    module->module_object[RT_Object_Class_MessageQueue].object_size = sizeof(struct rt_messagequeue);
    module->module_object[RT_Object_Class_MessageQueue].type = RT_Object_Class_MessageQueue;
#endif

#ifdef RT_USING_MEMPOOL
    /* initialize object container - memory pool */
    rt_list_init(&(module->module_object[RT_Object_Class_MemPool].object_list));
    module->module_object[RT_Object_Class_MemPool].object_size = sizeof(struct rt_mempool);
    module->module_object[RT_Object_Class_MemPool].type = RT_Object_Class_MemPool;
#endif

#ifdef RT_USING_DEVICE
    /* initialize object container - device */
    rt_list_init(&(module->module_object[RT_Object_Class_Device].object_list));
    module->module_object[RT_Object_Class_Device].object_size = sizeof(struct rt_device);
    module->module_object[RT_Object_Class_Device].type = RT_Object_Class_Device;
#endif

    /* initialize object container - timer */
    rt_list_init(&(module->module_object[RT_Object_Class_Timer].object_list));
    module->module_object[RT_Object_Class_Timer].object_size = sizeof(struct rt_timer);
    module->module_object[RT_Object_Class_Timer].type = RT_Object_Class_Timer;
}

#ifdef RT_USING_HOOK
static void (*rt_module_load_hook)(rt_module_t module);
static void (*rt_module_unload_hook)(rt_module_t module);

/**
 * @addtogroup Hook
 */
/*@{*/

/**
 * This function will set a hook function, which will be invoked when a module
 * is loaded into the system.
 *
 * @param hook the hook function
 */
void rt_module_load_sethook(void (*hook)(rt_module_t module))
{
    rt_module_load_hook = hook;
}

/**
 * This function will set a hook function, which will be invoked when a module
 * is unloaded from the system.
 *
 * @param hook the hook function
 */
void rt_module_unload_sethook(void (*hook)(rt_module_t module))
{
    rt_module_unload_hook = hook;
}

/*@}*/
#endif
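
/*
 * Usage sketch for the hooks above (illustrative names, not part of the module
 * API itself):
 *
 *     static void my_load_hook(rt_module_t module)
 *     {
 *         rt_kprintf("module %s loaded at 0x%x\n",
 *                    module->parent.name, module->module_space);
 *     }
 *
 *     rt_module_load_sethook(my_load_hook);
 */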

static struct rt_module *_load_shared_object(const char *name, void *module_ptr)
{
    rt_uint8_t *ptr = RT_NULL;
    rt_module_t module = RT_NULL;
    rt_bool_t linked = RT_FALSE;
    rt_uint32_t index, module_size = 0;

    RT_ASSERT(module_ptr != RT_NULL);

    if (rt_memcmp(elf_module->e_ident, RTMMAG, SELFMAG) == 0)
    {
        /* rtmlinker finished */
        linked = RT_TRUE;
    }

    /* get the ELF image size */
    for (index = 0; index < elf_module->e_phnum; index++)
    {
        if (phdr[index].p_type == PT_LOAD)
            module_size += phdr[index].p_memsz;
    }

    if (module_size == 0)
    {
        rt_kprintf(" module size error\n");
        return module;
    }

    /* allocate module */
    module = (struct rt_module *)rt_object_allocate(RT_Object_Class_Module, name);
    if (!module) return RT_NULL;

    /* allocate module space */
    module->module_space = rt_malloc(module_size);
    if (module->module_space == RT_NULL)
    {
        rt_object_delete(&(module->parent));
        return RT_NULL;
    }

    /* zero all space */
    ptr = module->module_space;
    rt_memset(ptr, 0, module_size);
    rt_kprintf(" load address at 0x%x\n", ptr);

    for (index = 0; index < elf_module->e_phnum; index++)
    {
        if (phdr[index].p_type == PT_LOAD)
        {
            rt_memcpy(ptr, (rt_uint8_t *)elf_module + phdr[index].p_offset, phdr[index].p_filesz);
            ptr += phdr[index].p_memsz;
        }
    }

    /* set module entry */
    module->module_entry = module->module_space + elf_module->e_entry;

    /* handle relocation section */
    for (index = 0; index < elf_module->e_shnum; index ++)
    {
        if (IS_REL(shdr[index]))
        {
            rt_uint32_t i, nr_reloc;
            Elf32_Sym *symtab;
            Elf32_Rel *rel;
            rt_uint8_t *strtab;
            static rt_bool_t unsolved = RT_FALSE;

            /* get relocate item */
            rel = (Elf32_Rel *)((rt_uint8_t *)module_ptr + shdr[index].sh_offset);

            /* locate .rel.plt and .rel.dyn section */
            symtab = (Elf32_Sym *)((rt_uint8_t *)module_ptr + shdr[shdr[index].sh_link].sh_offset);
            strtab = (rt_uint8_t *)module_ptr + shdr[shdr[shdr[index].sh_link].sh_link].sh_offset;
            nr_reloc = (rt_uint32_t)(shdr[index].sh_size / sizeof(Elf32_Rel));

            /* relocate each item */
            for (i = 0; i < nr_reloc; i ++)
            {
                Elf32_Sym *sym = &symtab[ELF32_R_SYM(rel->r_info)];

                RT_DEBUG_LOG(RT_DEBUG_MODULE,
                    ("relocate symbol %s shndx %d\n", strtab + sym->st_name, sym->st_shndx));

                if ((sym->st_shndx != SHT_NULL) || (ELF_ST_BIND(sym->st_info) == STB_LOCAL))
                    rt_module_arm_relocate(module, rel, (Elf32_Addr)(module->module_space + sym->st_value));
                else if (!linked)
                {
                    Elf32_Addr addr;

                    RT_DEBUG_LOG(RT_DEBUG_MODULE,
                        ("relocate symbol: %s\n", strtab + sym->st_name));

                    /* need to resolve symbol in kernel symbol table */
                    addr = rt_module_symbol_find((const char *)(strtab + sym->st_name));
                    if (addr == 0)
                    {
                        rt_kprintf("can't find %s in kernel symbol table\n", strtab + sym->st_name);
                        unsolved = RT_TRUE;
                    }
                    else rt_module_arm_relocate(module, rel, addr);
                }
                rel ++;
            }

            if (unsolved)
            {
                rt_object_delete(&(module->parent));
                rt_free(module);
                return RT_NULL;
            }
        }
    }

#if 0
    for (index = 0; index < elf_module->e_shnum; index ++)
    {
        /* find .got section */
        rt_uint8_t *shstrab = (rt_uint8_t *)module_ptr + shdr[elf_module->e_shstrndx].sh_offset;
        if (rt_strcmp((const char *)(shstrab + shdr[index].sh_name), ELF_GOT) == 0)
        {
            rt_hw_set_got_base(module->module_space + shdr[index].sh_offset);
            break;
        }
    }
#endif

    /* construct module symbol table */
    for (index = 0; index < elf_module->e_shnum; index ++)
    {
        /* find .dynsym section */
        rt_uint8_t *shstrab = (rt_uint8_t *)module_ptr + shdr[elf_module->e_shstrndx].sh_offset;
        if (rt_strcmp((const char *)(shstrab + shdr[index].sh_name), ELF_DYNSYM) == 0) break;
    }

    /* found .dynsym section */
    if (index != elf_module->e_shnum)
    {
        int i, count = 0;
        Elf32_Sym *symtab = RT_NULL;
        rt_uint8_t *strtab = RT_NULL;

        symtab = (Elf32_Sym *)((rt_uint8_t *)module_ptr + shdr[index].sh_offset);
        strtab = (rt_uint8_t *)module_ptr + shdr[shdr[index].sh_link].sh_offset;

        for (i = 0; i < shdr[index].sh_size / sizeof(Elf32_Sym); i++)
        {
            if ((ELF_ST_BIND(symtab[i].st_info) == STB_GLOBAL) && (ELF_ST_TYPE(symtab[i].st_info) == STT_FUNC))
                count ++;
        }

        module->symtab = (struct rt_module_symtab *)rt_malloc(count * sizeof(struct rt_module_symtab));
        module->nsym = count;
        for (i = 0, count = 0; i < shdr[index].sh_size / sizeof(Elf32_Sym); i++)
        {
            if ((ELF_ST_BIND(symtab[i].st_info) == STB_GLOBAL) && (ELF_ST_TYPE(symtab[i].st_info) == STT_FUNC))
            {
                rt_size_t length = rt_strlen((const char *)(strtab + symtab[i].st_name)) + 1;

                module->symtab[count].addr = (void *)(module->module_space + symtab[i].st_value);
                module->symtab[count].name = rt_malloc(length);
                rt_memset((void *)module->symtab[count].name, 0, length);
                rt_memcpy((void *)module->symtab[count].name, strtab + symtab[i].st_name, length);
                count ++;
            }
        }
    }

    return module;
}

static struct rt_module *_load_relocated_object(const char *name, void *module_ptr)
{
    rt_uint32_t index, rodata_addr = 0, bss_addr = 0, data_addr = 0;
    rt_uint32_t module_addr = 0, module_size = 0;
    struct rt_module *module = RT_NULL;
    rt_uint8_t *ptr, *strtab, *shstrab;
    rt_bool_t linked = RT_FALSE;

    if (rt_memcmp(elf_module->e_ident, RTMMAG, SELFMAG) == 0)
    {
        /* rtmlinker finished */
        linked = RT_TRUE;
    }

    /* get the ELF image size */
    for (index = 0; index < elf_module->e_shnum; index++)
    {
        /* text */
        if (IS_PROG(shdr[index]) && IS_AX(shdr[index]))
        {
            module_size += shdr[index].sh_size;
            module_addr = shdr[index].sh_addr;
        }
        /* rodata */
        if (IS_PROG(shdr[index]) && IS_ALLOC(shdr[index]))
        {
            module_size += shdr[index].sh_size;
        }
        /* data */
        if (IS_PROG(shdr[index]) && IS_AW(shdr[index]))
        {
            module_size += shdr[index].sh_size;
        }
        /* bss */
        if (IS_NOPROG(shdr[index]) && IS_AW(shdr[index]))
        {
            module_size += shdr[index].sh_size;
        }
    }

    /* no text, data or bss in the image */
    if (module_size == 0) return RT_NULL;

    /* allocate module */
    module = (struct rt_module *)rt_object_allocate(RT_Object_Class_Module, (const char *)name);
    if (module == RT_NULL) return RT_NULL;

    /* allocate module space */
    module->module_space = rt_malloc(module_size);
    if (module->module_space == RT_NULL)
    {
        rt_object_delete(&(module->parent));
        return RT_NULL;
    }

    /* zero all space */
    ptr = module->module_space;
    rt_memset(ptr, 0, module_size);

    /* load text and data section */
    for (index = 0; index < elf_module->e_shnum; index++)
    {
        /* load text section */
        if (IS_PROG(shdr[index]) && IS_AX(shdr[index]))
        {
            rt_memcpy(ptr, (rt_uint8_t *)elf_module + shdr[index].sh_offset, shdr[index].sh_size);
            RT_DEBUG_LOG(RT_DEBUG_MODULE, ("load text 0x%x, size %d\n", ptr, shdr[index].sh_size));
            ptr += shdr[index].sh_size;
        }

        /* load rodata section */
        if (IS_PROG(shdr[index]) && IS_ALLOC(shdr[index]))
        {
            rt_memcpy(ptr, (rt_uint8_t *)elf_module + shdr[index].sh_offset, shdr[index].sh_size);
            rodata_addr = (rt_uint32_t)ptr;
            RT_DEBUG_LOG(RT_DEBUG_MODULE, ("load rodata 0x%x, size %d, rodata 0x%x\n", ptr, shdr[index].sh_size, *(rt_uint32_t *)rodata_addr));
            ptr += shdr[index].sh_size;
        }

        /* load data section */
        if (IS_PROG(shdr[index]) && IS_AW(shdr[index]))
        {
            rt_memcpy(ptr, (rt_uint8_t *)elf_module + shdr[index].sh_offset, shdr[index].sh_size);
            data_addr = (rt_uint32_t)ptr;
            RT_DEBUG_LOG(RT_DEBUG_MODULE, ("load data 0x%x, size %d, data 0x%x\n", ptr, shdr[index].sh_size, *(rt_uint32_t *)data_addr));
            ptr += shdr[index].sh_size;
        }

        /* load bss section */
        if (IS_NOPROG(shdr[index]) && IS_AW(shdr[index]))
        {
            rt_memset(ptr, 0, shdr[index].sh_size);
            bss_addr = (rt_uint32_t)ptr;
            RT_DEBUG_LOG(RT_DEBUG_MODULE, ("load bss 0x%x, size %d\n", ptr, shdr[index].sh_size));
        }
    }

    /* set module entry */
    module->module_entry = (rt_uint8_t *)module->module_space + elf_module->e_entry - module_addr;

    /* handle relocation section */
    for (index = 0; index < elf_module->e_shnum; index ++)
    {
        if (IS_REL(shdr[index]))
        {
            rt_uint32_t i, nr_reloc;
            Elf32_Sym *symtab;
            Elf32_Rel *rel;

            /* get relocate item */
            rel = (Elf32_Rel *)((rt_uint8_t *)module_ptr + shdr[index].sh_offset);

            /* locate .dynsym and .dynstr */
            symtab = (Elf32_Sym *)((rt_uint8_t *)module_ptr + shdr[shdr[index].sh_link].sh_offset);
            strtab = (rt_uint8_t *)module_ptr + shdr[shdr[shdr[index].sh_link].sh_link].sh_offset;
            shstrab = (rt_uint8_t *)module_ptr + shdr[elf_module->e_shstrndx].sh_offset;
            nr_reloc = (rt_uint32_t)(shdr[index].sh_size / sizeof(Elf32_Rel));

            /* relocate each item */
            for (i = 0; i < nr_reloc; i ++)
            {
                Elf32_Sym *sym = &symtab[ELF32_R_SYM(rel->r_info)];

                RT_DEBUG_LOG(RT_DEBUG_MODULE, ("relocate symbol: %s\n", strtab + sym->st_name));

                if (sym->st_shndx != STN_UNDEF)
                {
                    if ((ELF_ST_TYPE(sym->st_info) == STT_SECTION)
                        || (ELF_ST_TYPE(sym->st_info) == STT_OBJECT))
                    {
                        if (rt_strncmp(shstrab + shdr[sym->st_shndx].sh_name, ELF_RODATA, 8) == 0)
                        {
                            /* relocate rodata section */
                            RT_DEBUG_LOG(RT_DEBUG_MODULE, ("rodata\n"));
                            rt_module_arm_relocate(module, rel, (Elf32_Addr)(rodata_addr + sym->st_value));
                        }
                        else if (strncmp(shstrab + shdr[sym->st_shndx].sh_name, ELF_BSS, 5) == 0)
                        {
                            /* relocate bss section */
                            RT_DEBUG_LOG(RT_DEBUG_MODULE, ("bss\n"));
                            rt_module_arm_relocate(module, rel, (Elf32_Addr)bss_addr + sym->st_value);
                        }
                        else if (strncmp(shstrab + shdr[sym->st_shndx].sh_name, ELF_DATA, 6) == 0)
                        {
                            /* relocate data section */
                            RT_DEBUG_LOG(RT_DEBUG_MODULE, ("data\n"));
                            rt_module_arm_relocate(module, rel, (Elf32_Addr)data_addr + sym->st_value);
                        }
                    }
                }
                else if (ELF_ST_TYPE(sym->st_info) == STT_FUNC)
                {
                    /* relocate function */
                    rt_module_arm_relocate(module, rel,
                        (Elf32_Addr)((rt_uint8_t *)module->module_space - module_addr + sym->st_value));
                }
                else
                {
                    Elf32_Addr addr;

                    if (ELF32_R_TYPE(rel->r_info) != R_ARM_V4BX)
                    {
                        RT_DEBUG_LOG(RT_DEBUG_MODULE, ("relocate symbol: %s\n", strtab + sym->st_name));

                        /* need to resolve symbol in kernel symbol table */
                        addr = rt_module_symbol_find(strtab + sym->st_name);
                        if (addr != (Elf32_Addr)RT_NULL)
                        {
                            rt_module_arm_relocate(module, rel, addr);
                            RT_DEBUG_LOG(RT_DEBUG_MODULE, ("symbol addr 0x%x\n", addr));
                        }
                        else rt_kprintf("can't find %s in kernel symbol table\n", strtab + sym->st_name);
                    }
                    else
                    {
                        /* R_ARM_V4BX does not use the symbol value */
                        rt_module_arm_relocate(module, rel, addr);
                    }
                }
                rel ++;
            }
        }
    }

    return module;
}

/**
 * This function will load a module from memory and create a thread for it
 *
 * @param name the name of module, which shall be unique
 * @param module_ptr the memory address of module image
 *
 * @return the module object
 *
 */
rt_module_t rt_module_load(const char *name, void *module_ptr)
{
    rt_module_t module;

    RT_DEBUG_NOT_IN_INTERRUPT;

    rt_kprintf("rt_module_load: %s ,", name);

    /* check ELF header */
    if (rt_memcmp(elf_module->e_ident, RTMMAG, SELFMAG) != 0
        && rt_memcmp(elf_module->e_ident, ELFMAG, SELFMAG) != 0)
    {
        rt_kprintf(" module magic error\n");
        return RT_NULL;
    }

    /* check ELF class */
    if (elf_module->e_ident[EI_CLASS] != ELFCLASS32)
    {
        rt_kprintf(" module class error\n");
        return RT_NULL;
    }

    if (elf_module->e_type == ET_REL)
    {
        module = _load_relocated_object(name, module_ptr);
    }
    else if (elf_module->e_type == ET_DYN)
    {
        module = _load_shared_object(name, module_ptr);
    }
    else
    {
        rt_kprintf("unsupported elf type\n");
        return RT_NULL;
    }

    if (module == RT_NULL) return RT_NULL;

    /* init module object container */
    rt_module_init_object_container(module);

    /* increase module reference count */
    module->nref ++;

    if (elf_module->e_entry != 0)
    {
        /* init module memory allocator */
        module->mem_list = RT_NULL;

#ifdef RT_USING_SLAB
        /* create page array */
        module->page_array = (void *)rt_malloc(PAGE_COUNT_MAX * sizeof(struct rt_page_info));
        module->page_cnt = 0;
#endif

        /* create module thread */
        module->stack_size = 2048;
        module->thread_priority = 25;
        module->module_thread = rt_thread_create(name,
            module->module_entry, RT_NULL,
            module->stack_size,
            module->thread_priority, 10);
        module->module_thread->module_id = (void *)module;
        module->parent.flag = RT_MODULE_FLAG_WITHENTRY;

        /* startup module thread */
        rt_thread_startup(module->module_thread);
    }
    else
    {
        /* without entry point */
        module->parent.flag |= RT_MODULE_FLAG_WITHOUTENTRY;
    }

#ifdef RT_USING_HOOK
    if (rt_module_load_hook != RT_NULL)
    {
        rt_module_load_hook(module);
    }
#endif

    return module;
}
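
/*
 * Usage sketch (illustrative; the image symbol and module name are examples
 * only): with a complete ELF image already in RAM,
 *
 *     extern rt_uint8_t app_elf_image[];
 *     rt_module_t m = rt_module_load("app", (void *)app_elf_image);
 *     if (m == RT_NULL) rt_kprintf("load failed\n");
 */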

#ifdef RT_USING_DFS
#include <dfs_posix.h>

/**
 * This function will load a module from a file
 *
 * @param path the full path of the application module file
 *
 * @return the module object
 *
 */
rt_module_t rt_module_open(const char *path)
{
    int fd, length;
    struct rt_module *module;
    struct stat s;
    char *buffer, *offset_ptr;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* check parameters */
    RT_ASSERT(path != RT_NULL);

    if (stat(path, &s) != 0)
    {
        rt_kprintf("access %s failed\n", path);
        return RT_NULL;
    }

    buffer = (char *)rt_malloc(s.st_size);
    if (buffer == RT_NULL)
    {
        rt_kprintf("out of memory\n");
        return RT_NULL;
    }

    offset_ptr = buffer;
    fd = open(path, O_RDONLY, 0);
    if (fd < 0)
    {
        rt_kprintf("open %s failed\n", path);
        rt_free(buffer);
        return RT_NULL;
    }

    do
    {
        length = read(fd, offset_ptr, 4096);
        if (length > 0)
        {
            offset_ptr += length;
        }
    } while (length > 0);

    /* close fd */
    close(fd);

    if ((rt_uint32_t)offset_ptr - (rt_uint32_t)buffer != s.st_size)
    {
        rt_kprintf("check: read file failed\n");
        rt_free(buffer);
        return RT_NULL;
    }

    module = rt_module_load(path, (void *)buffer);
    rt_free(buffer);

    return module;
}
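
/*
 * Note: with RT_USING_FINSH enabled, the export below publishes this function
 * in the finsh shell under the alias exec, so a module can be started from the
 * command line with, for example, exec("/mod/hello.mo") (illustrative path).
 */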

#if defined(RT_USING_FINSH)
#include <finsh.h>
FINSH_FUNCTION_EXPORT_ALIAS(rt_module_open, exec, exec module from file);
#endif

#endif

/**
 * This function will unload a module from memory and release resources.
 *
 * @param module the module to be unloaded
 *
 * @return the operation status, RT_EOK on OK; -RT_ERROR on error
 *
 */
rt_err_t rt_module_unload(rt_module_t module)
{
    int i;
    struct rt_object *object;
    struct rt_list_node *list;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* check parameter */
    RT_ASSERT(module != RT_NULL);

    rt_kprintf("rt_module_unload: %s\n", module->parent.name);

    /* module has entry point */
    if ((module->parent.flag & RT_MODULE_FLAG_WITHOUTENTRY) != RT_MODULE_FLAG_WITHOUTENTRY)
    {
        /* suspend module main thread */
        if (module->module_thread != RT_NULL)
        {
            if (module->module_thread->stat == RT_THREAD_READY)
                rt_thread_suspend(module->module_thread);
        }

        /* delete threads */
        list = &module->module_object[RT_Object_Class_Thread].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            if (rt_object_is_systemobject(object) == RT_EOK)
            {
                /* detach static object */
                rt_thread_detach((rt_thread_t)object);
            }
            else
            {
                /* delete dynamic object */
                rt_thread_delete((rt_thread_t)object);
            }
        }

#ifdef RT_USING_SEMAPHORE
        /* delete semaphores */
        list = &module->module_object[RT_Object_Class_Semaphore].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            if (rt_object_is_systemobject(object) == RT_EOK)
            {
                /* detach static object */
                rt_sem_detach((rt_sem_t)object);
            }
            else
            {
                /* delete dynamic object */
                rt_sem_delete((rt_sem_t)object);
            }
        }
#endif

#ifdef RT_USING_MUTEX
        /* delete mutexes */
        list = &module->module_object[RT_Object_Class_Mutex].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            if (rt_object_is_systemobject(object) == RT_EOK)
            {
                /* detach static object */
                rt_mutex_detach((rt_mutex_t)object);
            }
            else
            {
                /* delete dynamic object */
                rt_mutex_delete((rt_mutex_t)object);
            }
        }
#endif

#ifdef RT_USING_EVENT
        /* delete events */
        list = &module->module_object[RT_Object_Class_Event].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            if (rt_object_is_systemobject(object) == RT_EOK)
            {
                /* detach static object */
                rt_event_detach((rt_event_t)object);
            }
            else
            {
                /* delete dynamic object */
                rt_event_delete((rt_event_t)object);
            }
        }
#endif

#ifdef RT_USING_MAILBOX
        /* delete mailboxes */
        list = &module->module_object[RT_Object_Class_MailBox].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            if (rt_object_is_systemobject(object) == RT_EOK)
            {
                /* detach static object */
                rt_mb_detach((rt_mailbox_t)object);
            }
            else
            {
                /* delete dynamic object */
                rt_mb_delete((rt_mailbox_t)object);
            }
        }
#endif

#ifdef RT_USING_MESSAGEQUEUE
        /* delete message queues */
        list = &module->module_object[RT_Object_Class_MessageQueue].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            if (rt_object_is_systemobject(object) == RT_EOK)
            {
                /* detach static object */
                rt_mq_detach((rt_mq_t)object);
            }
            else
            {
                /* delete dynamic object */
                rt_mq_delete((rt_mq_t)object);
            }
        }
#endif

#ifdef RT_USING_MEMPOOL
        /* delete memory pools */
        list = &module->module_object[RT_Object_Class_MemPool].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            if (rt_object_is_systemobject(object) == RT_EOK)
            {
                /* detach static object */
                rt_mp_detach((rt_mp_t)object);
            }
            else
            {
                /* delete dynamic object */
                rt_mp_delete((rt_mp_t)object);
            }
        }
#endif

#ifdef RT_USING_DEVICE
        /* delete devices */
        list = &module->module_object[RT_Object_Class_Device].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            rt_device_unregister((rt_device_t)object);
        }
#endif

        /* delete timers */
        list = &module->module_object[RT_Object_Class_Timer].object_list;
        while (list->next != list)
        {
            object = rt_list_entry(list->next, struct rt_object, list);
            if (rt_object_is_systemobject(object) == RT_EOK)
            {
                /* detach static object */
                rt_timer_detach((rt_timer_t)object);
            }
            else
            {
                /* delete dynamic object */
                rt_timer_delete((rt_timer_t)object);
            }
        }
    }

#ifdef RT_USING_SLAB
    if (module->page_cnt > 0)
    {
        struct rt_page_info *page = (struct rt_page_info *)module->page_array;

        rt_kprintf("warning: module memory has not been completely freed\n");

        while (module->page_cnt != 0)
        {
            rt_module_free_page(module, page[0].page_ptr, page[0].npage);
        }
    }
#endif

    /* release module space memory */
    rt_free(module->module_space);

    /* release module symbol table */
    for (i = 0; i < module->nsym; i++) rt_free((void *)module->symtab[i].name);
    if (module->symtab != RT_NULL) rt_free(module->symtab);

#ifdef RT_USING_HOOK
    if (rt_module_unload_hook != RT_NULL)
    {
        rt_module_unload_hook(module);
    }
#endif

#ifdef RT_USING_SLAB
    /* release page array */
    rt_free(module->page_array);
#endif

    /* delete module object */
    rt_object_delete((rt_object_t)module);

    return RT_EOK;
}

/**
 * This function will find the specified module.
 *
 * @param name the name of the module to find
 *
 * @return the module
 */
rt_module_t rt_module_find(const char *name)
{
    struct rt_object_information *information;
    struct rt_object *object;
    struct rt_list_node *node;

    extern struct rt_object_information rt_object_container[];

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* enter critical */
    rt_enter_critical();

    /* try to find module object */
    information = &rt_object_container[RT_Object_Class_Module];
    for (node = information->object_list.next; node != &(information->object_list); node = node->next)
    {
        object = rt_list_entry(node, struct rt_object, list);
        if (rt_strncmp(object->name, name, RT_NAME_MAX) == 0)
        {
            /* leave critical */
            rt_exit_critical();

            return (rt_module_t)object;
        }
    }

    /* leave critical */
    rt_exit_critical();

    /* not found */
    return RT_NULL;
}
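
/*
 * Usage sketch (illustrative module name):
 *
 *     rt_module_t m = rt_module_find("hello");
 *     if (m != RT_NULL) rt_module_unload(m);
 */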

#ifdef RT_USING_SLAB
/*
 * This function will allocate the specified number of pages from page memory.
 *
 * @param npages the number of pages to be allocated.
 *
 * @note this function is used for RT-Thread Application Module
 */
static void *rt_module_malloc_page(rt_size_t npages)
{
    void *chunk;
    struct rt_page_info *page;

    chunk = rt_page_alloc(npages);
    if (chunk == RT_NULL) return RT_NULL;

    page = (struct rt_page_info *)rt_current_module->page_array;
    page[rt_current_module->page_cnt].page_ptr = chunk;
    page[rt_current_module->page_cnt].npage = npages;
    rt_current_module->page_cnt ++;

    RT_ASSERT(rt_current_module->page_cnt <= PAGE_COUNT_MAX);
    rt_kprintf("rt_module_malloc_page 0x%x %d\n", chunk, npages);

    return chunk;
}

/*
 * This function will release memory pages previously allocated by
 * rt_module_malloc_page.
 *
 * @param page_ptr the page address to be released.
 * @param npages the number of pages to be released.
 *
 * @note this function is used for RT-Thread Application Module
 */
static void rt_module_free_page(rt_module_t module, void *page_ptr, rt_size_t npages)
{
    int i, index;
    struct rt_page_info *page;

    rt_kprintf("rt_module_free_page 0x%x %d\n", page_ptr, npages);
    rt_page_free(page_ptr, npages);

    page = (struct rt_page_info *)module->page_array;

    for (i = 0; i < module->page_cnt; i ++)
    {
        if (page[i].page_ptr == page_ptr)
        {
            if (page[i].npage == npages + 1)
            {
                page[i].page_ptr += npages * RT_MM_PAGE_SIZE / sizeof(rt_uint32_t);
                page[i].npage -= npages;
            }
            else if (page[i].npage == npages)
            {
                for (index = i; index < module->page_cnt - 1; index ++)
                {
                    page[index].page_ptr = page[index + 1].page_ptr;
                    page[index].npage = page[index + 1].npage;
                }
                page[module->page_cnt - 1].page_ptr = RT_NULL;
                page[module->page_cnt - 1].npage = 0;

                module->page_cnt --;
            }
            else RT_ASSERT(RT_FALSE);

            rt_current_module->page_cnt --;
            return;
        }
    }

    /* should not get here */
    RT_ASSERT(RT_FALSE);
}

/*
 * rt_module_malloc - allocate memory block in free list
 */
void *rt_module_malloc(rt_size_t size)
{
    struct rt_mem_head *b, *n, *up;
    struct rt_mem_head **prev;
    rt_uint32_t npage;
    rt_size_t nunits;

    RT_DEBUG_NOT_IN_INTERRUPT;

    nunits = (size + sizeof(struct rt_mem_head) - 1) / sizeof(struct rt_mem_head) + 1;

    RT_ASSERT(size != 0);
    RT_ASSERT(nunits != 0);

    rt_sem_take(&mod_sem, RT_WAITING_FOREVER);

    for (prev = (struct rt_mem_head **)&rt_current_module->mem_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        if (b->size > nunits)
        {
            /* split memory */
            n = b + nunits;
            n->next = b->next;
            n->size = b->size - nunits;
            b->size = nunits;
            *prev = n;

            rt_kprintf("rt_module_malloc 0x%x, %d\n", b + 1, size);
            rt_sem_release(&mod_sem);

            return (void *)(b + 1);
        }

        if (b->size == nunits)
        {
            /* this node fits, remove this node */
            *prev = b->next;

            rt_kprintf("rt_module_malloc 0x%x, %d\n", b + 1, size);
            rt_sem_release(&mod_sem);

            return (void *)(b + 1);
        }
    }

    /* allocate pages from system heap */
    npage = (size + sizeof(struct rt_mem_head) + RT_MM_PAGE_SIZE - 1) / RT_MM_PAGE_SIZE;
    if ((up = (struct rt_mem_head *)rt_module_malloc_page(npage)) == RT_NULL)
    {
        rt_sem_release(&mod_sem);
        return RT_NULL;
    }

    up->size = npage * RT_MM_PAGE_SIZE / sizeof(struct rt_mem_head);

    for (prev = (struct rt_mem_head **)&rt_current_module->mem_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        if (b > up + up->size) break;
    }

    up->next = b;
    *prev = up;

    rt_sem_release(&mod_sem);

    return rt_module_malloc(size);
}
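
/*
 * Note on the size accounting above: block sizes are kept in units of
 * sizeof(struct rt_mem_head) (8 bytes on a typical 32-bit target), with one
 * extra unit reserved for the header itself. For example, a 20-byte request
 * gives nunits = (20 + 8 - 1)/8 + 1 = 4 units: an 8-byte header plus 24 usable
 * bytes.
 */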

/*
 * rt_module_free - free memory block in free list
 */
void rt_module_free(rt_module_t module, void *addr)
{
    struct rt_mem_head *b, *n, *r;
    struct rt_mem_head **prev;

    RT_DEBUG_NOT_IN_INTERRUPT;

    RT_ASSERT(addr);
    RT_ASSERT((((rt_uint32_t)addr) & (sizeof(struct rt_mem_head) - 1)) == 0);

    rt_kprintf("rt_module_free 0x%x\n", addr);

    rt_sem_take(&mod_sem, RT_WAITING_FOREVER);

    n = (struct rt_mem_head *)addr - 1;
    prev = (struct rt_mem_head **)&module->mem_list;

    while ((b = *prev) != RT_NULL)
    {
        RT_ASSERT(b->size > 0);
        RT_ASSERT(b > n || b + b->size <= n);

        if (b + b->size == n && ((rt_uint32_t)n % RT_MM_PAGE_SIZE != 0))
        {
            if (b + (b->size + n->size) == b->next)
            {
                b->size += b->next->size + n->size;
                b->next = b->next->next;
            }
            else b->size += n->size;

            if ((rt_uint32_t)b % RT_MM_PAGE_SIZE == 0)
            {
                int npage = b->size * sizeof(struct rt_page_info) / RT_MM_PAGE_SIZE;
                if (npage > 0)
                {
                    if ((b->size * sizeof(struct rt_page_info) % RT_MM_PAGE_SIZE) != 0)
                    {
                        rt_size_t nunits = npage * RT_MM_PAGE_SIZE / sizeof(struct rt_mem_head);
                        /* split memory */
                        r = b + nunits;
                        r->next = b->next;
                        r->size = b->size - nunits;
                        *prev = r;
                    }
                    else
                    {
                        *prev = b->next;
                    }

                    rt_module_free_page(module, b, npage);
                }
            }

            /* unlock */
            rt_sem_release(&mod_sem);

            return;
        }

        if (b == n + n->size)
        {
            n->size = b->size + n->size;
            n->next = b->next;

            if ((rt_uint32_t)n % RT_MM_PAGE_SIZE == 0)
            {
                int npage = n->size * sizeof(struct rt_page_info) / RT_MM_PAGE_SIZE;
                if (npage > 0)
                {
                    if ((n->size * sizeof(struct rt_page_info) % RT_MM_PAGE_SIZE) != 0)
                    {
                        rt_size_t nunits = npage * RT_MM_PAGE_SIZE / sizeof(struct rt_mem_head);
                        /* split memory */
                        r = n + nunits;
                        r->next = n->next;
                        r->size = n->size - nunits;
                        *prev = r;
                    }
                    else *prev = n->next;

                    rt_module_free_page(module, n, npage);
                }
            }
            else
            {
                *prev = n;
            }

            /* unlock */
            rt_sem_release(&mod_sem);

            return;
        }

        if (b > n + n->size) break;

        prev = &(b->next);
    }

    if ((rt_uint32_t)n % RT_MM_PAGE_SIZE == 0)
    {
        int npage = n->size * sizeof(struct rt_page_info) / RT_MM_PAGE_SIZE;
        if (npage > 0)
        {
            rt_module_free_page(module, n, npage);
            if (n->size % RT_MM_PAGE_SIZE != 0)
            {
                rt_size_t nunits = npage * RT_MM_PAGE_SIZE / sizeof(struct rt_mem_head);
                /* split memory */
                r = n + nunits;
                r->next = b;
                r->size = n->size - nunits;
                *prev = r;
            }
            else
            {
                *prev = b;
            }
        }
    }
    else
    {
        n->next = b;
        *prev = n;
    }

    /* unlock */
    rt_sem_release(&mod_sem);
}

/*
 * rt_module_realloc - realloc memory block in free list
 */
void *rt_module_realloc(void *ptr, rt_size_t size)
{
    struct rt_mem_head *b, *p, *prev, *tmpp;
    rt_size_t nunits;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (!ptr) return rt_module_malloc(size);
    if (size == 0)
    {
        rt_module_free(rt_current_module, ptr);
        return RT_NULL;
    }

    nunits = (size + sizeof(struct rt_mem_head) - 1) / sizeof(struct rt_mem_head) + 1;
    b = (struct rt_mem_head *)ptr - 1;

    if (nunits <= b->size)
    {
        /* new size is smaller than or equal to the old size */
        if (nunits == b->size) return ptr;
        else
        {
            p = b + nunits;
            p->size = b->size - nunits;
            b->size = nunits;
            rt_module_free(rt_current_module, (void *)(p + 1));
            return (void *)(b + 1);
        }
    }
    else
    {
        /* more space than the current block provides */
        prev = (struct rt_mem_head *)rt_current_module->mem_list;
        for (p = prev->next; p != (b->size + b) && p != RT_NULL; prev = p, p = p->next) break;

        /* available block right after the current one in the free list */
        if (p != RT_NULL && (p->size >= (nunits - (b->size))) && p == (b + b->size))
        {
            /* perfect match */
            if (p->size == (nunits - (b->size)))
            {
                b->size = nunits;
                prev->next = p->next;
            }
            else /* more space than required, split block */
            {
                /* pointer to old header */
                tmpp = p;
                p = b + nunits;

                /* restoring old pointer */
                p->next = tmpp->next;

                /* new size for p */
                p->size = tmpp->size + b->size - nunits;
                b->size = nunits;
                prev->next = p;
            }
            rt_current_module->mem_list = (void *)prev;
            return (void *)(b + 1);
        }
        else /* allocate new memory and copy old data */
        {
            if ((p = rt_module_malloc(size)) == RT_NULL) return RT_NULL;
            rt_memmove(p, (b + 1), ((b->size) * sizeof(struct rt_mem_head)));
            rt_module_free(rt_current_module, (void *)(b + 1));
            return (void *)(p);
        }
    }
}
#endif

#ifdef RT_USING_FINSH
#include <finsh.h>

void list_memlist(const char *name)
{
    rt_module_t module;
    struct rt_mem_head **prev;
    struct rt_mem_head *b;

    module = rt_module_find(name);
    if (module == RT_NULL) return;

    for (prev = (struct rt_mem_head **)&module->mem_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        rt_kprintf("0x%x--%d\n", b, b->size * sizeof(struct rt_mem_head));
    }
}
FINSH_FUNCTION_EXPORT(list_memlist, list module free memory information)

void list_mempage(const char *name)
{
    rt_module_t module;
    struct rt_page_info *page;
    int i;

    module = rt_module_find(name);
    if (module == RT_NULL) return;

    page = (struct rt_page_info *)module->page_array;

    for (i = 0; i < module->page_cnt; i ++)
    {
        rt_kprintf("0x%x--%d\n", page[i].page_ptr, page[i].npage);
    }
}
FINSH_FUNCTION_EXPORT(list_mempage, list module using memory page information)
#endif

#endif