aot_runtime.c 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "aot_runtime.h"
  6. #include "bh_log.h"
  7. #include "mem_alloc.h"
  8. #if WASM_ENABLE_SHARED_MEMORY != 0
  9. #include "../common/wasm_shared_memory.h"
  10. #endif
  11. static void
  12. set_error_buf(char *error_buf, uint32 error_buf_size, const char *string)
  13. {
  14. if (error_buf != NULL) {
  15. snprintf(error_buf, error_buf_size,
  16. "AOT module instantiate failed: %s", string);
  17. }
  18. }
  19. static void *
  20. runtime_malloc(uint64 size, char *error_buf, uint32 error_buf_size)
  21. {
  22. void *mem;
  23. if (size >= UINT32_MAX
  24. || !(mem = wasm_runtime_malloc((uint32)size))) {
  25. set_error_buf(error_buf, error_buf_size,
  26. "allocate memory failed");
  27. return NULL;
  28. }
  29. memset(mem, 0, (uint32)size);
  30. return mem;
  31. }
  32. static bool
  33. global_instantiate(AOTModuleInstance *module_inst, AOTModule *module,
  34. char *error_buf, uint32 error_buf_size)
  35. {
  36. uint32 i;
  37. InitializerExpression *init_expr;
  38. uint8 *p = (uint8*)module_inst->global_data.ptr;
  39. AOTImportGlobal *import_global = module->import_globals;
  40. AOTGlobal *global = module->globals;
  41. /* Initialize import global data */
  42. for (i = 0; i < module->import_global_count; i++, import_global++) {
  43. bh_assert(import_global->data_offset ==
  44. (uint32)(p - (uint8*)module_inst->global_data.ptr));
  45. memcpy(p, &import_global->global_data_linked, import_global->size);
  46. p += import_global->size;
  47. }
  48. /* Initialize defined global data */
  49. for (i = 0; i < module->global_count; i++, global++) {
  50. bh_assert(global->data_offset ==
  51. (uint32)(p - (uint8*)module_inst->global_data.ptr));
  52. init_expr = &global->init_expr;
  53. switch (init_expr->init_expr_type) {
  54. case INIT_EXPR_TYPE_GET_GLOBAL:
  55. if (init_expr->u.global_index >= module->import_global_count + i) {
  56. set_error_buf(error_buf, error_buf_size, "unknown global");
  57. return false;
  58. }
  59. memcpy(p,
  60. &module->import_globals[init_expr->u.global_index].global_data_linked,
  61. global->size);
  62. break;
  63. default:
  64. /* TODO: check whether global type and init_expr type are matching */
  65. memcpy(p, &init_expr->u, global->size);
  66. break;
  67. }
  68. p += global->size;
  69. }
  70. bh_assert(module_inst->global_data_size ==
  71. (uint32)(p - (uint8*)module_inst->global_data.ptr));
  72. return true;
  73. }
  74. static bool
  75. table_instantiate(AOTModuleInstance *module_inst, AOTModule *module,
  76. char *error_buf, uint32 error_buf_size)
  77. {
  78. uint32 i, global_index, global_data_offset, base_offset, length;
  79. AOTTableInitData *table_seg;
  80. for (i = 0; i < module->table_init_data_count; i++) {
  81. table_seg = module->table_init_data_list[i];
  82. bh_assert(table_seg->offset.init_expr_type ==
  83. INIT_EXPR_TYPE_I32_CONST
  84. || table_seg->offset.init_expr_type ==
  85. INIT_EXPR_TYPE_GET_GLOBAL);
  86. /* Resolve table data base offset */
  87. if (table_seg->offset.init_expr_type == INIT_EXPR_TYPE_GET_GLOBAL) {
  88. global_index = table_seg->offset.u.global_index;
  89. bh_assert(global_index <
  90. module->import_global_count + module->global_count);
  91. /* TODO: && globals[table_seg->offset.u.global_index].type ==
  92. VALUE_TYPE_I32*/
  93. if (global_index < module->import_global_count)
  94. global_data_offset =
  95. module->import_globals[global_index].data_offset;
  96. else
  97. global_data_offset =
  98. module->globals[global_index - module->import_global_count]
  99. .data_offset;
  100. base_offset = *(uint32*)
  101. ((uint8*)module_inst->global_data.ptr + global_data_offset);
  102. }
  103. else
  104. base_offset = (uint32)table_seg->offset.u.i32;
  105. /* Copy table data */
  106. bh_assert(module_inst->table_data.ptr);
  107. /* base_offset only since length might negative */
  108. if (base_offset > module_inst->table_size) {
  109. LOG_DEBUG("base_offset(%d) > table_size(%d)", base_offset,
  110. module_inst->table_size);
  111. set_error_buf(error_buf, error_buf_size,
  112. "elements segment does not fit");
  113. return false;
  114. }
  115. /* base_offset + length(could be zero) */
  116. length = table_seg->func_index_count;
  117. if (base_offset + length > module_inst->table_size) {
  118. LOG_DEBUG("base_offset(%d) + length(%d) > table_size(%d)",
  119. base_offset, length, module_inst->table_size);
  120. set_error_buf(error_buf, error_buf_size,
  121. "elements segment does not fit");
  122. return false;
  123. }
  124. /**
  125. * Check function index in the current module inst for now.
  126. * will check the linked table inst owner in future
  127. */
  128. memcpy((uint32 *)module_inst->table_data.ptr + base_offset,
  129. table_seg->func_indexes,
  130. length * sizeof(uint32));
  131. }
  132. return true;
  133. }
  134. static void
  135. memories_deinstantiate(AOTModuleInstance *module_inst)
  136. {
  137. uint32 i;
  138. AOTMemoryInstance *memory_inst;
  139. for (i = 0; i < module_inst->memory_count; i++) {
  140. memory_inst = ((AOTMemoryInstance **)module_inst->memories.ptr)[i];
  141. if (memory_inst) {
  142. #if WASM_ENABLE_SHARED_MEMORY != 0
  143. if (memory_inst->is_shared) {
  144. int32 ref_count =
  145. shared_memory_dec_reference(
  146. (WASMModuleCommon *)module_inst->aot_module.ptr);
  147. bh_assert(ref_count >= 0);
  148. /* if the reference count is not zero,
  149. don't free the memory */
  150. if (ref_count > 0)
  151. continue;
  152. }
  153. #endif
  154. if (memory_inst->heap_handle.ptr)
  155. mem_allocator_destroy(memory_inst->heap_handle.ptr);
  156. if (memory_inst->heap_data.ptr) {
  157. #ifndef OS_ENABLE_HW_BOUND_CHECK
  158. wasm_runtime_free(memory_inst->memory_data.ptr);
  159. #else
  160. os_munmap((uint8*)memory_inst->memory_data.ptr,
  161. 8 * (uint64)BH_GB);
  162. #endif
  163. }
  164. }
  165. }
  166. wasm_runtime_free(module_inst->memories.ptr);
  167. }
  168. static AOTMemoryInstance*
  169. memory_instantiate(AOTModuleInstance *module_inst, AOTModule *module,
  170. AOTMemoryInstance *memory_inst, AOTMemory *memory,
  171. uint32 heap_size, char *error_buf, uint32 error_buf_size)
  172. {
  173. void *heap_handle;
  174. uint32 num_bytes_per_page = memory->num_bytes_per_page;
  175. uint32 init_page_count = memory->mem_init_page_count;
  176. uint32 max_page_count = memory->mem_max_page_count;
  177. uint32 inc_page_count, aux_heap_base, global_idx;
  178. uint32 bytes_of_last_page, bytes_to_page_end;
  179. uint32 heap_offset = num_bytes_per_page *init_page_count;
  180. uint64 total_size;
  181. uint8 *p, *global_addr;
  182. #ifdef OS_ENABLE_HW_BOUND_CHECK
  183. uint8 *mapped_mem;
  184. uint64 map_size = 8 * (uint64)BH_GB;
  185. uint64 page_size = os_getpagesize();
  186. #endif
  187. #if WASM_ENABLE_SHARED_MEMORY != 0
  188. bool is_shared_memory = memory->memory_flags & 0x02 ? true : false;
  189. /* Shared memory */
  190. if (is_shared_memory) {
  191. AOTMemoryInstance *shared_memory_instance;
  192. WASMSharedMemNode *node =
  193. wasm_module_get_shared_memory((WASMModuleCommon *)module);
  194. /* If the memory of this module has been instantiated,
  195. return the memory instance directly */
  196. if (node) {
  197. uint32 ref_count;
  198. ref_count = shared_memory_inc_reference(
  199. (WASMModuleCommon *)module);
  200. bh_assert(ref_count > 0);
  201. shared_memory_instance =
  202. (AOTMemoryInstance *)shared_memory_get_memory_inst(node);
  203. bh_assert(shared_memory_instance);
  204. (void)ref_count;
  205. return shared_memory_instance;
  206. }
  207. }
  208. #endif
  209. if (heap_size > 0
  210. && module->malloc_func_index != (uint32)-1
  211. && module->free_func_index != (uint32)-1) {
  212. /* Disable app heap, use malloc/free function exported
  213. by wasm app to allocate/free memory instead */
  214. heap_size = 0;
  215. }
  216. if (init_page_count == max_page_count && init_page_count == 1) {
  217. /* If only one page and at most one page, we just append
  218. the app heap to the end of linear memory, enlarge the
  219. num_bytes_per_page, and don't change the page count*/
  220. heap_offset = num_bytes_per_page;
  221. num_bytes_per_page += heap_size;
  222. if (num_bytes_per_page < heap_size) {
  223. set_error_buf(error_buf, error_buf_size,
  224. "memory size must be at most 65536 pages (4GiB)");
  225. return NULL;
  226. }
  227. }
  228. else if (heap_size > 0) {
  229. if (module->aux_heap_base_global_index != (uint32)-1
  230. && module->aux_heap_base < num_bytes_per_page
  231. * init_page_count) {
  232. /* Insert app heap before __heap_base */
  233. aux_heap_base = module->aux_heap_base;
  234. bytes_of_last_page = aux_heap_base % num_bytes_per_page;
  235. if (bytes_of_last_page == 0)
  236. bytes_of_last_page = num_bytes_per_page;
  237. bytes_to_page_end = num_bytes_per_page - bytes_of_last_page;
  238. inc_page_count = (heap_size - bytes_to_page_end
  239. + num_bytes_per_page - 1) / num_bytes_per_page;
  240. heap_offset = aux_heap_base;
  241. aux_heap_base += heap_size;
  242. bytes_of_last_page = aux_heap_base % num_bytes_per_page;
  243. if (bytes_of_last_page == 0)
  244. bytes_of_last_page = num_bytes_per_page;
  245. bytes_to_page_end = num_bytes_per_page - bytes_of_last_page;
  246. if (bytes_to_page_end < 1 * BH_KB) {
  247. aux_heap_base += 1 * BH_KB;
  248. inc_page_count++;
  249. }
  250. /* Adjust __heap_base global value */
  251. global_idx = module->aux_heap_base_global_index
  252. - module->import_global_count;
  253. global_addr = (uint8*)module_inst->global_data.ptr +
  254. module->globals[global_idx].data_offset;
  255. *(uint32 *)global_addr = aux_heap_base;
  256. LOG_VERBOSE("Reset __heap_base global to %u", aux_heap_base);
  257. }
  258. else {
  259. /* Insert app heap before new page */
  260. inc_page_count = (heap_size + num_bytes_per_page - 1)
  261. / num_bytes_per_page;
  262. heap_offset = num_bytes_per_page * init_page_count;
  263. heap_size = num_bytes_per_page * inc_page_count;
  264. if (heap_size > 0)
  265. heap_size -= 1 * BH_KB;
  266. }
  267. init_page_count += inc_page_count;
  268. max_page_count += inc_page_count;
  269. if (init_page_count > 65536) {
  270. set_error_buf(error_buf, error_buf_size,
  271. "memory size must be at most 65536 pages (4GiB)");
  272. return NULL;
  273. }
  274. if (max_page_count > 65536)
  275. max_page_count = 65536;
  276. }
  277. LOG_VERBOSE("Memory instantiate:");
  278. LOG_VERBOSE(" page bytes: %u, init pages: %u, max pages: %u",
  279. num_bytes_per_page, init_page_count, max_page_count);
  280. LOG_VERBOSE(" heap offset: %u, heap size: %d\n", heap_offset, heap_size);
  281. total_size = (uint64)num_bytes_per_page * init_page_count;
  282. #if WASM_ENABLE_SHARED_MEMORY != 0
  283. if (is_shared_memory) {
  284. /* Allocate max page for shared memory */
  285. total_size = (uint64)num_bytes_per_page * max_page_count;
  286. }
  287. #endif
  288. #ifndef OS_ENABLE_HW_BOUND_CHECK
  289. /* Allocate memory */
  290. if (!(p = runtime_malloc(total_size, error_buf, error_buf_size))) {
  291. return NULL;
  292. }
  293. #else
  294. total_size = (total_size + page_size - 1) & ~(page_size - 1);
  295. /* Totally 8G is mapped, the opcode load/store address range is 0 to 8G:
  296. * ea = i + memarg.offset
  297. * both i and memarg.offset are u32 in range 0 to 4G
  298. * so the range of ea is 0 to 8G
  299. */
  300. if (total_size >= UINT32_MAX
  301. || !(p = mapped_mem = os_mmap(NULL, map_size,
  302. MMAP_PROT_NONE, MMAP_MAP_NONE))) {
  303. set_error_buf(error_buf, error_buf_size, "mmap memory failed");
  304. return NULL;
  305. }
  306. if (os_mprotect(p, total_size, MMAP_PROT_READ | MMAP_PROT_WRITE) != 0) {
  307. set_error_buf(error_buf, error_buf_size, "mprotec memory failed");
  308. os_munmap(mapped_mem, map_size);
  309. return NULL;
  310. }
  311. memset(p, 0, (uint32)total_size);
  312. #endif /* end of OS_ENABLE_HW_BOUND_CHECK */
  313. memory_inst->module_type = Wasm_Module_AoT;
  314. memory_inst->num_bytes_per_page = num_bytes_per_page;
  315. memory_inst->cur_page_count = init_page_count;
  316. memory_inst->max_page_count = max_page_count;
  317. /* Init memory info */
  318. memory_inst->memory_data.ptr = p;
  319. memory_inst->memory_data_end.ptr = p + (uint32)total_size;
  320. memory_inst->memory_data_size = (uint32)total_size;
  321. /* Initialize heap info */
  322. memory_inst->heap_data.ptr = p + heap_offset;
  323. memory_inst->heap_data_end.ptr = p + heap_offset + heap_size;
  324. if (heap_size > 0) {
  325. if (!(heap_handle = mem_allocator_create(memory_inst->heap_data.ptr,
  326. heap_size))) {
  327. set_error_buf(error_buf, error_buf_size,
  328. "init app heap failed");
  329. goto fail1;
  330. }
  331. memory_inst->heap_handle.ptr = heap_handle;
  332. }
  333. if (total_size > 0) {
  334. if (sizeof(uintptr_t) == sizeof(uint64)) {
  335. memory_inst->mem_bound_check_1byte.u64 = total_size - 1;
  336. memory_inst->mem_bound_check_2bytes.u64 = total_size - 2;
  337. memory_inst->mem_bound_check_4bytes.u64 = total_size - 4;
  338. memory_inst->mem_bound_check_8bytes.u64 = total_size - 8;
  339. }
  340. else {
  341. memory_inst->mem_bound_check_1byte.u32[0] = (uint32)total_size - 1;
  342. memory_inst->mem_bound_check_2bytes.u32[0] = (uint32)total_size - 2;
  343. memory_inst->mem_bound_check_4bytes.u32[0] = (uint32)total_size - 4;
  344. memory_inst->mem_bound_check_8bytes.u32[0] = (uint32)total_size - 8;
  345. }
  346. }
  347. #if WASM_ENABLE_SHARED_MEMORY != 0
  348. if (is_shared_memory) {
  349. memory_inst->is_shared = true;
  350. if (!shared_memory_set_memory_inst((WASMModuleCommon *)module,
  351. (WASMMemoryInstanceCommon *)memory_inst)) {
  352. set_error_buf(error_buf, error_buf_size,
  353. "allocate memory failed");
  354. goto fail2;
  355. }
  356. }
  357. #endif
  358. return memory_inst;
  359. #if WASM_ENABLE_SHARED_MEMORY != 0
  360. fail2:
  361. if (heap_size > 0) {
  362. mem_allocator_destroy(memory_inst->heap_handle.ptr);
  363. memory_inst->heap_handle.ptr = NULL;
  364. }
  365. #endif
  366. fail1:
  367. #ifndef OS_ENABLE_HW_BOUND_CHECK
  368. wasm_runtime_free(memory_inst->memory_data.ptr);
  369. #else
  370. os_munmap(mapped_mem, map_size);
  371. #endif
  372. memory_inst->memory_data.ptr = NULL;
  373. return NULL;
  374. }
  375. static AOTMemoryInstance*
  376. aot_get_default_memory(AOTModuleInstance *module_inst)
  377. {
  378. if (module_inst->memories.ptr)
  379. return ((AOTMemoryInstance **)module_inst->memories.ptr)[0];
  380. else
  381. return NULL;
  382. }
  383. static bool
  384. memories_instantiate(AOTModuleInstance *module_inst, AOTModule *module,
  385. uint32 heap_size, char *error_buf, uint32 error_buf_size)
  386. {
  387. uint32 global_index, global_data_offset, base_offset, length;
  388. uint32 i, memory_count = module->memory_count;
  389. AOTMemoryInstance *memories, *memory_inst;
  390. AOTMemInitData *data_seg;
  391. uint64 total_size;
  392. module_inst->memory_count = memory_count;
  393. total_size = sizeof(AOTPointer) * (uint64)memory_count;
  394. if (!(module_inst->memories.ptr =
  395. runtime_malloc(total_size, error_buf, error_buf_size))) {
  396. return false;
  397. }
  398. memories = module_inst->global_table_data.memory_instances;
  399. for (i = 0; i < memory_count; i++, memories++) {
  400. memory_inst =
  401. memory_instantiate(module_inst, module,
  402. memories, &module->memories[i],
  403. heap_size, error_buf, error_buf_size);
  404. if (!memory_inst) {
  405. return false;
  406. }
  407. ((AOTMemoryInstance **)module_inst->memories.ptr)[i] = memory_inst;
  408. }
  409. /* Get default memory instance */
  410. memory_inst = aot_get_default_memory(module_inst);
  411. for (i = 0; i < module->mem_init_data_count; i++) {
  412. data_seg = module->mem_init_data_list[i];
  413. #if WASM_ENABLE_BULK_MEMORY != 0
  414. if (data_seg->is_passive)
  415. continue;
  416. #endif
  417. bh_assert(data_seg->offset.init_expr_type ==
  418. INIT_EXPR_TYPE_I32_CONST
  419. || data_seg->offset.init_expr_type ==
  420. INIT_EXPR_TYPE_GET_GLOBAL);
  421. /* Resolve memory data base offset */
  422. if (data_seg->offset.init_expr_type == INIT_EXPR_TYPE_GET_GLOBAL) {
  423. global_index = data_seg->offset.u.global_index;
  424. bh_assert(global_index <
  425. module->import_global_count + module->global_count);
  426. /* TODO: && globals[data_seg->offset.u.global_index].type ==
  427. VALUE_TYPE_I32*/
  428. if (global_index < module->import_global_count)
  429. global_data_offset =
  430. module->import_globals[global_index].data_offset;
  431. else
  432. global_data_offset =
  433. module->globals[global_index - module->import_global_count]
  434. .data_offset;
  435. base_offset = *(uint32*)
  436. ((uint8*)module_inst->global_data.ptr + global_data_offset);
  437. } else {
  438. base_offset = (uint32)data_seg->offset.u.i32;
  439. }
  440. /* Copy memory data */
  441. bh_assert(memory_inst->memory_data.ptr);
  442. /* Check memory data */
  443. /* check offset since length might negative */
  444. if (base_offset > memory_inst->memory_data_size) {
  445. LOG_DEBUG("base_offset(%d) > memory_data_size(%d)", base_offset,
  446. memory_inst->memory_data_size);
  447. set_error_buf(error_buf, error_buf_size,
  448. "data segment does not fit");
  449. return false;
  450. }
  451. /* check offset + length(could be zero) */
  452. length = data_seg->byte_count;
  453. if (base_offset + length > memory_inst->memory_data_size) {
  454. LOG_DEBUG("base_offset(%d) + length(%d) > memory_data_size(%d)",
  455. base_offset, length, memory_inst->memory_data_size);
  456. set_error_buf(error_buf, error_buf_size,
  457. "data segment does not fit");
  458. return false;
  459. }
  460. bh_memcpy_s((uint8*)memory_inst->memory_data.ptr + base_offset,
  461. memory_inst->memory_data_size - base_offset,
  462. data_seg->bytes, length);
  463. }
  464. return true;
  465. }
  466. static bool
  467. init_func_ptrs(AOTModuleInstance *module_inst, AOTModule *module,
  468. char *error_buf, uint32 error_buf_size)
  469. {
  470. uint32 i;
  471. void **func_ptrs;
  472. uint64 total_size =
  473. ((uint64)module->import_func_count + module->func_count) * sizeof(void*);
  474. /* Allocate memory */
  475. if (!(module_inst->func_ptrs.ptr = runtime_malloc
  476. (total_size, error_buf, error_buf_size))) {
  477. return false;
  478. }
  479. /* Set import function pointers */
  480. func_ptrs = (void**)module_inst->func_ptrs.ptr;
  481. for (i = 0; i < module->import_func_count; i++, func_ptrs++)
  482. *func_ptrs = (void*)module->import_funcs[i].func_ptr_linked;
  483. /* Set defined function pointers */
  484. memcpy(func_ptrs, module->func_ptrs, module->func_count * sizeof(void*));
  485. return true;
  486. }
  487. static bool
  488. init_func_type_indexes(AOTModuleInstance *module_inst, AOTModule *module,
  489. char *error_buf, uint32 error_buf_size)
  490. {
  491. uint32 i;
  492. uint32 *func_type_index;
  493. uint64 total_size =
  494. ((uint64)module->import_func_count + module->func_count) * sizeof(uint32);
  495. /* Allocate memory */
  496. if (!(module_inst->func_type_indexes.ptr =
  497. runtime_malloc(total_size, error_buf, error_buf_size))) {
  498. return false;
  499. }
  500. /* Set import function type indexes */
  501. func_type_index = (uint32*)module_inst->func_type_indexes.ptr;
  502. for (i = 0; i < module->import_func_count; i++, func_type_index++)
  503. *func_type_index = module->import_funcs[i].func_type_index;
  504. memcpy(func_type_index, module->func_type_indexes,
  505. module->func_count * sizeof(uint32));
  506. return true;
  507. }
  508. static bool
  509. create_export_funcs(AOTModuleInstance *module_inst, AOTModule *module,
  510. char *error_buf, uint32 error_buf_size)
  511. {
  512. AOTExport *exports = module->exports;
  513. AOTFunctionInstance *export_func;
  514. uint64 size;
  515. uint32 i, func_index, ftype_index;
  516. for (i = 0; i < module->export_count; i++) {
  517. if (exports[i].kind == EXPORT_KIND_FUNC)
  518. module_inst->export_func_count++;
  519. }
  520. if (module_inst->export_func_count > 0) {
  521. /* Allocate memory */
  522. size = sizeof(AOTFunctionInstance)
  523. * (uint64)module_inst->export_func_count;
  524. if (!(module_inst->export_funcs.ptr = export_func =
  525. runtime_malloc(size, error_buf, error_buf_size))) {
  526. return false;
  527. }
  528. for (i = 0; i < module->export_count; i++) {
  529. if (exports[i].kind == EXPORT_KIND_FUNC) {
  530. export_func->func_name = exports[i].name;
  531. export_func->func_index = exports[i].index;
  532. if (export_func->func_index < module->import_func_count) {
  533. export_func->is_import_func = true;
  534. export_func->u.func_import =
  535. &module->import_funcs[export_func->func_index];
  536. }
  537. else {
  538. export_func->is_import_func = false;
  539. func_index = export_func->func_index
  540. - module->import_func_count;
  541. ftype_index = module->func_type_indexes[func_index];
  542. export_func->u.func.func_type =
  543. module->func_types[ftype_index];
  544. export_func->u.func.func_ptr =
  545. module->func_ptrs[func_index];
  546. }
  547. export_func++;
  548. }
  549. }
  550. }
  551. return true;
  552. }
  553. static bool
  554. create_exports(AOTModuleInstance *module_inst, AOTModule *module,
  555. char *error_buf, uint32 error_buf_size)
  556. {
  557. return create_export_funcs(module_inst, module,
  558. error_buf, error_buf_size);
  559. }
  560. static bool
  561. execute_post_inst_function(AOTModuleInstance *module_inst)
  562. {
  563. AOTFunctionInstance *post_inst_func =
  564. aot_lookup_function(module_inst, "__post_instantiate", "()");
  565. if (!post_inst_func)
  566. /* Not found */
  567. return true;
  568. return aot_create_exec_env_and_call_function(module_inst, post_inst_func, 0, NULL);
  569. }
  570. static bool
  571. execute_start_function(AOTModuleInstance *module_inst)
  572. {
  573. AOTModule *module = (AOTModule*)module_inst->aot_module.ptr;
  574. WASMExecEnv *exec_env;
  575. typedef void (*F)(WASMExecEnv*);
  576. union { F f; void *v; } u;
  577. if (!module->start_function)
  578. return true;
  579. if (!(exec_env = wasm_exec_env_create((WASMModuleInstanceCommon*)module_inst,
  580. module_inst->default_wasm_stack_size))) {
  581. aot_set_exception(module_inst, "allocate memory failed");
  582. return false;
  583. }
  584. u.v = module->start_function;
  585. u.f(exec_env);
  586. wasm_exec_env_destroy(exec_env);
  587. return !aot_get_exception(module_inst);
  588. }
  589. #if WASM_ENABLE_BULK_MEMORY != 0
  590. static bool
  591. execute_memory_init_function(AOTModuleInstance *module_inst)
  592. {
  593. AOTFunctionInstance *memory_init_func =
  594. aot_lookup_function(module_inst, "__wasm_call_ctors", "()");
  595. if (!memory_init_func)
  596. /* Not found */
  597. return true;
  598. return aot_create_exec_env_and_call_function(module_inst, memory_init_func,
  599. 0, NULL);
  600. }
  601. #endif
  602. AOTModuleInstance*
  603. aot_instantiate(AOTModule *module, bool is_sub_inst,
  604. uint32 stack_size, uint32 heap_size,
  605. char *error_buf, uint32 error_buf_size)
  606. {
  607. AOTModuleInstance *module_inst;
  608. uint32 module_inst_struct_size =
  609. offsetof(AOTModuleInstance, global_table_data.bytes);
  610. uint64 module_inst_mem_inst_size =
  611. (uint64)module->memory_count * sizeof(AOTMemoryInstance);
  612. uint32 table_size = module->table_count > 0 ?
  613. module->tables[0].table_init_size : 0;
  614. uint64 table_data_size = (uint64)table_size * sizeof(uint32);
  615. uint64 total_size = (uint64)module_inst_struct_size
  616. + module_inst_mem_inst_size
  617. + module->global_data_size
  618. + table_data_size;
  619. uint8 *p;
  620. /* Check heap size */
  621. heap_size = align_uint(heap_size, 8);
  622. if (heap_size > APP_HEAP_SIZE_MAX)
  623. heap_size = APP_HEAP_SIZE_MAX;
  624. /* Allocate module instance, global data, table data and heap data */
  625. if (!(module_inst = runtime_malloc(total_size,
  626. error_buf, error_buf_size))) {
  627. return NULL;
  628. }
  629. module_inst->module_type = Wasm_Module_AoT;
  630. module_inst->aot_module.ptr = module;
  631. /* Initialize global info */
  632. p = (uint8*)module_inst + module_inst_struct_size +
  633. module_inst_mem_inst_size;
  634. module_inst->global_data.ptr = p;
  635. module_inst->global_data_size = module->global_data_size;
  636. if (!global_instantiate(module_inst, module, error_buf, error_buf_size))
  637. goto fail;
  638. /* Initialize table info */
  639. p += module->global_data_size;
  640. module_inst->table_data.ptr = p;
  641. module_inst->table_size = table_size;
  642. /* Set all elements to -1 to mark them as uninitialized elements */
  643. memset(module_inst->table_data.ptr, -1, (uint32)table_data_size);
  644. if (!table_instantiate(module_inst, module, error_buf, error_buf_size))
  645. goto fail;
  646. /* Initialize memory space */
  647. if (!memories_instantiate(module_inst, module, heap_size,
  648. error_buf, error_buf_size))
  649. goto fail;
  650. /* Initialize function pointers */
  651. if (!init_func_ptrs(module_inst, module, error_buf, error_buf_size))
  652. goto fail;
  653. /* Initialize function type indexes */
  654. if (!init_func_type_indexes(module_inst, module, error_buf, error_buf_size))
  655. goto fail;
  656. if (!create_exports(module_inst, module, error_buf, error_buf_size))
  657. goto fail;
  658. #if WASM_ENABLE_LIBC_WASI != 0
  659. if (!is_sub_inst) {
  660. if (heap_size > 0
  661. && !wasm_runtime_init_wasi((WASMModuleInstanceCommon*)module_inst,
  662. module->wasi_args.dir_list,
  663. module->wasi_args.dir_count,
  664. module->wasi_args.map_dir_list,
  665. module->wasi_args.map_dir_count,
  666. module->wasi_args.env,
  667. module->wasi_args.env_count,
  668. module->wasi_args.argv,
  669. module->wasi_args.argc,
  670. error_buf, error_buf_size))
  671. goto fail;
  672. }
  673. #endif
  674. /* Initialize the thread related data */
  675. if (stack_size == 0)
  676. stack_size = DEFAULT_WASM_STACK_SIZE;
  677. #if WASM_ENABLE_SPEC_TEST != 0
  678. if (stack_size < 48 *1024)
  679. stack_size = 48 * 1024;
  680. #endif
  681. module_inst->default_wasm_stack_size = stack_size;
  682. /* Execute __post_instantiate function and start function*/
  683. if (!execute_post_inst_function(module_inst)
  684. || !execute_start_function(module_inst)) {
  685. set_error_buf(error_buf, error_buf_size,
  686. module_inst->cur_exception);
  687. goto fail;
  688. }
  689. #if WASM_ENABLE_BULK_MEMORY != 0
  690. #if WASM_ENABLE_LIBC_WASI != 0
  691. if (!module->is_wasi_module) {
  692. #endif
  693. /* Only execute the memory init function for main instance because
  694. the data segments will be dropped once initialized.
  695. */
  696. if (!is_sub_inst) {
  697. if (!execute_memory_init_function(module_inst)) {
  698. set_error_buf(error_buf, error_buf_size,
  699. module_inst->cur_exception);
  700. goto fail;
  701. }
  702. }
  703. #if WASM_ENABLE_LIBC_WASI != 0
  704. }
  705. #endif
  706. #endif
  707. return module_inst;
  708. fail:
  709. aot_deinstantiate(module_inst, is_sub_inst);
  710. return NULL;
  711. }
  712. void
  713. aot_deinstantiate(AOTModuleInstance *module_inst, bool is_sub_inst)
  714. {
  715. #if WASM_ENABLE_LIBC_WASI != 0
  716. /* Destroy wasi resource before freeing app heap, since some fields of
  717. wasi contex are allocated from app heap, and if app heap is freed,
  718. these fields will be set to NULL, we cannot free their internal data
  719. which may allocated from global heap. */
  720. /* Only destroy wasi ctx in the main module instance */
  721. if (!is_sub_inst)
  722. wasm_runtime_destroy_wasi((WASMModuleInstanceCommon*)module_inst);
  723. #endif
  724. if (module_inst->memories.ptr)
  725. memories_deinstantiate(module_inst);
  726. if (module_inst->export_funcs.ptr)
  727. wasm_runtime_free(module_inst->export_funcs.ptr);
  728. if (module_inst->func_ptrs.ptr)
  729. wasm_runtime_free(module_inst->func_ptrs.ptr);
  730. if (module_inst->func_type_indexes.ptr)
  731. wasm_runtime_free(module_inst->func_type_indexes.ptr);
  732. wasm_runtime_free(module_inst);
  733. }
  734. AOTFunctionInstance*
  735. aot_lookup_function(const AOTModuleInstance *module_inst,
  736. const char *name, const char *signature)
  737. {
  738. uint32 i;
  739. AOTFunctionInstance *export_funcs = (AOTFunctionInstance *)
  740. module_inst->export_funcs.ptr;
  741. for (i = 0; i < module_inst->export_func_count; i++)
  742. if (!strcmp(export_funcs[i].func_name, name))
  743. return &export_funcs[i];
  744. (void)signature;
  745. return NULL;
  746. }
/* Store a 64-bit integer into two consecutive 32-bit slots.
   Going through the union splits the store into two aligned 32-bit
   writes, avoiding a potentially unaligned 64-bit store on targets
   that require natural alignment. The order of the halves follows
   host endianness. */
#define PUT_I64_TO_ADDR(addr, value) do { \
    union { int64 val; uint32 parts[2]; } u; \
    u.val = (value); \
    (addr)[0] = u.parts[0]; \
    (addr)[1] = u.parts[1]; \
  } while (0)

/* Same as PUT_I64_TO_ADDR, but for a 64-bit float value. */
#define PUT_F64_TO_ADDR(addr, value) do { \
    union { float64 val; uint32 parts[2]; } u; \
    u.val = (value); \
    (addr)[0] = u.parts[0]; \
    (addr)[1] = u.parts[1]; \
  } while (0)
#ifdef OS_ENABLE_HW_BOUND_CHECK

/* Number of pages at the bottom of the native stack that get
   mprotect()-ed to PROT_NONE so a native stack overflow faults and can
   be turned into a wasm exception by the signal handler. */
#define STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT 3

/* Per-thread pointer to the exec env currently running aot code;
   consulted by aot_signal_handler to find the instance to trap on. */
static os_thread_local_attribute WASMExecEnv *aot_exec_env = NULL;

/* Round the exec env's native stack boundary down to a page boundary;
   the result is the lowest page of the usable stack region. */
static inline uint8 *
get_stack_min_addr(WASMExecEnv *exec_env, uint32 page_size)
{
    uintptr_t stack_bound = (uintptr_t)exec_env->native_stack_boundary;
    return (uint8*)(stack_bound & ~(uintptr_t)(page_size - 1));
}
/* Fault handler for hardware bound checking: if the faulting address
   belongs to this thread's running aot code — either inside the 8 GB
   region above the linear memory base, or inside the stack guard pages —
   set the corresponding wasm exception and longjmp back to the setjmp
   point in invoke_native_with_hw_bound_check. Unrelated faults fall
   through without action. */
static void
aot_signal_handler(void *sig_addr)
{
    AOTModuleInstance *module_inst;
    AOTMemoryInstance *memory_inst;
    WASMJmpBuf *jmpbuf_node;
    uint8 *mapped_mem_start_addr, *mapped_mem_end_addr;
    uint8 *stack_min_addr;
    uint32 page_size;
    uint32 guard_page_count = STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT;

    /* Check whether current thread is running aot function */
    if (aot_exec_env
        && aot_exec_env->handle == os_self_thread()
        && (jmpbuf_node = aot_exec_env->jmpbuf_stack_top)) {
        /* Get mapped mem info of current instance */
        module_inst = (AOTModuleInstance *)aot_exec_env->module_inst;
        /* Get the default memory instance */
        memory_inst = aot_get_default_memory(module_inst);
        if (memory_inst) {
            /* Any fault within [base, base + 8 GB) is attributed to a
               wasm linear-memory access (base + 32-bit offset plus the
               access size always lands in this range). */
            mapped_mem_start_addr = (uint8*)memory_inst->memory_data.ptr;
            mapped_mem_end_addr = (uint8*)memory_inst->memory_data.ptr
                                  + 8 * (uint64)BH_GB;
        }
        /* Get stack info of current thread */
        page_size = os_getpagesize();
        stack_min_addr = get_stack_min_addr(aot_exec_env, page_size);

        if (memory_inst
            && (mapped_mem_start_addr <= (uint8*)sig_addr
                && (uint8*)sig_addr < mapped_mem_end_addr)) {
            /* The address which causes segmentation fault is inside
               aot instance's guard regions */
            aot_set_exception_with_id(module_inst,
                                      EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS);
            os_longjmp(jmpbuf_node->jmpbuf, 1);
        }
        else if (stack_min_addr - page_size <= (uint8*)sig_addr
                 && (uint8*)sig_addr < stack_min_addr
                                       + page_size * guard_page_count) {
            /* The address which causes segmentation fault is inside
               native thread's guard page */
            aot_set_exception_with_id(module_inst, EXCE_NATIVE_STACK_OVERFLOW);
            os_longjmp(jmpbuf_node->jmpbuf, 1);
        }
    }
}
  812. bool
  813. aot_signal_init()
  814. {
  815. return os_signal_init(aot_signal_handler) == 0 ? true : false;
  816. }
/* Uninstall the fault handler installed by aot_signal_init. */
void
aot_signal_destroy()
{
    os_signal_destroy();
}
#if defined(__GNUC__)
/* AddressSanitizer would flag this deliberate out-of-frame stack
   probing, so exclude the function from instrumentation. */
__attribute__((no_sanitize_address)) static uint32
#else
static uint32
#endif
touch_pages(uint8 *stack_min_addr, uint32 page_size)
{
    /* Walk the stack downward in half-page alloca steps, reading one byte
       per step, so the OS commits every stack page down to stack_min_addr
       before the caller mprotects the guard pages. The running sum and
       volatile reads only exist to keep the accesses from being
       optimized away. */
    uint8 sum = 0;
    while (1) {
        volatile uint8 *touch_addr =
            (volatile uint8*)os_alloca(page_size / 2);
        if (touch_addr < stack_min_addr + page_size) {
            /* Reached the lowest page: touch its last byte and stop */
            sum += *(stack_min_addr + page_size - 1);
            break;
        }
        sum += *touch_addr;
    }
    return sum;
}
/* Wrapper around wasm_runtime_invoke_native that converts hardware
   faults (linear-memory OOB, native stack overflow) into wasm
   exceptions: it commits and protects guard pages at the stack bottom,
   records a setjmp landing pad, and lets aot_signal_handler longjmp
   back here when a fault occurs during the call. */
static bool
invoke_native_with_hw_bound_check(WASMExecEnv *exec_env, void *func_ptr,
                                  const WASMType *func_type, const char *signature,
                                  void *attachment,
                                  uint32 *argv, uint32 argc, uint32 *argv_ret)
{
    AOTModuleInstance *module_inst = (AOTModuleInstance*)exec_env->module_inst;
    /* NOTE(review): the thread-local is cleared through this pointer at
       the end, presumably so the store isn't cached across
       os_setjmp/os_longjmp — confirm before simplifying. */
    WASMExecEnv **p_aot_exec_env = &aot_exec_env;
    WASMJmpBuf *jmpbuf_node, *jmpbuf_node_pop;
    uint32 page_size = os_getpagesize();
    uint32 guard_page_count = STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT;
    uint8 *stack_min_addr = get_stack_min_addr(exec_env, page_size);
    bool ret;

    /* Check native stack overflow firstly to ensure we have enough
       native stack to run the following codes before actually calling
       the aot function in invokeNative function. */
    if ((uint8*)&module_inst < exec_env->native_stack_boundary
                              + page_size * (guard_page_count + 1)) {
        aot_set_exception_with_id(module_inst, EXCE_NATIVE_STACK_OVERFLOW);
        return false;
    }

    /* Only one exec env per thread may be bound for signal handling */
    if (aot_exec_env
        && (aot_exec_env != exec_env)) {
        aot_set_exception(module_inst, "invalid exec env");
        return false;
    }

    if (!exec_env->jmpbuf_stack_top) {
        /* Touch each stack page to ensure that it has been mapped: the OS may
           lazily grow the stack mapping as a guard page is hit. */
        touch_pages(stack_min_addr, page_size);
        /* First time to call aot function, protect one page */
        if (os_mprotect(stack_min_addr, page_size * guard_page_count,
                        MMAP_PROT_NONE) != 0) {
            aot_set_exception(module_inst, "set protected page failed");
            return false;
        }
    }

    if (!(jmpbuf_node = wasm_runtime_malloc(sizeof(WASMJmpBuf)))) {
        aot_set_exception_with_id(module_inst, EXCE_OUT_OF_MEMORY);
        return false;
    }
    /* Push the landing pad the signal handler will longjmp to */
    wasm_exec_env_push_jmpbuf(exec_env, jmpbuf_node);

    aot_exec_env = exec_env;
    if (os_setjmp(jmpbuf_node->jmpbuf) == 0) {
        ret = wasm_runtime_invoke_native(exec_env, func_ptr, func_type,
                                         signature, attachment,
                                         argv, argc, argv_ret);
    }
    else {
        /* Exception has been set in signal handler before calling longjmp */
        ret = false;
    }

    jmpbuf_node_pop = wasm_exec_env_pop_jmpbuf(exec_env);
    bh_assert(jmpbuf_node == jmpbuf_node_pop);
    wasm_runtime_free(jmpbuf_node);
    if (!exec_env->jmpbuf_stack_top) {
        /* Unprotect the guard page when the nested call depth is zero */
        os_mprotect(stack_min_addr, page_size * guard_page_count,
                    MMAP_PROT_READ | MMAP_PROT_WRITE);
        *p_aot_exec_env = NULL;
    }
    /* Restore the signal context/mask in case we arrived via longjmp */
    os_sigreturn();
    os_signal_unmask();
    (void)jmpbuf_node_pop;
    return ret;
}
/* With hardware bound check enabled, every native invocation goes
   through the guard-page/setjmp wrapper above; otherwise call the
   runtime's native invoker directly. */
#define invoke_native_internal invoke_native_with_hw_bound_check
#else /* else of OS_ENABLE_HW_BOUND_CHECK */
#define invoke_native_internal wasm_runtime_invoke_native
#endif /* end of OS_ENABLE_HW_BOUND_CHECK */
/* Call an aot function through the native invoke path.
   Wasm functions may return multiple results while the native ABI
   returns at most one, so for result_count > 1 the extra results are
   returned through pointers appended after the original arguments and
   then copied back into argv behind the first result. Returns false if
   invocation fails or the instance has a pending exception. */
bool
aot_call_function(WASMExecEnv *exec_env,
                  AOTFunctionInstance *function,
                  unsigned argc, uint32 argv[])
{
    AOTModuleInstance *module_inst = (AOTModuleInstance*)exec_env->module_inst;
    AOTFuncType *func_type = function->u.func.func_type;
    uint32 result_count = func_type->result_count;
    /* Results beyond the first are passed back via extra pointer args */
    uint32 ext_ret_count = result_count > 1 ? result_count - 1 : 0;
    bool ret;

    if (ext_ret_count > 0) {
        uint32 cell_num = 0, i;
        /* Types of the extra results: skip the params and first result */
        uint8 *ext_ret_types = func_type->types + func_type->param_count + 1;
        uint32 argv1_buf[32], *argv1 = argv1_buf, *ext_rets = NULL;
        uint32 *argv_ret = argv;
        uint32 ext_ret_cell = wasm_get_cell_num(ext_ret_types, ext_ret_count);
        uint64 size;

        /* Allocate memory for all arguments: stack buffer when it fits,
           heap otherwise */
        size = sizeof(uint32) * (uint64)argc /* original arguments */
               + sizeof(void*) * (uint64)ext_ret_count /* extra result values' addr */
               + sizeof(uint32) * (uint64)ext_ret_cell; /* extra result values */
        if (size > sizeof(argv1_buf)
            && !(argv1 = runtime_malloc(size, module_inst->cur_exception,
                                        sizeof(module_inst->cur_exception)))) {
            aot_set_exception_with_id(module_inst, EXCE_OUT_OF_MEMORY);
            return false;
        }

        /* Copy original arguments */
        bh_memcpy_s(argv1, (uint32)size, argv, sizeof(uint32) * argc);

        /* Get the extra result value's address (the cell area that
           follows the appended result pointers) */
        ext_rets = argv1 + argc + sizeof(void*)/sizeof(uint32) * ext_ret_count;

        /* Append each extra result value's address to original arguments */
        for (i = 0; i < ext_ret_count; i++) {
            *(uintptr_t*)(argv1 + argc + sizeof(void*) / sizeof(uint32) * i) =
                (uintptr_t)(ext_rets + cell_num);
            cell_num += wasm_value_type_cell_num(ext_ret_types[i]);
        }

        ret = invoke_native_internal(exec_env, function->u.func.func_ptr,
                                     func_type, NULL, NULL, argv1, argc, argv);
        if (!ret || aot_get_exception(module_inst)) {
            if (argv1 != argv1_buf)
                wasm_runtime_free(argv1);
            return false;
        }

        /* Get extra result values: first advance argv_ret past the first
           result, whose cell count depends on its value type */
        switch (func_type->types[func_type->param_count]) {
            case VALUE_TYPE_I32:
            case VALUE_TYPE_F32:
                argv_ret++;
                break;
            case VALUE_TYPE_I64:
            case VALUE_TYPE_F64:
                argv_ret += 2;
                break;
            default:
                bh_assert(0);
                break;
        }
        /* Copy the extra result cells back after the first result */
        ext_rets = argv1 + argc + sizeof(void*)/sizeof(uint32) * ext_ret_count;
        bh_memcpy_s(argv_ret, sizeof(uint32) * cell_num,
                    ext_rets, sizeof(uint32) * cell_num);

        if (argv1 != argv1_buf)
            wasm_runtime_free(argv1);
        return true;
    }
    else {
        /* Single (or zero) result: invoke directly, results land in argv */
        ret = invoke_native_internal(exec_env, function->u.func.func_ptr,
                                     func_type, NULL, NULL, argv, argc, argv);
        return ret && !aot_get_exception(module_inst) ? true : false;
    }
}
  982. bool
  983. aot_create_exec_env_and_call_function(AOTModuleInstance *module_inst,
  984. AOTFunctionInstance *func,
  985. unsigned argc, uint32 argv[])
  986. {
  987. WASMExecEnv *exec_env;
  988. bool ret;
  989. if (!(exec_env = wasm_exec_env_create((WASMModuleInstanceCommon*)module_inst,
  990. module_inst->default_wasm_stack_size))) {
  991. aot_set_exception(module_inst, "allocate memory failed");
  992. return false;
  993. }
  994. /* set thread handle and stack boundary */
  995. wasm_exec_env_set_thread_info(exec_env);
  996. ret = aot_call_function(exec_env, func, argc, argv);
  997. wasm_exec_env_destroy(exec_env);
  998. return ret;
  999. }
  1000. void
  1001. aot_set_exception(AOTModuleInstance *module_inst,
  1002. const char *exception)
  1003. {
  1004. if (exception)
  1005. snprintf(module_inst->cur_exception,
  1006. sizeof(module_inst->cur_exception),
  1007. "Exception: %s", exception);
  1008. else
  1009. module_inst->cur_exception[0] = '\0';
  1010. }
  1011. void
  1012. aot_set_exception_with_id(AOTModuleInstance *module_inst,
  1013. uint32 id)
  1014. {
  1015. switch (id) {
  1016. case EXCE_UNREACHABLE:
  1017. aot_set_exception(module_inst, "unreachable");
  1018. break;
  1019. case EXCE_OUT_OF_MEMORY:
  1020. aot_set_exception(module_inst, "allocate memory failed");
  1021. break;
  1022. case EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS:
  1023. aot_set_exception(module_inst, "out of bounds memory access");
  1024. break;
  1025. case EXCE_INTEGER_OVERFLOW:
  1026. aot_set_exception(module_inst, "integer overflow");
  1027. break;
  1028. case EXCE_INTEGER_DIVIDE_BY_ZERO:
  1029. aot_set_exception(module_inst, "integer divide by zero");
  1030. break;
  1031. case EXCE_INVALID_CONVERSION_TO_INTEGER:
  1032. aot_set_exception(module_inst, "invalid conversion to integer");
  1033. break;
  1034. case EXCE_INVALID_FUNCTION_TYPE_INDEX:
  1035. aot_set_exception(module_inst, "indirect call type mismatch");
  1036. break;
  1037. case EXCE_INVALID_FUNCTION_INDEX:
  1038. aot_set_exception(module_inst, "invalid function index");
  1039. break;
  1040. case EXCE_UNDEFINED_ELEMENT:
  1041. aot_set_exception(module_inst, "undefined element");
  1042. break;
  1043. case EXCE_UNINITIALIZED_ELEMENT:
  1044. aot_set_exception(module_inst, "uninitialized element");
  1045. break;
  1046. case EXCE_CALL_UNLINKED_IMPORT_FUNC:
  1047. aot_set_exception(module_inst, "fail to call unlinked import function");
  1048. break;
  1049. case EXCE_NATIVE_STACK_OVERFLOW:
  1050. aot_set_exception(module_inst, "native stack overflow");
  1051. break;
  1052. case EXCE_UNALIGNED_ATOMIC:
  1053. aot_set_exception(module_inst, "unaligned atomic");
  1054. break;
  1055. default:
  1056. break;
  1057. }
  1058. }
  1059. const char*
  1060. aot_get_exception(AOTModuleInstance *module_inst)
  1061. {
  1062. if (module_inst->cur_exception[0] == '\0')
  1063. return NULL;
  1064. else
  1065. return module_inst->cur_exception;
  1066. }
  1067. void
  1068. aot_clear_exception(AOTModuleInstance *module_inst)
  1069. {
  1070. module_inst->cur_exception[0] = '\0';
  1071. }
/* Call the wasm module's exported malloc with `size`.
   Under hardware bound check, reuse the thread's active exec env when
   one exists (asserting it belongs to this instance); otherwise create
   a temporary env for the call. On success the returned app offset is
   stored in *p_result. */
static bool
execute_malloc_function(AOTModuleInstance *module_inst,
                        AOTFunctionInstance *malloc_func,
                        uint32 size, uint32 *p_result)
{
    uint32 argv[2];
    bool ret;

    argv[0] = size;
#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (aot_exec_env != NULL) {
        bh_assert(aot_exec_env->module_inst
                  == (WASMModuleInstanceCommon *)module_inst);
        ret = aot_call_function(aot_exec_env, malloc_func, 1, argv);
    }
    else
#endif
    {
        ret = aot_create_exec_env_and_call_function
                    (module_inst, malloc_func, 1, argv);
    }

    if (ret)
        /* argv[0] now holds malloc's i32 result (the app offset) */
        *p_result = argv[0];
    return ret;
}
/* Call the wasm module's exported free on app offset `offset`.
   Under hardware bound check, reuse the thread's active exec env when
   one exists (asserting it belongs to this instance); otherwise create
   a temporary env for the call. */
static bool
execute_free_function(AOTModuleInstance *module_inst,
                      AOTFunctionInstance *free_func,
                      uint32 offset)
{
    uint32 argv[2];

    argv[0] = offset;
#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (aot_exec_env != NULL) {
        bh_assert(aot_exec_env->module_inst
                  == (WASMModuleInstanceCommon *)module_inst);
        return aot_call_function(aot_exec_env, free_func, 1, argv);
    }
    else
#endif
    {
        return aot_create_exec_env_and_call_function
                    (module_inst, free_func, 1, argv);
    }
}
/* Allocate `size` bytes in the instance's app memory.
   Strategy: use the runtime-managed app heap if one exists, otherwise
   fall back to the module's own exported malloc/free pair. Returns the
   app-space offset of the allocation (0 on failure, with an "out of
   memory" exception set) and optionally the native address through
   p_native_addr. */
int32
aot_module_malloc(AOTModuleInstance *module_inst, uint32 size,
                  void **p_native_addr)
{
    AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
    AOTModule *module = (AOTModule *)module_inst->aot_module.ptr;
    uint8 *addr = NULL;
    uint32 offset = 0;

    if (memory_inst->heap_handle.ptr) {
        addr = mem_allocator_malloc(memory_inst->heap_handle.ptr, size);
    }
    else if (module->malloc_func_index != (uint32)-1
             && module->free_func_index != (uint32)-1) {
        /* No app heap: delegate to the wasm module's exported malloc */
        AOTFunctionInstance *malloc_func =
            aot_lookup_function(module_inst, "malloc", "(i)i");

        bh_assert(malloc_func);
        if (!execute_malloc_function(module_inst, malloc_func,
                                     size, &offset)) {
            return 0;
        }
        /* wasm malloc returning offset 0 means allocation failed */
        addr = offset
               ? (uint8*)memory_inst->memory_data.ptr + offset
               : NULL;
    }

    if (!addr) {
        aot_set_exception(module_inst, "out of memory");
        return 0;
    }
    if (p_native_addr)
        *p_native_addr = addr;
    /* Convert the native address back into an app-space offset */
    return (int32)(addr - (uint8*)memory_inst->memory_data.ptr);
}
/* Free an app-space allocation made by aot_module_malloc.
   Routes to the runtime app heap when the address falls inside it,
   otherwise to the module's exported free (only when both malloc and
   free exports exist and the address lies inside linear memory).
   ptr == 0 is a no-op. */
void
aot_module_free(AOTModuleInstance *module_inst, int32 ptr)
{
    AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
    AOTModule *module = (AOTModule *)module_inst->aot_module.ptr;

    if (ptr) {
        uint8 *addr = (uint8 *)memory_inst->memory_data.ptr + ptr;

        if (memory_inst->heap_handle.ptr
            && (uint8 *)memory_inst->heap_data.ptr < addr
            && addr < (uint8 *)memory_inst->heap_data_end.ptr) {
            mem_allocator_free(memory_inst->heap_handle.ptr, addr);
        }
        else if (module->malloc_func_index != (uint32)-1
                 && module->free_func_index != (uint32)-1
                 && (uint8 *)memory_inst->memory_data.ptr <= addr
                 && addr < (uint8 *)memory_inst->memory_data_end.ptr) {
            AOTFunctionInstance *free_func =
                aot_lookup_function(module_inst, "free", "(i)i");

            bh_assert(free_func);
            execute_free_function(module_inst, free_func, (uint32)ptr);
        }
    }
}
  1171. int32
  1172. aot_module_dup_data(AOTModuleInstance *module_inst,
  1173. const char *src, uint32 size)
  1174. {
  1175. char *buffer;
  1176. int32 buffer_offset = aot_module_malloc(module_inst, size,
  1177. (void**)&buffer);
  1178. if (buffer_offset != 0) {
  1179. buffer = aot_addr_app_to_native(module_inst, buffer_offset);
  1180. bh_memcpy_s(buffer, size, src, size);
  1181. }
  1182. return buffer_offset;
  1183. }
  1184. bool
  1185. aot_validate_app_addr(AOTModuleInstance *module_inst,
  1186. int32 app_offset, uint32 size)
  1187. {
  1188. AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
  1189. /* integer overflow check */
  1190. if((uint32)app_offset + size < (uint32)app_offset) {
  1191. goto fail;
  1192. }
  1193. if ((uint32)app_offset + size <= memory_inst->memory_data_size) {
  1194. return true;
  1195. }
  1196. fail:
  1197. aot_set_exception(module_inst, "out of bounds memory access");
  1198. return false;
  1199. }
  1200. bool
  1201. aot_validate_native_addr(AOTModuleInstance *module_inst,
  1202. void *native_ptr, uint32 size)
  1203. {
  1204. uint8 *addr = (uint8 *)native_ptr;
  1205. AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
  1206. /* integer overflow check */
  1207. if (addr + size < addr) {
  1208. goto fail;
  1209. }
  1210. if ((uint8 *)memory_inst->memory_data.ptr <= addr
  1211. && addr + size <= (uint8 *)memory_inst->memory_data_end.ptr)
  1212. return true;
  1213. fail:
  1214. aot_set_exception(module_inst, "out of bounds memory access");
  1215. return false;
  1216. }
  1217. void *
  1218. aot_addr_app_to_native(AOTModuleInstance *module_inst, int32 app_offset)
  1219. {
  1220. AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
  1221. uint8 *addr = (uint8 *)memory_inst->memory_data.ptr + (uint32)app_offset;
  1222. if ((uint8 *)memory_inst->memory_data.ptr <= addr
  1223. && addr < (uint8 *)memory_inst->memory_data_end.ptr)
  1224. return addr;
  1225. return NULL;
  1226. }
  1227. int32
  1228. aot_addr_native_to_app(AOTModuleInstance *module_inst, void *native_ptr)
  1229. {
  1230. uint8 *addr = (uint8 *)native_ptr;
  1231. AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
  1232. if ((uint8 *)memory_inst->memory_data.ptr <= addr
  1233. && addr < (uint8 *)memory_inst->memory_data_end.ptr)
  1234. return (int32)(addr - (uint8 *)memory_inst->memory_data.ptr);
  1235. return 0;
  1236. }
  1237. bool
  1238. aot_get_app_addr_range(AOTModuleInstance *module_inst,
  1239. int32 app_offset,
  1240. int32 *p_app_start_offset,
  1241. int32 *p_app_end_offset)
  1242. {
  1243. AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
  1244. uint32 memory_data_size = memory_inst->memory_data_size;
  1245. if ((uint32)app_offset < memory_data_size) {
  1246. if (p_app_start_offset)
  1247. *p_app_start_offset = 0;
  1248. if (p_app_end_offset)
  1249. *p_app_end_offset = (int32)memory_data_size;
  1250. return true;
  1251. }
  1252. return false;
  1253. }
  1254. bool
  1255. aot_get_native_addr_range(AOTModuleInstance *module_inst,
  1256. uint8 *native_ptr,
  1257. uint8 **p_native_start_addr,
  1258. uint8 **p_native_end_addr)
  1259. {
  1260. uint8 *addr = (uint8 *)native_ptr;
  1261. AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
  1262. if ((uint8 *)memory_inst->memory_data.ptr <= addr
  1263. && addr < (uint8 *)memory_inst->memory_data_end.ptr) {
  1264. if (p_native_start_addr)
  1265. *p_native_start_addr = (uint8 *)memory_inst->memory_data.ptr;
  1266. if (p_native_end_addr)
  1267. *p_native_end_addr = (uint8 *)memory_inst->memory_data_end.ptr;
  1268. return true;
  1269. }
  1270. return false;
  1271. }
#ifndef OS_ENABLE_HW_BOUND_CHECK
/* Grow the default linear memory by inc_page_count pages.
   Software-bound-check build: the memory buffer is heap-allocated, so
   growth means realloc (or malloc+copy+free) and then rebasing every
   pointer that referenced the old buffer, including the app heap that
   lives inside it. Returns false on page-count overflow, max-page
   violation, 32-bit size overflow or OOM. */
bool
aot_enlarge_memory(AOTModuleInstance *module_inst, uint32 inc_page_count)
{
    AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
    uint32 num_bytes_per_page = memory_inst->num_bytes_per_page;
    uint32 cur_page_count = memory_inst->cur_page_count;
    uint32 max_page_count = memory_inst->max_page_count;
    uint32 total_page_count = cur_page_count + inc_page_count;
    uint32 total_size_old = memory_inst->memory_data_size;
    uint64 total_size = (uint64)num_bytes_per_page * total_page_count;
    uint32 heap_size = (uint32)((uint8 *)memory_inst->heap_data_end.ptr
                                - (uint8 *)memory_inst->heap_data.ptr);
    uint8 *memory_data_old = (uint8 *)memory_inst->memory_data.ptr;
    uint8 *heap_data_old = (uint8 *)memory_inst->heap_data.ptr;
    uint8 *memory_data, *heap_data;
    void *heap_handle_old = memory_inst->heap_handle.ptr;

    if (inc_page_count <= 0)
        /* No need to enlarge memory */
        return true;

    if (total_page_count < cur_page_count /* integer overflow */
        || total_page_count > max_page_count) {
        return false;
    }

    /* memory_data_size is uint32, so the grown size must fit in 32 bits */
    if (total_size >= UINT32_MAX) {
        return false;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (memory_inst->is_shared) {
        /* For shared memory, we have reserved the maximum spaces during
           instantiate, only change the cur_page_count here */
        memory_inst->cur_page_count = total_page_count;
        return true;
    }
#endif

    if (heap_size > 0) {
        /* Destroy heap's lock firstly, if its memory is re-allocated,
           we cannot access its lock again. */
        mem_allocator_destroy_lock(memory_inst->heap_handle.ptr);
    }
    if (!(memory_data = wasm_runtime_realloc(memory_data_old,
                                             (uint32)total_size))) {
        /* realloc failed: fall back to malloc + copy + free */
        if (!(memory_data = wasm_runtime_malloc((uint32)total_size))) {
            if (heap_size > 0) {
                /* Restore heap's lock if memory re-alloc failed */
                mem_allocator_reinit_lock(memory_inst->heap_handle.ptr);
            }
            return false;
        }
        bh_memcpy_s(memory_data, (uint32)total_size,
                    memory_data_old, total_size_old);
        wasm_runtime_free(memory_data_old);
    }

    /* The newly grown region must start zeroed */
    memset(memory_data + total_size_old,
           0, (uint32)total_size - total_size_old);

    memory_inst->cur_page_count = total_page_count;
    memory_inst->memory_data_size = (uint32)total_size;
    memory_inst->memory_data.ptr = memory_data;
    memory_inst->memory_data_end.ptr = memory_data + total_size;

    if (heap_size > 0) {
        /* The app heap lives inside the memory buffer: rebase its handle
           by the relocation delta, then let the allocator fix up its
           internal pointers. */
        memory_inst->heap_handle.ptr = (uint8 *)heap_handle_old
                                       + (memory_data - memory_data_old);
        if (mem_allocator_migrate(memory_inst->heap_handle.ptr,
                                  heap_handle_old) != 0) {
            return false;
        }
    }

    heap_data = heap_data_old + (memory_data - memory_data_old);
    memory_inst->heap_data.ptr = heap_data;
    memory_inst->heap_data_end.ptr = heap_data + heap_size;

    /* Refresh the precomputed bound-check limits consumed by aot code */
    if (sizeof(uintptr_t) == sizeof(uint64)) {
        memory_inst->mem_bound_check_1byte.u64 = total_size - 1;
        memory_inst->mem_bound_check_2bytes.u64 = total_size - 2;
        memory_inst->mem_bound_check_4bytes.u64 = total_size - 4;
        memory_inst->mem_bound_check_8bytes.u64 = total_size - 8;
    }
    else {
        memory_inst->mem_bound_check_1byte.u32[0] = (uint32)total_size - 1;
        memory_inst->mem_bound_check_2bytes.u32[0] = (uint32)total_size - 2;
        memory_inst->mem_bound_check_4bytes.u32[0] = (uint32)total_size - 4;
        memory_inst->mem_bound_check_8bytes.u32[0] = (uint32)total_size - 8;
    }
    return true;
}
#else /* else of OS_ENABLE_HW_BOUND_CHECK */
/* Grow the default linear memory by inc_page_count pages.
   Hardware-bound-check build: the address range is already reserved, so
   growth only needs to make the new pages accessible via mprotect, zero
   them, and refresh the cached sizes/limits — no reallocation and no
   pointer rebasing. */
bool
aot_enlarge_memory(AOTModuleInstance *module_inst, uint32 inc_page_count)
{
    AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
    uint32 num_bytes_per_page = memory_inst->num_bytes_per_page;
    uint32 cur_page_count = memory_inst->cur_page_count;
    uint32 max_page_count = memory_inst->max_page_count;
    uint32 total_page_count = cur_page_count + inc_page_count;
    uint64 total_size = (uint64)num_bytes_per_page * total_page_count;

    if (inc_page_count <= 0)
        /* No need to enlarge memory */
        return true;

    if (total_page_count < cur_page_count /* integer overflow */
        || total_page_count > max_page_count) {
        return false;
    }

    /* Make the pages beyond the current end readable/writable */
    if (os_mprotect(memory_inst->memory_data_end.ptr,
                    num_bytes_per_page * inc_page_count,
                    MMAP_PROT_READ | MMAP_PROT_WRITE) != 0) {
        return false;
    }

    /* The newly grown region must start zeroed */
    memset(memory_inst->memory_data_end.ptr, 0,
           num_bytes_per_page * inc_page_count);

    memory_inst->cur_page_count = total_page_count;
    memory_inst->memory_data_size = (uint32)total_size;
    memory_inst->memory_data_end.ptr = (uint8 *)memory_inst->memory_data.ptr
                                       + (uint32)total_size;

    /* Refresh the precomputed bound-check limits consumed by aot code */
    if (sizeof(uintptr_t) == sizeof(uint64)) {
        memory_inst->mem_bound_check_1byte.u64 = total_size - 1;
        memory_inst->mem_bound_check_2bytes.u64 = total_size - 2;
        memory_inst->mem_bound_check_4bytes.u64 = total_size - 4;
        memory_inst->mem_bound_check_8bytes.u64 = total_size - 8;
    }
    else {
        memory_inst->mem_bound_check_1byte.u32[0] = (uint32)total_size - 1;
        memory_inst->mem_bound_check_2bytes.u32[0] = (uint32)total_size - 2;
        memory_inst->mem_bound_check_4bytes.u32[0] = (uint32)total_size - 4;
        memory_inst->mem_bound_check_8bytes.u32[0] = (uint32)total_size - 8;
    }
    return true;
}
#endif /* end of OS_ENABLE_HW_BOUND_CHECK */
  1399. bool
  1400. aot_is_wasm_type_equal(AOTModuleInstance *module_inst,
  1401. uint32 type1_idx, uint32 type2_idx)
  1402. {
  1403. WASMType *type1, *type2;
  1404. AOTModule *module = (AOTModule*)module_inst->aot_module.ptr;
  1405. if (type1_idx >= module->func_type_count
  1406. || type2_idx >= module->func_type_count) {
  1407. aot_set_exception(module_inst, "type index out of bounds");
  1408. return false;
  1409. }
  1410. if (type1_idx == type2_idx)
  1411. return true;
  1412. type1 = module->func_types[type1_idx];
  1413. type2 = module->func_types[type2_idx];
  1414. return wasm_type_equal(type1, type2);
  1415. }
/* Trampoline called from aot code to invoke an imported native function.
   func_idx must index an import (asserted); the stored function pointer,
   wasm type, registered signature and attachment are resolved, then the
   call is dispatched via the normal or "raw" native calling convention.
   Results are written back into argv. */
bool
aot_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
                  uint32 argc, uint32 *argv)
{
    AOTModuleInstance *module_inst = (AOTModuleInstance*)
                                     wasm_runtime_get_module_inst(exec_env);
    AOTModule *aot_module = (AOTModule*)module_inst->aot_module.ptr;
    uint32 *func_type_indexes = (uint32*)module_inst->func_type_indexes.ptr;
    uint32 func_type_idx = func_type_indexes[func_idx];
    AOTFuncType *func_type = aot_module->func_types[func_type_idx];
    void **func_ptrs = (void**)module_inst->func_ptrs.ptr;
    void *func_ptr = func_ptrs[func_idx];
    AOTImportFunc *import_func;
    const char *signature;
    void *attachment;
    char buf[128];
#ifdef OS_ENABLE_HW_BOUND_CHECK
    uint32 page_size = os_getpagesize();
    uint32 guard_page_count = STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT;

    /* Check native stack overflow firstly to ensure we have enough
       native stack to run the following codes before actually calling
       the aot function in invokeNative function. */
    if ((uint8*)&module_inst < exec_env->native_stack_boundary
                              + page_size * (guard_page_count + 1)) {
        aot_set_exception_with_id(module_inst, EXCE_NATIVE_STACK_OVERFLOW);
        return false;
    }
#endif

    bh_assert(func_idx < aot_module->import_func_count);

    import_func = aot_module->import_funcs + func_idx;
    if (!func_ptr) {
        /* The import was never linked to a host implementation */
        snprintf(buf, sizeof(buf),
                 "fail to call unlinked import function (%s, %s)",
                 import_func->module_name, import_func->func_name);
        aot_set_exception(module_inst, buf);
        return false;
    }

    signature = import_func->signature;
    attachment = import_func->attachment;
    if (!import_func->call_conv_raw) {
        return wasm_runtime_invoke_native(exec_env, func_ptr,
                                          func_type, signature, attachment,
                                          argv, argc, argv);
    }
    else {
        return wasm_runtime_invoke_native_raw(exec_env, func_ptr,
                                              func_type, signature, attachment,
                                              argv, argc, argv);
    }
}
  1466. bool
  1467. aot_call_indirect(WASMExecEnv *exec_env,
  1468. bool check_func_type, uint32 func_type_idx,
  1469. uint32 table_elem_idx,
  1470. uint32 argc, uint32 *argv)
  1471. {
  1472. AOTModuleInstance *module_inst = (AOTModuleInstance*)
  1473. wasm_runtime_get_module_inst(exec_env);
  1474. AOTModule *aot_module = (AOTModule*)module_inst->aot_module.ptr;
  1475. uint32 *func_type_indexes = (uint32*)module_inst->func_type_indexes.ptr;
  1476. uint32 *table_data = (uint32*)module_inst->table_data.ptr;
  1477. AOTFuncType *func_type;
  1478. void **func_ptrs = (void**)module_inst->func_ptrs.ptr, *func_ptr;
  1479. uint32 table_size = module_inst->table_size;
  1480. uint32 func_idx, func_type_idx1;
  1481. uint32 ext_ret_count;
  1482. AOTImportFunc *import_func;
  1483. const char *signature = NULL;
  1484. void *attachment = NULL;
  1485. char buf[128];
  1486. bool ret;
  1487. /* this function is called from native code, so exec_env->handle and
  1488. exec_env->native_stack_boundary must have been set, we don't set
  1489. it again */
  1490. if ((uint8*)&module_inst < exec_env->native_stack_boundary) {
  1491. aot_set_exception_with_id(module_inst, EXCE_NATIVE_STACK_OVERFLOW);
  1492. return false;
  1493. }
  1494. if (table_elem_idx >= table_size) {
  1495. aot_set_exception_with_id(module_inst, EXCE_UNDEFINED_ELEMENT);
  1496. return false;
  1497. }
  1498. func_idx = table_data[table_elem_idx];
  1499. if (func_idx == (uint32)-1) {
  1500. aot_set_exception_with_id(module_inst, EXCE_UNINITIALIZED_ELEMENT);
  1501. return false;
  1502. }
  1503. func_type_idx1 = func_type_indexes[func_idx];
  1504. if (check_func_type
  1505. && !aot_is_wasm_type_equal(module_inst, func_type_idx,
  1506. func_type_idx1)) {
  1507. aot_set_exception_with_id(module_inst,
  1508. EXCE_INVALID_FUNCTION_TYPE_INDEX);
  1509. return false;
  1510. }
  1511. func_type = aot_module->func_types[func_type_idx1];
  1512. if (!(func_ptr = func_ptrs[func_idx])) {
  1513. bh_assert(func_idx < aot_module->import_func_count);
  1514. import_func = aot_module->import_funcs + func_idx;
  1515. snprintf(buf, sizeof(buf),
  1516. "fail to call unlinked import function (%s, %s)",
  1517. import_func->module_name, import_func->func_name);
  1518. aot_set_exception(module_inst, buf);
  1519. return false;
  1520. }
  1521. if (func_idx < aot_module->import_func_count) {
  1522. /* Call native function */
  1523. import_func = aot_module->import_funcs + func_idx;
  1524. signature = import_func->signature;
  1525. if (import_func->call_conv_raw) {
  1526. attachment = import_func->attachment;
  1527. return wasm_runtime_invoke_native_raw(exec_env, func_ptr,
  1528. func_type, signature,
  1529. attachment,
  1530. argv, argc, argv);
  1531. }
  1532. }
  1533. ext_ret_count = func_type->result_count > 1
  1534. ? func_type->result_count - 1 : 0;
  1535. if (ext_ret_count > 0) {
  1536. uint32 argv1_buf[32], *argv1 = argv1_buf;
  1537. uint32 *ext_rets = NULL, *argv_ret = argv;
  1538. uint32 cell_num = 0, i;
  1539. uint8 *ext_ret_types = func_type->types + func_type->param_count + 1;
  1540. uint32 ext_ret_cell = wasm_get_cell_num(ext_ret_types, ext_ret_count);
  1541. uint64 size;
  1542. /* Allocate memory all arguments */
  1543. size = sizeof(uint32) * (uint64)argc /* original arguments */
  1544. + sizeof(void*) * (uint64)ext_ret_count /* extra result values' addr */
  1545. + sizeof(uint32) * (uint64)ext_ret_cell; /* extra result values */
  1546. if (size > sizeof(argv1_buf)
  1547. && !(argv1 = runtime_malloc(size, module_inst->cur_exception,
  1548. sizeof(module_inst->cur_exception)))) {
  1549. aot_set_exception_with_id(module_inst, EXCE_OUT_OF_MEMORY);
  1550. return false;
  1551. }
  1552. /* Copy original arguments */
  1553. bh_memcpy_s(argv1, (uint32)size, argv, sizeof(uint32) * argc);
  1554. /* Get the extra result value's address */
  1555. ext_rets = argv1 + argc + sizeof(void*)/sizeof(uint32) * ext_ret_count;
  1556. /* Append each extra result value's address to original arguments */
  1557. for (i = 0; i < ext_ret_count; i++) {
  1558. *(uintptr_t*)(argv1 + argc + sizeof(void*) / sizeof(uint32) * i) =
  1559. (uintptr_t)(ext_rets + cell_num);
  1560. cell_num += wasm_value_type_cell_num(ext_ret_types[i]);
  1561. }
  1562. ret = invoke_native_internal(exec_env, func_ptr,
  1563. func_type, signature, attachment,
  1564. argv1, argc, argv);
  1565. if (!ret || aot_get_exception(module_inst)) {
  1566. if (argv1 != argv1_buf)
  1567. wasm_runtime_free(argv1);
  1568. return false;
  1569. }
  1570. /* Get extra result values */
  1571. switch (func_type->types[func_type->param_count]) {
  1572. case VALUE_TYPE_I32:
  1573. case VALUE_TYPE_F32:
  1574. argv_ret++;
  1575. break;
  1576. case VALUE_TYPE_I64:
  1577. case VALUE_TYPE_F64:
  1578. argv_ret += 2;
  1579. break;
  1580. default:
  1581. bh_assert(0);
  1582. break;
  1583. }
  1584. ext_rets = argv1 + argc + sizeof(void*)/sizeof(uint32) * ext_ret_count;
  1585. bh_memcpy_s(argv_ret, sizeof(uint32) * cell_num,
  1586. ext_rets, sizeof(uint32) * cell_num);
  1587. if (argv1 != argv1_buf)
  1588. wasm_runtime_free(argv1);
  1589. return true;
  1590. }
  1591. else {
  1592. return invoke_native_internal(exec_env, func_ptr,
  1593. func_type, signature, attachment,
  1594. argv, argc, argv);
  1595. }
  1596. }
  1597. #if WASM_ENABLE_BULK_MEMORY != 0
  1598. bool
  1599. aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index,
  1600. uint32 offset, uint32 len, uint32 dst)
  1601. {
  1602. AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
  1603. AOTModule *aot_module;
  1604. uint8 *data = NULL;
  1605. uint8 *maddr;
  1606. uint64 seg_len = 0;
  1607. aot_module = (AOTModule *)module_inst->aot_module.ptr;
  1608. if (aot_module->is_jit_mode) {
  1609. #if WASM_ENABLE_JIT != 0
  1610. seg_len = aot_module->wasm_module->data_segments[seg_index]->data_length;
  1611. data = aot_module->wasm_module->data_segments[seg_index]->data;
  1612. #endif
  1613. }
  1614. else {
  1615. seg_len = aot_module->mem_init_data_list[seg_index]->byte_count;
  1616. data = aot_module->mem_init_data_list[seg_index]->bytes;
  1617. }
  1618. if (!aot_validate_app_addr(module_inst, dst, len))
  1619. return false;
  1620. if ((uint64)offset + (uint64)len > seg_len) {
  1621. aot_set_exception(module_inst, "out of bounds memory access");
  1622. return false;
  1623. }
  1624. maddr = aot_addr_app_to_native(module_inst, dst);
  1625. bh_memcpy_s(maddr, memory_inst->memory_data_size - dst,
  1626. data + offset, len);
  1627. return true;
  1628. }
  1629. bool
  1630. aot_data_drop(AOTModuleInstance *module_inst, uint32 seg_index)
  1631. {
  1632. AOTModule *aot_module = (AOTModule *)(module_inst->aot_module.ptr);
  1633. if (aot_module->is_jit_mode) {
  1634. #if WASM_ENABLE_JIT != 0
  1635. aot_module->wasm_module->data_segments[seg_index]->data_length = 0;
  1636. /* Currently we can't free the dropped data segment
  1637. as they are stored in wasm bytecode */
  1638. #endif
  1639. }
  1640. else {
  1641. aot_module->mem_init_data_list[seg_index]->byte_count = 0;
  1642. /* Currently we can't free the dropped data segment
  1643. as the mem_init_data_count is a continuous array */
  1644. }
  1645. return true;
  1646. }
  1647. #endif /* WASM_ENABLE_BULK_MEMORY */
  1648. #if WASM_ENABLE_THREAD_MGR != 0
  1649. bool
  1650. aot_set_aux_stack(WASMExecEnv *exec_env,
  1651. uint32 start_offset, uint32 size)
  1652. {
  1653. AOTModuleInstance *module_inst =
  1654. (AOTModuleInstance*)exec_env->module_inst;
  1655. AOTModule *module = (AOTModule *)module_inst->aot_module.ptr;
  1656. uint32 stack_top_idx = module->aux_stack_top_global_index;
  1657. uint32 data_end = module->aux_data_end;
  1658. uint32 stack_bottom = module->aux_stack_bottom;
  1659. bool is_stack_before_data = stack_bottom < data_end ? true : false;
  1660. /* Check the aux stack space, currently we don't allocate space in heap */
  1661. if ((is_stack_before_data && (size > start_offset))
  1662. || ((!is_stack_before_data) && (start_offset - data_end < size)))
  1663. return false;
  1664. if (stack_top_idx != (uint32)-1) {
  1665. /* The aux stack top is a wasm global,
  1666. set the initial value for the global */
  1667. uint32 global_offset =
  1668. module->globals[stack_top_idx].data_offset;
  1669. uint8 *global_addr = (uint8 *)module_inst->global_data.ptr + global_offset;
  1670. *(int32*)global_addr = start_offset;
  1671. /* The aux stack boundary is a constant value,
  1672. set the value to exec_env */
  1673. exec_env->aux_stack_boundary = start_offset - size;
  1674. return true;
  1675. }
  1676. return false;
  1677. }
  1678. bool
  1679. aot_get_aux_stack(WASMExecEnv *exec_env,
  1680. uint32 *start_offset, uint32 *size)
  1681. {
  1682. AOTModuleInstance *module_inst =
  1683. (AOTModuleInstance*)exec_env->module_inst;
  1684. AOTModule *module = (AOTModule *)module_inst->aot_module.ptr;
  1685. /* The aux stack information is resolved in loader
  1686. and store in module */
  1687. uint32 stack_bottom = module->aux_stack_bottom;
  1688. uint32 total_aux_stack_size = module->aux_stack_size;
  1689. if (stack_bottom != 0 && total_aux_stack_size != 0) {
  1690. if (start_offset)
  1691. *start_offset = stack_bottom;
  1692. if (size)
  1693. *size = total_aux_stack_size;
  1694. return true;
  1695. }
  1696. return false;
  1697. }
  1698. #endif