ems_kfc.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "ems_gc_internal.h"

static gc_handle_t
gc_init_internal(gc_heap_t *heap, char *base_addr, gc_size_t heap_max_size)
{
    hmu_tree_node_t *root = NULL, *q = NULL;
    int ret;

    memset(heap, 0, sizeof *heap);
    memset(base_addr, 0, heap_max_size);

    ret = os_mutex_init(&heap->lock);
    if (ret != BHT_OK) {
        LOG_ERROR("[GC_ERROR]failed to init lock\n");
        return NULL;
    }

    /* init all data structures */
    heap->current_size = heap_max_size;
    heap->base_addr = (gc_uint8 *)base_addr;
    heap->heap_id = (gc_handle_t)heap;

    heap->total_free_size = heap->current_size;
    heap->highmark_size = 0;
#if WASM_ENABLE_GC != 0
    heap->gc_threshold_factor = GC_DEFAULT_THRESHOLD_FACTOR;
    gc_update_threshold(heap);
#endif

    root = heap->kfc_tree_root = (hmu_tree_node_t *)heap->kfc_tree_root_buf;
    memset(root, 0, sizeof *root);
    root->size = sizeof *root;
    hmu_set_ut(&root->hmu_header, HMU_FC);
    hmu_set_size(&root->hmu_header, sizeof *root);

    q = (hmu_tree_node_t *)heap->base_addr;
    memset(q, 0, sizeof *q);
    hmu_set_ut(&q->hmu_header, HMU_FC);
    hmu_set_size(&q->hmu_header, heap->current_size);

    ASSERT_TREE_NODE_ALIGNED_ACCESS(q);
    ASSERT_TREE_NODE_ALIGNED_ACCESS(root);

    hmu_mark_pinuse(&q->hmu_header);
    root->right = q;
    q->parent = root;
    q->size = heap->current_size;

    bh_assert(root->size <= HMU_FC_NORMAL_MAX_SIZE);

    return heap;
}
gc_handle_t
gc_init_with_pool(char *buf, gc_size_t buf_size)
{
    char *buf_end = buf + buf_size;
    char *buf_aligned = (char *)(((uintptr_t)buf + 7) & (uintptr_t)~7);
    char *base_addr = buf_aligned + sizeof(gc_heap_t);
    gc_heap_t *heap = (gc_heap_t *)buf_aligned;
    gc_size_t heap_max_size;

    if (buf_size < APP_HEAP_SIZE_MIN) {
        LOG_ERROR("[GC_ERROR]heap init buf size (%" PRIu32 ") < %" PRIu32 "\n",
                  buf_size, (uint32)APP_HEAP_SIZE_MIN);
        return NULL;
    }

    base_addr =
        (char *)(((uintptr_t)base_addr + 7) & (uintptr_t)~7) + GC_HEAD_PADDING;
    heap_max_size = (uint32)(buf_end - base_addr) & (uint32)~7;

#if WASM_ENABLE_MEMORY_TRACING != 0
    os_printf("Heap created, total size: %u\n", buf_size);
    os_printf("    heap struct size: %u\n", sizeof(gc_heap_t));
    os_printf("    actual heap size: %u\n", heap_max_size);
    os_printf("    padding bytes: %u\n",
              buf_size - sizeof(gc_heap_t) - heap_max_size);
#endif
    return gc_init_internal(heap, base_addr, heap_max_size);
}
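
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * creating a heap inside a single caller-provided buffer and tearing it
 * down. The buffer name and size are assumptions; gc_alloc_vo/gc_free_vo
 * are assumed to be the allocator entry points declared in ems_gc.h.
 *
 *     static char heap_buf[512 * 1024];
 *
 *     gc_handle_t heap = gc_init_with_pool(heap_buf, sizeof(heap_buf));
 *     if (heap) {
 *         gc_object_t obj = gc_alloc_vo(heap, 128);
 *         if (obj)
 *             gc_free_vo(heap, obj);
 *         gc_destroy_with_pool(heap);
 *     }
 */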
gc_handle_t
gc_init_with_struct_and_pool(char *struct_buf, gc_size_t struct_buf_size,
                             char *pool_buf, gc_size_t pool_buf_size)
{
    gc_heap_t *heap = (gc_heap_t *)struct_buf;
    char *base_addr = pool_buf + GC_HEAD_PADDING;
    char *pool_buf_end = pool_buf + pool_buf_size;
    gc_size_t heap_max_size;

    if ((((uintptr_t)struct_buf) & 7) != 0) {
        LOG_ERROR("[GC_ERROR]heap init struct buf not 8-byte aligned\n");
        return NULL;
    }

    /* the struct buffer must hold the whole gc_heap_t
       (see gc_get_heap_struct_size) */
    if (struct_buf_size < sizeof(gc_heap_t)) {
        LOG_ERROR("[GC_ERROR]heap init struct buf size (%" PRIu32 ") < %zu\n",
                  struct_buf_size, sizeof(gc_heap_t));
        return NULL;
    }

    if ((((uintptr_t)pool_buf) & 7) != 0) {
        LOG_ERROR("[GC_ERROR]heap init pool buf not 8-byte aligned\n");
        return NULL;
    }

    if (pool_buf_size < APP_HEAP_SIZE_MIN) {
        LOG_ERROR("[GC_ERROR]heap init buf size (%" PRIu32 ") < %u\n",
                  pool_buf_size, APP_HEAP_SIZE_MIN);
        return NULL;
    }

    heap_max_size = (uint32)(pool_buf_end - base_addr) & (uint32)~7;

#if WASM_ENABLE_MEMORY_TRACING != 0
    os_printf("Heap created, total size: %u\n",
              struct_buf_size + pool_buf_size);
    os_printf("    heap struct size: %u\n", sizeof(gc_heap_t));
    os_printf("    actual heap size: %u\n", heap_max_size);
    os_printf("    padding bytes: %u\n", pool_buf_size - heap_max_size);
#endif
    return gc_init_internal(heap, base_addr, heap_max_size);
}
int
gc_destroy_with_pool(gc_handle_t handle)
{
    gc_heap_t *heap = (gc_heap_t *)handle;
    int ret = GC_SUCCESS;

#if WASM_ENABLE_GC != 0
    gc_size_t i = 0;
    if (heap->extra_info_node_cnt > 0) {
        for (i = 0; i < heap->extra_info_node_cnt; i++) {
            extra_info_node_t *node = heap->extra_info_nodes[i];
#if BH_ENABLE_GC_VERIFY != 0
            os_printf("Memory leak detected: gc object [%p] not claimed\n",
                      node->obj);
#endif
            bh_assert(heap->is_reclaim_enabled);
            node->finalizer(node->obj, node->data);
            BH_FREE(heap->extra_info_nodes[i]);
        }

        if (heap->extra_info_nodes != heap->extra_info_normal_nodes) {
            BH_FREE(heap->extra_info_nodes);
        }
    }
#endif

#if BH_ENABLE_GC_VERIFY != 0
    hmu_t *cur = (hmu_t *)heap->base_addr;
    hmu_t *end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
    if (
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        !heap->is_heap_corrupted &&
#endif
        (hmu_t *)((char *)cur + hmu_get_size(cur)) != end) {
        LOG_WARNING("Memory leak detected:\n");
        gci_dump(heap);
        ret = GC_ERROR;
    }
#endif

    os_mutex_destroy(&heap->lock);
    memset(heap->base_addr, 0, heap->current_size);
    memset(heap, 0, sizeof(gc_heap_t));
    return ret;
}
#if WASM_ENABLE_GC != 0
#if WASM_ENABLE_THREAD_MGR == 0
void
gc_enable_gc_reclaim(gc_handle_t handle, void *exec_env)
{
    gc_heap_t *heap = (gc_heap_t *)handle;

    heap->is_reclaim_enabled = 1;
    heap->exec_env = exec_env;
}
#else
void
gc_enable_gc_reclaim(gc_handle_t handle, void *cluster)
{
    gc_heap_t *heap = (gc_heap_t *)handle;

    heap->is_reclaim_enabled = 1;
    heap->cluster = cluster;
}
#endif
#endif

uint32
gc_get_heap_struct_size()
{
    return sizeof(gc_heap_t);
}
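
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * the split-buffer variant keeps the heap bookkeeping and the pool in
 * separate, caller-owned regions; both must be 8-byte aligned. The
 * buffer names and sizes below are assumptions.
 *
 *     static char pool_buf[256 * 1024] __attribute__((aligned(8)));
 *     char *struct_buf = BH_MALLOC(gc_get_heap_struct_size());
 *
 *     gc_handle_t heap = gc_init_with_struct_and_pool(
 *         struct_buf, gc_get_heap_struct_size(), pool_buf, sizeof(pool_buf));
 *     ...
 *     gc_destroy_with_pool(heap);
 *     BH_FREE(struct_buf);
 */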
static void
adjust_ptr(uint8 **p_ptr, intptr_t offset)
{
    if (!*p_ptr) {
        return;
    }

    /*
     * to resolve a possible signed integer overflow issue
     * when p_ptr is over 0x8000000000000000 by not using
     * `(intptr_t)`
     */
    uintptr_t offset_val = 0;
#if UINTPTR_MAX == UINT64_MAX
    offset_val = labs(offset);
#else
    offset_val = abs(offset);
#endif

    if (offset > 0) {
        *p_ptr = (uint8 *)((uintptr_t)(*p_ptr) + offset_val);
    }
    else {
        *p_ptr = (uint8 *)((uintptr_t)(*p_ptr) - offset_val);
    }
}
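
/*
 * Editor's note (worked example expanding the comment above): if *p_ptr
 * were 0x9000000000000000, converting it to intptr_t and adding a signed
 * offset could overflow; instead the magnitude |offset| is added to or
 * subtracted from the pointer as an unsigned uintptr_t, where wraparound
 * is well defined.
 */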
int
gc_migrate(gc_handle_t handle, char *pool_buf_new, gc_size_t pool_buf_size)
{
    gc_heap_t *heap = (gc_heap_t *)handle;
    char *base_addr_new = pool_buf_new + GC_HEAD_PADDING;
    char *pool_buf_end = pool_buf_new + pool_buf_size;
    intptr_t offset = (uint8 *)base_addr_new - (uint8 *)heap->base_addr;
    hmu_t *cur = NULL, *end = NULL;
    hmu_tree_node_t *tree_node;
    uint8 **p_left, **p_right, **p_parent;
    gc_size_t heap_max_size, size;

    if ((((uintptr_t)pool_buf_new) & 7) != 0) {
        LOG_ERROR("[GC_ERROR]heap migrate pool buf not 8-byte aligned\n");
        return GC_ERROR;
    }

    heap_max_size = (uint32)(pool_buf_end - base_addr_new) & (uint32)~7;

    if (pool_buf_end < base_addr_new || heap_max_size < heap->current_size) {
        LOG_ERROR("[GC_ERROR]heap migrate invalid pool buf size\n");
        return GC_ERROR;
    }

    if (offset == 0)
        return 0;

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
        return GC_ERROR;
    }
#endif

    heap->base_addr = (uint8 *)base_addr_new;

    ASSERT_TREE_NODE_ALIGNED_ACCESS(heap->kfc_tree_root);

    p_left = (uint8 **)((uint8 *)heap->kfc_tree_root
                        + offsetof(hmu_tree_node_t, left));
    p_right = (uint8 **)((uint8 *)heap->kfc_tree_root
                         + offsetof(hmu_tree_node_t, right));
    p_parent = (uint8 **)((uint8 *)heap->kfc_tree_root
                          + offsetof(hmu_tree_node_t, parent));

    adjust_ptr(p_left, offset);
    adjust_ptr(p_right, offset);
    adjust_ptr(p_parent, offset);

    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
    while (cur < end) {
        size = hmu_get_size(cur);
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (size <= 0 || size > (uint32)((uint8 *)end - (uint8 *)cur)) {
            LOG_ERROR("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
            heap->is_heap_corrupted = true;
            return GC_ERROR;
        }
#endif
        if (hmu_get_ut(cur) == HMU_FC && !HMU_IS_FC_NORMAL(size)) {
            tree_node = (hmu_tree_node_t *)cur;

            ASSERT_TREE_NODE_ALIGNED_ACCESS(tree_node);

            p_left = (uint8 **)((uint8 *)tree_node
                                + offsetof(hmu_tree_node_t, left));
            p_right = (uint8 **)((uint8 *)tree_node
                                 + offsetof(hmu_tree_node_t, right));
            p_parent = (uint8 **)((uint8 *)tree_node
                                  + offsetof(hmu_tree_node_t, parent));

            adjust_ptr(p_left, offset);
            adjust_ptr(p_right, offset);
            if (tree_node->parent != heap->kfc_tree_root)
                /* The root node belongs to the heap structure; it is a
                   fixed part and isn't moved. */
                adjust_ptr(p_parent, offset);
        }
        cur = (hmu_t *)((char *)cur + size);
    }

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (cur != end) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
        heap->is_heap_corrupted = true;
        return GC_ERROR;
    }
#else
    bh_assert(cur == end);
#endif

    return 0;
}
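
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * moving the heap into a larger buffer, e.g. after the pool has been
 * reallocated elsewhere. This assumes the gc_init_with_struct_and_pool
 * layout (heap struct outside the pool); the names and sizes below are
 * assumptions. The caller copies the contents first, then gc_migrate
 * fixes up the internal free-chunk tree pointers.
 *
 *     char *new_buf = BH_MALLOC(new_size);   // assumed 8-byte aligned
 *
 *     memcpy(new_buf, old_buf, old_size);    // carry live contents over
 *     if (gc_migrate(heap, new_buf, new_size) == 0) {
 *         BH_FREE(old_buf);                  // heap now lives in new_buf
 *     }
 */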
bool
gc_is_heap_corrupted(gc_handle_t handle)
{
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    gc_heap_t *heap = (gc_heap_t *)handle;

    return heap->is_heap_corrupted ? true : false;
#else
    return false;
#endif
}

#if BH_ENABLE_GC_VERIFY != 0
void
gci_verify_heap(gc_heap_t *heap)
{
    hmu_t *cur = NULL, *end = NULL;

    bh_assert(heap && gci_is_heap_valid(heap));
    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)(heap->base_addr + heap->current_size);
    while (cur < end) {
        hmu_verify(heap, cur);
        cur = (hmu_t *)((gc_uint8 *)cur + hmu_get_size(cur));
    }
    bh_assert(cur == end);
}
#endif
void
gc_heap_stat(void *heap_ptr, gc_stat_t *stat)
{
    hmu_t *cur = NULL, *end = NULL;
    hmu_type_t ut;
    gc_size_t size;
    gc_heap_t *heap = (gc_heap_t *)heap_ptr;

    memset(stat, 0, sizeof(gc_stat_t));
    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)((char *)heap->base_addr + heap->current_size);

    while (cur < end) {
        ut = hmu_get_ut(cur);
        size = hmu_get_size(cur);
        bh_assert(size > 0);

        if (ut == HMU_FC || ut == HMU_FM
            || (ut == HMU_VO && hmu_is_vo_freed(cur))
            || (ut == HMU_WO && !hmu_is_wo_marked(cur))) {
            if (ut == HMU_VO)
                stat->vo_free += size;
            if (ut == HMU_WO)
                stat->wo_free += size;
            stat->free += size;
            stat->free_block++;
            if (size / sizeof(int) < GC_HEAP_STAT_SIZE - 1)
                stat->free_sizes[size / sizeof(int)] += 1;
            else
                stat->free_sizes[GC_HEAP_STAT_SIZE - 1] += 1;
        }
        else {
            if (ut == HMU_VO)
                stat->vo_usage += size;
            if (ut == HMU_WO)
                stat->wo_usage += size;
            stat->usage += size;
            stat->usage_block++;
            if (size / sizeof(int) < GC_HEAP_STAT_SIZE - 1)
                stat->usage_sizes[size / sizeof(int)] += 1;
            else
                stat->usage_sizes[GC_HEAP_STAT_SIZE - 1] += 1;
        }

        cur = (hmu_t *)((char *)cur + size);
    }
}
void
gc_print_stat(void *heap_ptr, int verbose)
{
    gc_stat_t stat;
    int i;

    bh_assert(heap_ptr != NULL);
    gc_heap_t *heap = (gc_heap_t *)(heap_ptr);

    gc_heap_stat(heap, &stat);

    os_printf("# stat %s %p use %d free %d \n", "instance", heap, stat.usage,
              stat.free);
    os_printf("# stat %s %p wo_usage %d vo_usage %d \n", "instance", heap,
              stat.wo_usage, stat.vo_usage);
    os_printf("# stat %s %p wo_free %d vo_free %d \n", "instance", heap,
              stat.wo_free, stat.vo_free);
#if WASM_ENABLE_GC == 0
    os_printf("# stat free size %" PRIu32 " high %" PRIu32 "\n",
              heap->total_free_size, heap->highmark_size);
#else
    os_printf("# stat gc %" PRIu32 " free size %" PRIu32 " high %" PRIu32 "\n",
              heap->total_gc_count, heap->total_free_size,
              heap->highmark_size);
#endif
    if (verbose) {
        os_printf("usage sizes: \n");
        for (i = 0; i < GC_HEAP_STAT_SIZE; i++)
            if (stat.usage_sizes[i])
                os_printf(" %d: %d; ", i * 4, stat.usage_sizes[i]);
        os_printf(" \n");
        os_printf("free sizes: \n");
        for (i = 0; i < GC_HEAP_STAT_SIZE; i++)
            if (stat.free_sizes[i])
                os_printf(" %d: %d; ", i * 4, stat.free_sizes[i]);
    }
}
void *
gc_heap_stats(void *heap_arg, uint32 *stats, int size)
{
    int i;
    gc_heap_t *heap = (gc_heap_t *)heap_arg;

    if (!gci_is_heap_valid(heap)) {
        for (i = 0; i < size; i++)
            stats[i] = 0;
        return NULL;
    }

    for (i = 0; i < size; i++) {
        switch (i) {
            case GC_STAT_TOTAL:
                stats[i] = heap->current_size;
                break;
            case GC_STAT_FREE:
                stats[i] = heap->total_free_size;
                break;
            case GC_STAT_HIGHMARK:
                stats[i] = heap->highmark_size;
                break;
#if WASM_ENABLE_GC != 0
            case GC_STAT_COUNT:
                stats[i] = heap->total_gc_count;
                break;
            case GC_STAT_TIME:
                stats[i] = heap->total_gc_time;
                break;
#endif
            default:
                break;
        }
    }

    return heap;
}
void
gc_traverse_tree(hmu_tree_node_t *node, gc_size_t *stats, int *n)
{
    if (!node)
        return;

    if (*n > 0)
        gc_traverse_tree(node->right, stats, n);

    if (*n > 0) {
        (*n)--;
        stats[*n] = node->size;
    }

    if (*n > 0)
        gc_traverse_tree(node->left, stats, n);
}
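
/*
 * Editor's note (assumption: the KFC tree is ordered by chunk size, with
 * larger free chunks to the right): this reverse in-order walk visits
 * nodes from largest to smallest, storing sizes at stats[*n - 1] down to
 * stats[0], so afterwards stats[0..n-1] holds the n largest free sizes in
 * ascending order. A minimal sketch of a direct call (callers such as
 * gc_show_fragment below take heap->lock first):
 *
 *     gc_size_t top[4] = { 0 };
 *     int n = 4;
 *
 *     gc_traverse_tree(heap->kfc_tree_root, top, &n);
 */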
void
gc_show_stat(void *heap)
{
    uint32 stats[GC_STAT_MAX];

    heap = gc_heap_stats(heap, stats, GC_STAT_MAX);

    os_printf("\n[GC stats %p] %" PRIu32 " %" PRIu32 " %" PRIu32 " %" PRIu32
              " %" PRIu32 "\n",
              heap, stats[0], stats[1], stats[2], stats[3], stats[4]);
}

#if WASM_ENABLE_GC != 0
void
gc_show_fragment(void *heap_arg)
{
    uint32 stats[3];
    int n = 3;
    gc_heap_t *heap = (gc_heap_t *)heap_arg;

    memset(stats, 0, n * sizeof(int));
    gct_vm_mutex_lock(&heap->lock);
    gc_traverse_tree(heap->kfc_tree_root, (gc_size_t *)stats, &n);
    gct_vm_mutex_unlock(&heap->lock);
    os_printf("\n[GC %p top sizes] %" PRIu32 " %" PRIu32 " %" PRIu32 "\n",
              heap, stats[0], stats[1], stats[2]);
}

#if WASM_ENABLE_GC_PERF_PROFILING != 0
void
gc_dump_perf_profiling(gc_handle_t *handle)
{
    gc_heap_t *gc_heap_handle = (void *)handle;

    if (gc_heap_handle) {
        os_printf("\nGC performance summary\n");
        os_printf("  Total GC time (ms): %u\n",
                  gc_heap_handle->total_gc_time);
        os_printf("  Max GC time (ms): %u\n", gc_heap_handle->max_gc_time);
    }
    else {
        os_printf("Failed to dump GC performance\n");
    }
}
#endif
#endif