/*
 * Copyright (C) 2022 Tencent Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "ems_gc.h"
#include "ems_gc_internal.h"

#define GB (1 << 30UL)

#define MARK_NODE_OBJ_CNT 256

#if WASM_ENABLE_GC != 0

/* mark node, used by the GC marker */
typedef struct mark_node_struct {
    /* number of to-expand objects that can be stored in this node */
    gc_size_t cnt;

    /* the first unused index */
    uint32 idx;

    /* next node on the node list */
    struct mark_node_struct *next;

    /* the actual to-expand object list */
    gc_object_t set[MARK_NODE_OBJ_CNT];
} mark_node_t;

/**
 * Allocate a mark node from the native heap
 *
 * @return a valid mark node on success, NULL otherwise
 */
static mark_node_t *
alloc_mark_node(void)
{
    mark_node_t *ret = (mark_node_t *)BH_MALLOC(sizeof(mark_node_t));

    if (!ret) {
        LOG_ERROR("alloc a new mark node failed");
        return NULL;
    }

    ret->cnt = sizeof(ret->set) / sizeof(ret->set[0]);
    ret->idx = 0;
    ret->next = NULL;
    return ret;
}

/**
 * Free a mark node back to the native heap
 *
 * @param node the mark node to free, should not be NULL
 */
static void
free_mark_node(mark_node_t *node)
{
    bh_assert(node);
    BH_FREE((gc_object_t)node);
}

/**
 * Sweep phase of the mark-sweep algorithm
 *
 * @param heap the heap to sweep, should be a valid instance heap
 *        which has already been marked
 */
static void
sweep_instance_heap(gc_heap_t *heap)
{
    hmu_t *cur = NULL, *end = NULL, *last = NULL;
    hmu_type_t ut;
    gc_size_t size;
    int i, lsize;
    gc_size_t tot_free = 0;

    bh_assert(gci_is_heap_valid(heap));

    cur = (hmu_t *)heap->base_addr;
    last = NULL;
    end = (hmu_t *)((char *)heap->base_addr + heap->current_size);

    /* reset KFC */
    lsize =
        (int)(sizeof(heap->kfc_normal_list) / sizeof(heap->kfc_normal_list[0]));
    for (i = 0; i < lsize; i++) {
        heap->kfc_normal_list[i].next = NULL;
    }
    heap->kfc_tree_root->right = NULL;
    heap->root_set = NULL;

    while (cur < end) {
        ut = hmu_get_ut(cur);
        size = hmu_get_size(cur);
        bh_assert(size > 0);

        if (ut == HMU_FC || ut == HMU_FM
            || (ut == HMU_VO && hmu_is_vo_freed(cur))
            || (ut == HMU_WO && !hmu_is_wo_marked(cur))) {
            /* merge previous free areas with current one */
            if (!last)
                last = cur;

            if (ut == HMU_WO) {
                /* invoke registered finalizer */
                gc_object_t cur_obj = hmu_to_obj(cur);
                if (gct_vm_get_extra_info_flag(cur_obj)) {
                    extra_info_node_t *node = gc_search_extra_info_node(
                        (gc_handle_t)heap, cur_obj, NULL);
                    bh_assert(node);
                    node->finalizer(node->obj, node->data);
                    gc_unset_finalizer((gc_handle_t)heap, cur_obj);
                }
            }
        }
        else {
            /* current block is still live */
            if (last) {
                tot_free += (gc_size_t)((char *)cur - (char *)last);
                gci_add_fc(heap, last, (gc_size_t)((char *)cur - (char *)last));
                hmu_mark_pinuse(last);
                last = NULL;
            }

            if (ut == HMU_WO) {
                /* unmark it */
                hmu_unmark_wo(cur);
            }
        }

        cur = (hmu_t *)((char *)cur + size);
    }

    bh_assert(cur == end);

    if (last) {
        tot_free += (gc_size_t)((char *)cur - (char *)last);
        gci_add_fc(heap, last, (gc_size_t)((char *)cur - (char *)last));
        hmu_mark_pinuse(last);
    }

    heap->total_free_size = tot_free;

#if GC_STAT_DATA != 0
    heap->total_gc_count++;
    if ((heap->current_size - tot_free) > heap->highmark_size)
        heap->highmark_size = heap->current_size - tot_free;
#endif

    gc_update_threshold(heap);
}
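
/*
 * Illustrative sketch (not part of the original source): the sweep loop above
 * coalesces runs of consecutive non-live blocks into one free chunk before
 * handing it to gci_add_fc(). The standalone toy below models only that
 * coalescing over an array of block sizes and liveness flags; toy_sweep and
 * its parameters are hypothetical names and use no heap internals.
 */
#if 0
#include <stdio.h>

static void
toy_sweep(const int *sizes, const int *live, int n)
{
    int i, run = 0;
    for (i = 0; i < n; i++) {
        if (!live[i]) {
            /* extend the current free run, mirroring "if (!last) last = cur" */
            run += sizes[i];
        }
        else if (run > 0) {
            /* a live block ends the run: emit one merged free chunk */
            printf("free chunk of %d bytes\n", run);
            run = 0;
        }
    }
    if (run > 0) /* trailing free run, like the final "if (last)" above */
        printf("free chunk of %d bytes\n", run);
}
#endif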

/**
 * Add a to-expand node to the to-expand list
 *
 * @param heap should be a valid instance heap
 * @param obj should be a valid wo inside @heap
 *
 * @return GC_ERROR if there is no more resource for marking,
 *         GC_SUCCESS otherwise
 */
static int
add_wo_to_expand(gc_heap_t *heap, gc_object_t obj)
{
    mark_node_t *mark_node = NULL, *new_node = NULL;
    hmu_t *hmu = NULL;

    bh_assert(obj);

    hmu = obj_to_hmu(obj);

    bh_assert(gci_is_heap_valid(heap));
    bh_assert((gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(hmu_get_ut(hmu) == HMU_WO);

    if (hmu_is_wo_marked(hmu))
        return GC_SUCCESS; /* already marked */

    mark_node = (mark_node_t *)heap->root_set;
    if (!mark_node || mark_node->idx == mark_node->cnt) {
        new_node = alloc_mark_node();
        if (!new_node) {
            LOG_ERROR("can not add obj to mark node because mark node "
                      "allocation failed");
            return GC_ERROR;
        }
        new_node->next = mark_node;
        heap->root_set = new_node;
        mark_node = new_node;
    }

    mark_node->set[mark_node->idx++] = obj;
    hmu_mark_wo(hmu);
    return GC_SUCCESS;
}
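
/*
 * Illustrative sketch (not part of the original source): heap->root_set is a
 * stack of fixed-capacity mark nodes; when the head node is full, a new node
 * is pushed in front of it, exactly as in add_wo_to_expand() above. The toy
 * below models that growth with a hypothetical 4-slot node to keep it short.
 */
#if 0
#include <stdlib.h>

#define TOY_NODE_CNT 4

typedef struct toy_node {
    int cnt, idx;
    struct toy_node *next;
    void *set[TOY_NODE_CNT];
} toy_node_t;

/* push obj onto the node stack rooted at *head, growing it when full */
static int
toy_push(toy_node_t **head, void *obj)
{
    toy_node_t *node = *head;
    if (!node || node->idx == node->cnt) {
        toy_node_t *new_node = calloc(1, sizeof(*new_node));
        if (!new_node)
            return -1; /* mirrors GC_ERROR: no resource left for marking */
        new_node->cnt = TOY_NODE_CNT;
        new_node->next = node;
        *head = new_node;
        node = new_node;
    }
    node->set[node->idx++] = obj;
    return 0;
}
#endif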

/* Check ems_gc.h for description */
int
gc_add_root(void *heap_p, gc_object_t obj)
{
    gc_heap_t *heap = (gc_heap_t *)heap_p;
    hmu_t *hmu = NULL;

    if (!obj) {
        LOG_ERROR("gc_add_root with NULL obj");
        return GC_ERROR;
    }

    hmu = obj_to_hmu(obj);

    if (!gci_is_heap_valid(heap)) {
        LOG_ERROR("vm_get_gc_handle_for_current_instance returns invalid heap");
        return GC_ERROR;
    }

    if (!((gc_uint8 *)hmu >= heap->base_addr
          && (gc_uint8 *)hmu < heap->base_addr + heap->current_size)) {
        LOG_ERROR("Obj is not an object in the current instance heap");
        return GC_ERROR;
    }

    if (hmu_get_ut(hmu) != HMU_WO) {
        LOG_ERROR("Given object is not a wo");
        return GC_ERROR;
    }

    if (add_wo_to_expand(heap, obj) != GC_SUCCESS) {
        heap->is_fast_marking_failed = 1;
        return GC_ERROR;
    }

    return GC_SUCCESS;
}
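
/*
 * Illustrative sketch (not part of the original source): during rootset
 * enumeration the runtime is expected to call gc_add_root() once per live
 * reference it finds, stopping on GC_ERROR. The loop below only shows that
 * calling pattern; enumerate_my_roots, my_roots and root_cnt are hypothetical
 * names, while gc_add_root(), GC_SUCCESS and GC_ERROR come from this file.
 */
#if 0
static int
enumerate_my_roots(void *heap_handle, gc_object_t *my_roots, int root_cnt)
{
    int i;
    for (i = 0; i < root_cnt; i++) {
        if (!my_roots[i])
            continue; /* skip empty slots, gc_add_root rejects NULL */
        if (gc_add_root(heap_handle, my_roots[i]) != GC_SUCCESS)
            return GC_ERROR; /* fast marking failed, reclaim will roll back */
    }
    return GC_SUCCESS;
}
#endif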

/**
 * Unmark all marked objects to do rollback
 *
 * @param heap the heap to do rollback, should be a valid instance heap
 */
static void
rollback_mark(gc_heap_t *heap)
{
    mark_node_t *mark_node = NULL, *next_mark_node = NULL;
    hmu_t *cur = NULL, *end = NULL;
    hmu_type_t ut;
    gc_size_t size;

    bh_assert(gci_is_heap_valid(heap));

    /* roll back */
    mark_node = (mark_node_t *)heap->root_set;
    while (mark_node) {
        next_mark_node = mark_node->next;
        free_mark_node(mark_node);
        mark_node = next_mark_node;
    }
    heap->root_set = NULL;

    /* then traverse the heap to unmark all marked wos */
    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
    while (cur < end) {
        ut = hmu_get_ut(cur);
        size = hmu_get_size(cur);
        if (ut == HMU_WO && hmu_is_wo_marked(cur)) {
            hmu_unmark_wo(cur);
        }
        cur = (hmu_t *)((char *)cur + size);
    }

    bh_assert(cur == end);
}

/**
 * Reclaim the GC instance heap
 *
 * @param heap the heap to reclaim, should be a valid instance heap
 *
 * @return GC_SUCCESS if success, GC_ERROR otherwise
 */
static int
reclaim_instance_heap(gc_heap_t *heap)
{
    mark_node_t *mark_node = NULL;
    int idx = 0, j = 0;
    bool ret, is_compact_mode = false;
    gc_object_t obj = NULL, ref = NULL;
    hmu_t *hmu = NULL;
    gc_uint32 ref_num = 0, ref_start_offset = 0, size = 0, offset = 0;
    gc_uint16 *ref_list = NULL;

    bh_assert(gci_is_heap_valid(heap));

    heap->root_set = NULL;

#if WASM_ENABLE_THREAD_MGR == 0
    if (!heap->exec_env)
        return GC_SUCCESS;
    ret = gct_vm_begin_rootset_enumeration(heap->exec_env, heap);
#else
    if (!heap->cluster)
        return GC_SUCCESS;
    ret = gct_vm_begin_rootset_enumeration(heap->cluster, heap);
#endif
    if (!ret)
        return GC_ERROR;

#if BH_ENABLE_GC_VERIFY != 0
    /* no matter whether the enumeration succeeded or not, the data
       collected so far should be checked first */
    mark_node = (mark_node_t *)heap->root_set;
    while (mark_node) {
        /* all nodes except the first should be completely filled */
        bh_assert(mark_node == (mark_node_t *)heap->root_set
                  || mark_node->idx == mark_node->cnt);

        /* all nodes should be non-empty */
        bh_assert(mark_node->idx > 0);

        for (idx = 0; idx < (int)mark_node->idx; idx++) {
            obj = mark_node->set[idx];
            hmu = obj_to_hmu(obj);
            bh_assert(hmu_is_wo_marked(hmu));
            bh_assert((gc_uint8 *)hmu >= heap->base_addr
                      && (gc_uint8 *)hmu
                             < heap->base_addr + heap->current_size);
        }

        mark_node = mark_node->next;
    }
#endif

    /* TODO: when fast marking fails, we could still fall back to slow
       marking; currently we simply roll back. */
    if (heap->is_fast_marking_failed) {
        LOG_ERROR("enumerate rootset failed");
        LOG_ERROR("all marked wos will be unmarked to keep the heap consistent");
        rollback_mark(heap);
        heap->is_fast_marking_failed = 0;
        return GC_ERROR;
    }

    /* The algorithm used to mark all objects:
       1. mark the rootset and organize it into a mark_node list (last marked
          roots at the list head, i.e. the stack top)
       2. in every iteration, use the top node to expand
       3. repeat step 2 until there is nothing left to expand
       This is a mixed BFS & DFS algorithm, but closer to DFS. */
    mark_node = (mark_node_t *)heap->root_set;
    while (mark_node) {
        heap->root_set = mark_node->next;

        /* note that mark_node->idx may change in each loop */
        for (idx = 0; idx < (int)mark_node->idx; idx++) {
            obj = mark_node->set[idx];
            hmu = obj_to_hmu(obj);
            size = hmu_get_size(hmu);

            if (!gct_vm_get_wasm_object_ref_list(obj, &is_compact_mode,
                                                 &ref_num, &ref_list,
                                                 &ref_start_offset)) {
                LOG_ERROR("mark process failed because "
                          "vm_get_wasm_object_ref_list failed");
                break;
            }

            if (ref_num >= 2U * GB) {
                LOG_ERROR("Invalid ref_num returned");
                break;
            }

            if (is_compact_mode) {
                for (j = 0; j < (int)ref_num; j++) {
                    offset = ref_start_offset + j * sizeof(void *);
                    bh_assert(offset + sizeof(void *) < size);
                    ref = *(gc_object_t *)(((gc_uint8 *)obj) + offset);
                    if (ref == NULL_REF || ((uintptr_t)ref & 1))
                        continue; /* null object or i31 object */
                    if (add_wo_to_expand(heap, ref) == GC_ERROR) {
                        LOG_ERROR("add_wo_to_expand failed");
                        break;
                    }
                }
                if (j < (int)ref_num)
                    break;
            }
            else {
                for (j = 0; j < (int)ref_num; j++) {
                    offset = ref_list[j];
                    bh_assert(offset + sizeof(void *) < size);
                    ref = *(gc_object_t *)(((gc_uint8 *)obj) + offset);
                    if (ref == NULL_REF || ((uintptr_t)ref & 1))
                        continue; /* null object or i31 object */
                    if (add_wo_to_expand(heap, ref) == GC_ERROR) {
                        LOG_ERROR("mark process failed");
                        break;
                    }
                }
                if (j < (int)ref_num)
                    break;
            }
        }

        if (idx < (int)mark_node->idx)
            break; /* not yet done */

        /* all objects in this mark_node have been expanded */
        free_mark_node(mark_node);
        mark_node = heap->root_set;
    }

    if (mark_node) {
        LOG_ERROR("mark process did not finish successfully");
        free_mark_node(mark_node);

        /* a rollback is required */
        rollback_mark(heap);
        return GC_ERROR;
    }

    /* now sweep */
    sweep_instance_heap(heap);

    (void)size;
    return GC_SUCCESS;
}
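
/*
 * Illustrative sketch (not part of the original source): the two marking
 * branches above differ only in how a field offset is obtained. In compact
 * mode the references sit contiguously starting at ref_start_offset, so the
 * j-th offset is computed; otherwise each offset is read from ref_list[].
 * The standalone toy below just demonstrates that offset arithmetic; its
 * name and parameters are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void
toy_ref_offsets(int is_compact, uint32_t ref_start_offset,
                const uint16_t *ref_list, uint32_t ref_num)
{
    uint32_t j, offset;
    for (j = 0; j < ref_num; j++) {
        offset = is_compact ? ref_start_offset + j * (uint32_t)sizeof(void *)
                            : ref_list[j];
        printf("ref %u at offset %u\n", j, offset);
    }
}
#endif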

/**
 * Do GC on the given heap
 *
 * @param h the heap to GC, should be a valid heap
 *
 * @return GC_SUCCESS if success, GC_ERROR otherwise
 */
int
gci_gc_heap(void *h)
{
    int ret = GC_ERROR;
    gc_heap_t *heap = (gc_heap_t *)h;

    bh_assert(gci_is_heap_valid(heap));

    LOG_VERBOSE("#reclaim instance heap %p", heap);

    /* TODO: get the exec_env of the current thread when GC multi-threading
       is enabled, and pass it to the runtime */
    gct_vm_gc_prepare(NULL);

    gct_vm_mutex_lock(&heap->lock);
    heap->is_doing_reclaim = 1;

    ret = reclaim_instance_heap(heap);

    heap->is_doing_reclaim = 0;
    gct_vm_mutex_unlock(&heap->lock);

    /* TODO: get the exec_env of the current thread when GC multi-threading
       is enabled, and pass it to the runtime */
    gct_vm_gc_finished(NULL);

    LOG_VERBOSE("#reclaim instance heap %p done", heap);

#if BH_ENABLE_GC_VERIFY != 0
    gci_verify_heap(heap);
#endif

#if GC_STAT_SHOW != 0
    gc_show_stat(heap);
    gc_show_fragment(heap);
#endif

    return ret;
}
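
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * would trigger gci_gc_heap() from an allocation slow path and retry the
 * allocation afterwards. try_alloc, alloc_with_gc and my_heap are
 * hypothetical names; only gci_gc_heap() and GC_SUCCESS come from this file.
 */
#if 0
/* hypothetical allocation helper, not part of this file */
static void *
try_alloc(gc_heap_t *heap, gc_size_t size);

static void *
alloc_with_gc(gc_heap_t *my_heap, gc_size_t size)
{
    void *p = try_alloc(my_heap, size);
    if (!p && gci_gc_heap(my_heap) == GC_SUCCESS)
        p = try_alloc(my_heap, size); /* retry once after a full collection */
    return p;
}
#endif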

int
gc_is_dead_object(void *obj)
{
    return !hmu_is_wo_marked(obj_to_hmu(obj));
}

#else

int
gci_gc_heap(void *h)
{
    (void)h;
    return GC_ERROR;
}

#endif /* end of WASM_ENABLE_GC != 0 */