/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "ems_gc_internal.h"

#if WASM_ENABLE_GC != 0
#define LOCK_HEAP(heap)                                                    \
    do {                                                                   \
        if (!heap->is_doing_reclaim)                                       \
            /* If the heap is doing reclaim, it must have been locked,     \
               we should not lock the heap again. */                       \
            os_mutex_lock(&heap->lock);                                    \
    } while (0)
#define UNLOCK_HEAP(heap)                                                  \
    do {                                                                   \
        if (!heap->is_doing_reclaim)                                       \
            /* If the heap is doing reclaim, it must have been locked,     \
               and will be unlocked after reclaim, we should not           \
               unlock the heap again. */                                   \
            os_mutex_unlock(&heap->lock);                                  \
    } while (0)
#else
#define LOCK_HEAP(heap) os_mutex_lock(&heap->lock)
#define UNLOCK_HEAP(heap) os_mutex_unlock(&heap->lock)
#endif

static inline bool
hmu_is_in_heap(void *hmu, gc_uint8 *heap_base_addr, gc_uint8 *heap_end_addr)
{
    gc_uint8 *addr = (gc_uint8 *)hmu;
    return (addr >= heap_base_addr && addr < heap_end_addr) ? true : false;
}

/**
 * Remove a node from the tree it belongs to
 *
 * @param p the node to remove, can not be NULL, can not be the ROOT node
 *        the node will be removed from the tree, and the left, right and
 *        parent pointers of the node @p will be set to be NULL. Other fields
 *        won't be touched. The tree will be re-organized so that the order
 *        conditions are still satisfied.
 */
static bool
remove_tree_node(gc_heap_t *heap, hmu_tree_node_t *p)
{
    hmu_tree_node_t *q = NULL, **slot = NULL;
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    hmu_tree_node_t *root = heap->kfc_tree_root, *parent;
    gc_uint8 *base_addr = heap->base_addr;
    gc_uint8 *end_addr = base_addr + heap->current_size;
#endif

    bh_assert(p);

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    parent = p->parent;
    if (!parent || p == root /* p can not be the ROOT node */
        || !hmu_is_in_heap(p, base_addr, end_addr)
        || (parent != root && !hmu_is_in_heap(parent, base_addr, end_addr))) {
        goto fail;
    }
#endif

    /* get the slot which holds pointer to node p */
    if (p == p->parent->right) {
        /* Don't use `slot = &p->parent->right` to avoid compiler warning */
        slot = (hmu_tree_node_t **)((uint8 *)p->parent
                                    + offsetof(hmu_tree_node_t, right));
    }
    else if (p == p->parent->left) {
        /* p should be a child of its parent */
        /* Don't use `slot = &p->parent->left` to avoid compiler warning */
        slot = (hmu_tree_node_t **)((uint8 *)p->parent
                                    + offsetof(hmu_tree_node_t, left));
    }
    else {
        goto fail;
    }

    /**
     * Algorithm used to remove node p:
     * case 1: if p has no left child, replace p with its right child
     * case 2: if p has no right child, replace p with its left child
     * case 3: otherwise, find p's predecessor, remove it from the tree
     *         and replace p with it.
     * Using the predecessor keeps the left <= root < right condition.
     */
    if (!p->left) {
        /* move right child up*/
        *slot = p->right;
        if (p->right) {
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
            if (!hmu_is_in_heap(p->right, base_addr, end_addr)) {
                goto fail;
            }
#endif
            p->right->parent = p->parent;
        }

        p->left = p->right = p->parent = NULL;
        return true;
    }

    if (!p->right) {
        /* move left child up*/
        *slot = p->left;
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (!hmu_is_in_heap(p->left, base_addr, end_addr)) {
            goto fail;
        }
#endif
        /* p->left can never be NULL unless it is corrupted. */
        p->left->parent = p->parent;

        p->left = p->right = p->parent = NULL;
        return true;
    }

    /* both left & right exist, find p's predecessor at first*/
    q = p->left;
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (!hmu_is_in_heap(q, base_addr, end_addr)) {
        goto fail;
    }
#endif
    while (q->right) {
        q = q->right;
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (!hmu_is_in_heap(q, base_addr, end_addr)) {
            goto fail;
        }
#endif
    }

    /* remove from the tree*/
    if (!remove_tree_node(heap, q))
        return false;

    *slot = q;
    q->parent = p->parent;
    q->left = p->left;
    q->right = p->right;
    if (q->left) {
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (!hmu_is_in_heap(q->left, base_addr, end_addr)) {
            goto fail;
        }
#endif
        q->left->parent = q;
    }
    if (q->right) {
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (!hmu_is_in_heap(q->right, base_addr, end_addr)) {
            goto fail;
        }
#endif
        q->right->parent = q;
    }

    p->left = p->right = p->parent = NULL;
    return true;

fail:
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    heap->is_heap_corrupted = true;
#endif
    return false;
}

static bool
unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
{
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    gc_uint8 *base_addr, *end_addr;
#endif
    gc_size_t size;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (hmu_get_ut(hmu) != HMU_FC) {
        heap->is_heap_corrupted = true;
        return false;
    }
#endif

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;
#endif

    size = hmu_get_size(hmu);

    if (HMU_IS_FC_NORMAL(size)) {
        uint32 node_idx = size >> 3;
        hmu_normal_node_t *node_prev = NULL, *node_next;
        hmu_normal_node_t *node = heap->kfc_normal_list[node_idx].next;

        while (node) {
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
            if (!hmu_is_in_heap(node, base_addr, end_addr)) {
                heap->is_heap_corrupted = true;
                return false;
            }
#endif
            node_next = get_hmu_normal_node_next(node);
            if ((hmu_t *)node == hmu) {
                if (!node_prev) /* list head */
                    heap->kfc_normal_list[node_idx].next = node_next;
                else
                    set_hmu_normal_node_next(node_prev, node_next);
                break;
            }
            node_prev = node;
            node = node_next;
        }

        if (!node) {
            LOG_ERROR("[GC_ERROR]couldn't find the node in the normal list\n");
        }
    }
    else {
        if (!remove_tree_node(heap, (hmu_tree_node_t *)hmu))
            return false;
    }
    return true;
}
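
/**
 * Record the size of a free chunk in its last 4 bytes (a boundary-tag style
 * footer). gc_free_vo() reads this footer back through `*((int *)hmu - 1)`
 * to locate the previous chunk when merging adjacent free chunks.
 */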
static void
hmu_set_free_size(hmu_t *hmu)
{
    gc_size_t size;

    bh_assert(hmu && hmu_get_ut(hmu) == HMU_FC);

    size = hmu_get_size(hmu);
    *((uint32 *)((char *)hmu + size) - 1) = size;
}
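
/*
 * Note on the free-chunk (FC) bookkeeping used below: chunks small enough to
 * satisfy HMU_IS_FC_NORMAL() are kept in the size-segregated singly linked
 * lists heap->kfc_normal_list[size >> 3], while larger chunks are kept in a
 * binary tree rooted at heap->kfc_tree_root, ordered by size so that
 * left <= node < right holds.
 */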

/**
 * Add free chunk back to KFC
 *
 * @param heap should not be NULL and it should be a valid heap
 * @param hmu should not be NULL and it should be a HMU of length @size inside
 *        @heap hmu should be 8-bytes aligned
 * @param size should be positive and multiple of 8
 *        hmu with size @size will be added into KFC as a new FC.
 */
bool
gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
{
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    gc_uint8 *base_addr, *end_addr;
#endif
    hmu_normal_node_t *np = NULL;
    hmu_tree_node_t *root = NULL, *tp = NULL, *node = NULL;
    uint32 node_idx;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(((gc_uint32)(uintptr_t)hmu_to_obj(hmu) & 7) == 0);
    bh_assert(size > 0
              && ((gc_uint8 *)hmu) + size
                     <= heap->base_addr + heap->current_size);
    bh_assert(!(size & 7));

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;
#endif

    hmu_set_ut(hmu, HMU_FC);
    hmu_set_size(hmu, size);
    hmu_set_free_size(hmu);

    if (HMU_IS_FC_NORMAL(size)) {
        np = (hmu_normal_node_t *)hmu;
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (!hmu_is_in_heap(np, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return false;
        }
#endif

        node_idx = size >> 3;
        set_hmu_normal_node_next(np, heap->kfc_normal_list[node_idx].next);
        heap->kfc_normal_list[node_idx].next = np;
        return true;
    }

    /* big block */
    node = (hmu_tree_node_t *)hmu;
    node->size = size;
    node->left = node->right = node->parent = NULL;

    /* find proper node to link this new node to */
    root = heap->kfc_tree_root;
    tp = root;
    bh_assert(tp->size < size);
    while (1) {
        if (tp->size < size) {
            if (!tp->right) {
                tp->right = node;
                node->parent = tp;
                break;
            }
            tp = tp->right;
        }
        else { /* tp->size >= size */
            if (!tp->left) {
                tp->left = node;
                node->parent = tp;
                break;
            }
            tp = tp->left;
        }
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (!hmu_is_in_heap(tp, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return false;
        }
#endif
    }

    return true;
}

/**
 * Find a proper hmu for required memory size
 *
 * @param heap should not be NULL and should be a valid heap
 * @param size should cover the header and should be 8 bytes aligned
 *
 * GC will not be performed here.
 * Heap extension will not be performed here.
 *
 * @return hmu allocated if success, which will be aligned to 8 bytes,
 *         NULL otherwise
 */
static hmu_t *
alloc_hmu(gc_heap_t *heap, gc_size_t size)
{
    gc_uint8 *base_addr, *end_addr;
    hmu_normal_list_t *normal_head = NULL;
    hmu_normal_node_t *p = NULL;
    uint32 node_idx = 0, init_node_idx = 0;
    hmu_tree_node_t *root = NULL, *tp = NULL, *last_tp = NULL;
    hmu_t *next, *rest;
    uintptr_t tp_ret;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

#if WASM_ENABLE_GC != 0
    /* In doing reclaim, gc must not alloc memory again. */
    bh_assert(!heap->is_doing_reclaim);
#endif

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    if (size < GC_SMALLEST_SIZE)
        size = GC_SMALLEST_SIZE;

    /* check normal list at first*/
    if (HMU_IS_FC_NORMAL(size)) {
        /* find a non-empty slot in normal_node_list with good size*/
        init_node_idx = (size >> 3);
        for (node_idx = init_node_idx; node_idx < HMU_NORMAL_NODE_CNT;
             node_idx++) {
            normal_head = heap->kfc_normal_list + node_idx;
            if (normal_head->next)
                break;
            normal_head = NULL;
        }

        /* found in normal list*/
        if (normal_head) {
            bh_assert(node_idx >= init_node_idx);

            p = normal_head->next;
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
            if (!hmu_is_in_heap(p, base_addr, end_addr)) {
                heap->is_heap_corrupted = true;
                return NULL;
            }
#endif
            normal_head->next = get_hmu_normal_node_next(p);
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
            if (((gc_int32)(uintptr_t)hmu_to_obj(p) & 7) != 0) {
                heap->is_heap_corrupted = true;
                return NULL;
            }
#endif

            if ((gc_size_t)node_idx != (uint32)init_node_idx
                /* with bigger size*/
                && ((gc_size_t)node_idx << 3) >= size + GC_SMALLEST_SIZE) {
                rest = (hmu_t *)(((char *)p) + size);
                if (!gci_add_fc(heap, rest, (node_idx << 3) - size)) {
                    return NULL;
                }
                hmu_mark_pinuse(rest);
            }
            else {
                size = node_idx << 3;
                next = (hmu_t *)((char *)p + size);
                if (hmu_is_in_heap(next, base_addr, end_addr))
                    hmu_mark_pinuse(next);
            }

            heap->total_free_size -= size;
            if ((heap->current_size - heap->total_free_size)
                > heap->highmark_size)
                heap->highmark_size =
                    heap->current_size - heap->total_free_size;

            hmu_set_size((hmu_t *)p, size);
            return (hmu_t *)p;
        }
    }

    /* need to find a node in tree*/
    root = heap->kfc_tree_root;

    /* find the best node*/
    bh_assert(root);
    tp = root->right;
    while (tp) {
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (!hmu_is_in_heap(tp, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return NULL;
        }
#endif
        if (tp->size < size) {
            tp = tp->right;
            continue;
        }

        /* record the last node with size equal to or bigger than given size*/
        last_tp = tp;
        tp = tp->left;
    }

    if (last_tp) {
        bh_assert(last_tp->size >= size);

        /* alloc in last_p*/

        /* remove node last_p from tree*/
        if (!remove_tree_node(heap, last_tp))
            return NULL;

        if (last_tp->size >= size + GC_SMALLEST_SIZE) {
            rest = (hmu_t *)((char *)last_tp + size);
            if (!gci_add_fc(heap, rest, last_tp->size - size))
                return NULL;
            hmu_mark_pinuse(rest);
        }
        else {
            size = last_tp->size;
            next = (hmu_t *)((char *)last_tp + size);
            if (hmu_is_in_heap(next, base_addr, end_addr))
                hmu_mark_pinuse(next);
        }

        heap->total_free_size -= size;
        if ((heap->current_size - heap->total_free_size) > heap->highmark_size)
            heap->highmark_size = heap->current_size - heap->total_free_size;

        hmu_set_size((hmu_t *)last_tp, size);
        tp_ret = (uintptr_t)last_tp;
        return (hmu_t *)tp_ret;
    }

    return NULL;
}

#if WASM_ENABLE_GC != 0
static int
do_gc_heap(gc_heap_t *heap)
{
    int ret = GC_SUCCESS;
#if WASM_ENABLE_GC_PERF_PROFILING != 0
    uint64 start = 0, end = 0, time = 0;
    start = os_time_get_boot_microsecond();
#endif

    if (heap->is_reclaim_enabled) {
        UNLOCK_HEAP(heap);
        ret = gci_gc_heap(heap);
        LOCK_HEAP(heap);
    }

#if WASM_ENABLE_GC_PERF_PROFILING != 0
    end = os_time_get_boot_microsecond();
    time = end - start;
    heap->total_gc_time += time;
    if (time > heap->max_gc_time) {
        heap->max_gc_time = time;
    }
    heap->total_gc_count += 1;
#endif

    return ret;
}
#endif

/**
 * Find a proper HMU with given size
 *
 * @param heap should not be NULL and should be a valid heap
 * @param size should cover the header and should be 8 bytes aligned
 *
 * Note: This function will try several ways to satisfy the allocation request:
 *       1. Find a proper HMU among the available free chunks.
 *       2. Trigger GC if step 1 fails.
 *       3. Search the available free chunks again.
 *       4. Return NULL if step 3 fails.
 *
 * @return hmu allocated if success, which will be aligned to 8 bytes,
 *         NULL otherwise
 */
static hmu_t *
alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
{
    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

#if WASM_ENABLE_GC != 0
#if GC_IN_EVERY_ALLOCATION != 0
    if (GC_SUCCESS != do_gc_heap(heap))
        return NULL;
#else
    if (heap->total_free_size < heap->gc_threshold) {
        if (GC_SUCCESS != do_gc_heap(heap))
            return NULL;
    }
    else {
        hmu_t *ret = NULL;
        if ((ret = alloc_hmu(heap, size))) {
            return ret;
        }
        if (GC_SUCCESS != do_gc_heap(heap))
            return NULL;
    }
#endif
#endif

    return alloc_hmu(heap, size);
}

/* Convert object pointer to HMU pointer - handles aligned allocations */
hmu_t *
obj_to_hmu(gc_object_t obj)
{
    /* Check for aligned allocation magic signature */
    if (gc_is_aligned_allocation(obj)) {
        /* This is an aligned allocation, read offset */
        uint32_t *offset_ptr = ALIGNED_ALLOC_GET_OFFSET_PTR(obj);
        return (hmu_t *)((char *)obj - *offset_ptr);
    }
    /* Normal allocation: standard offset */
    return (hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1;
}

#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo(void *vheap, gc_size_t size)
#else
gc_object_t
gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = (gc_object_t)NULL;
    gc_size_t tot_size = 0, tot_size_unaligned;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = size + OBJ_EXTRA_SIZE;
    /* aligned size*/
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }
#endif

    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);
    if (tot_size > tot_size_unaligned)
        /* clear buffer appended by GC_ALIGN_8() */
        memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);

finish:
    UNLOCK_HEAP(heap);
    return ret;
}
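
/*
 * Aligned VO allocations (see gc_alloc_vo_aligned() below) reserve extra room
 * between the normal object start and the returned pointer. Two metadata
 * words are written just before the returned pointer, at the addresses given
 * by the ALIGNED_ALLOC_GET_OFFSET_PTR()/ALIGNED_ALLOC_GET_MAGIC_PTR() macros
 * (defined elsewhere): the offset from the HMU header to the returned
 * pointer, used by obj_to_hmu() to recover the HMU, and a magic word holding
 * ALIGNED_ALLOC_MAGIC_VALUE with log2(alignment) in its low bits, which is
 * what gc_is_aligned_allocation() detects. Rough layout (the exact order of
 * the metadata words is an assumption here):
 *
 *   | HMU header | prefix | ...padding... | metadata | aligned payload |
 *   ^ hmu                                            ^ returned pointer
 */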

#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo_aligned(void *vheap, gc_size_t size, gc_size_t alignment)
#else
gc_object_t
gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment,
                             const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = NULL;
    gc_size_t tot_size, tot_size_unaligned;
    gc_uint8 *base_obj;
    uintptr_t aligned_addr;
    uint32_t offset, alignment_log2;
    uint32_t max_alignment;

    /* Get system page size for maximum alignment check */
    max_alignment = (uint32_t)os_getpagesize();

    /* Validation */
    if (alignment == 0 || (alignment & (alignment - 1)) != 0) {
        /* Zero or not power of 2 */
        return NULL;
    }
    if (alignment < GC_MIN_ALIGNMENT) {
        alignment = GC_MIN_ALIGNMENT;
    }
    if (alignment > max_alignment) {
        /* Exceeds page size */
        return NULL;
    }
    if (size % alignment != 0) {
        /* POSIX requirement: size must be multiple of alignment */
        return NULL;
    }
    if (size > SIZE_MAX - GC_ALIGNED_SMALLEST_SIZE(alignment)) {
        /* Would overflow */
        return NULL;
    }

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }
#endif

    /* Calculate total size needed */
    tot_size_unaligned = size + OBJ_EXTRA_SIZE + ALIGNED_ALLOC_EXTRA_OVERHEAD
                         + (alignment > 8 ? (alignment - 8) : 8);
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size) {
        /* Integer overflow */
        return NULL;
    }

    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    /* Get base object pointer */
    base_obj = (gc_uint8 *)hmu + HMU_SIZE + OBJ_PREFIX_SIZE;

    /* Find next aligned address, reserving space for metadata */
    aligned_addr =
        (((uintptr_t)base_obj + ALIGNED_ALLOC_METADATA_SIZE + alignment - 1)
         & ~(uintptr_t)(alignment - 1));
    ret = (gc_object_t)aligned_addr;

    /* Verify we have enough space */
    bh_assert((gc_uint8 *)ret + size + OBJ_SUFFIX_SIZE
              <= (gc_uint8 *)hmu + tot_size);

    /* Calculate offset from HMU to returned pointer */
    offset = (uint32_t)((char *)ret - (char *)hmu);

    /* Calculate log2 of alignment for magic value */
    alignment_log2 = 0;
    while ((1U << alignment_log2) < alignment) {
        alignment_log2++;
    }

    /* Store offset before returned pointer */
    *ALIGNED_ALLOC_GET_OFFSET_PTR(ret) = offset;
    /* Store magic with encoded alignment */
    *ALIGNED_ALLOC_GET_MAGIC_PTR(ret) =
        ALIGNED_ALLOC_MAGIC_VALUE | alignment_log2;

    /* Initialize HMU */
    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

finish:
    UNLOCK_HEAP(heap);
    return ret;
}
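
/*
 * Illustrative usage of the aligned allocator (a sketch only; the concrete
 * sizes are an example, not taken from this file):
 *
 *     // size must be a multiple of alignment; alignment must be a power of
 *     // two no larger than the page size
 *     void *buf = gc_alloc_vo_aligned(heap_handle, 4096, 64);
 *     if (buf) {
 *         ... use buf ...
 *         gc_free_vo(heap_handle, buf); // obj_to_hmu() handles the offset
 *     }
 *
 * Note that gc_realloc_vo() below intentionally rejects pointers returned by
 * this function.
 */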

#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_realloc_vo(void *vheap, void *ptr, gc_size_t size)
#else
gc_object_t
gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
                       int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL, *hmu_old = NULL, *hmu_next;
    gc_object_t ret = (gc_object_t)NULL, obj_old = (gc_object_t)ptr;
    gc_size_t tot_size, tot_size_unaligned, tot_size_old = 0, tot_size_next;
    gc_size_t obj_size, obj_size_old;
    gc_uint8 *base_addr, *end_addr;
    hmu_type_t ut;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size*/
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }
#endif

    /* Check if this is an aligned allocation - not supported */
    if (gc_is_aligned_allocation(obj_old)) {
        LOG_ERROR("[GC_ERROR]gc_realloc_vo does not support aligned "
                  "allocations\n");
        return NULL;
    }

    if (obj_old) {
        hmu_old = obj_to_hmu(obj_old);
        tot_size_old = hmu_get_size(hmu_old);
        if (tot_size <= tot_size_old)
            /* current node already meets requirement */
            return obj_old;
    }

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    LOCK_HEAP(heap);

    if (hmu_old) {
        hmu_next = (hmu_t *)((char *)hmu_old + tot_size_old);
        if (hmu_is_in_heap(hmu_next, base_addr, end_addr)) {
            ut = hmu_get_ut(hmu_next);
            tot_size_next = hmu_get_size(hmu_next);
            if (ut == HMU_FC && tot_size <= tot_size_old + tot_size_next) {
                /* current node and next node meet requirement */
                if (!unlink_hmu(heap, hmu_next)) {
                    UNLOCK_HEAP(heap);
                    return NULL;
                }
                hmu_set_size(hmu_old, tot_size);
                memset((char *)hmu_old + tot_size_old, 0,
                       tot_size - tot_size_old);
#if BH_ENABLE_GC_VERIFY != 0
                hmu_init_prefix_and_suffix(hmu_old, tot_size, file, line);
#endif
                if (tot_size < tot_size_old + tot_size_next) {
                    hmu_next = (hmu_t *)((char *)hmu_old + tot_size);
                    tot_size_next = tot_size_old + tot_size_next - tot_size;
                    if (!gci_add_fc(heap, hmu_next, tot_size_next)) {
                        UNLOCK_HEAP(heap);
                        return NULL;
                    }
                    hmu_mark_pinuse(hmu_next);
                }
                UNLOCK_HEAP(heap);
                return obj_old;
            }
        }
    }

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);

finish:

    if (ret) {
        obj_size = tot_size - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
        memset(ret, 0, obj_size);
        if (obj_old) {
            obj_size_old =
                tot_size_old - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
            bh_memcpy_s(ret, obj_size, obj_old, obj_size_old);
        }
    }

    UNLOCK_HEAP(heap);

    if (ret && obj_old)
        gc_free_vo(vheap, obj_old);

    return ret;
}

#if GC_MANUALLY != 0
void
gc_free_wo(void *vheap, void *ptr)
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    gc_object_t *obj = (gc_object_t *)ptr;
    hmu_t *hmu = obj_to_hmu(obj);

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(obj);
    bh_assert((gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(hmu_get_ut(hmu) == HMU_WO);

    hmu_unmark_wo(hmu);
    (void)heap;
}
#endif

/* see ems_gc.h for description*/
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_wo(void *vheap, gc_size_t size)
#else
gc_object_t
gc_alloc_wo_internal(void *vheap, gc_size_t size, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = (gc_object_t)NULL;
    gc_size_t tot_size = 0, tot_size_unaligned;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size*/
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }
#endif

    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    /* Don't memset the memory to improve performance, the caller should
       decide whether to memset it or not */

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_WO);
#if GC_MANUALLY != 0
    hmu_mark_wo(hmu);
#else
    hmu_unmark_wo(hmu);
#endif

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);
    if (tot_size > tot_size_unaligned)
        /* clear buffer appended by GC_ALIGN_8() */
        memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);

finish:
    UNLOCK_HEAP(heap);
    return ret;
}

/**
 * Do some checking to see if given pointer is a possible valid heap
 * @return GC_TRUE if all checking passed, GC_FALSE otherwise
 */
int
gci_is_heap_valid(gc_heap_t *heap)
{
    if (!heap)
        return GC_FALSE;
    if (heap->heap_id != (gc_handle_t)heap)
        return GC_FALSE;

    return GC_TRUE;
}
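
/*
 * Free a VO and merge it with its free neighbours: if the pinuse bit is
 * clear, the previous chunk is free and is located through the size footer
 * written by hmu_set_free_size(); if the following chunk is an HMU_FC it is
 * unlinked and absorbed as well. The merged chunk is then re-inserted with
 * gci_add_fc() and the pinuse bit of the chunk after it is cleared.
 */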
#if BH_ENABLE_GC_VERIFY == 0
int
gc_free_vo(void *vheap, gc_object_t obj)
#else
int
gc_free_vo_internal(void *vheap, gc_object_t obj, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    gc_uint8 *base_addr, *end_addr;
    hmu_t *hmu = NULL;
    hmu_t *prev = NULL;
    hmu_t *next = NULL;
    gc_size_t size = 0;
    hmu_type_t ut;
    int ret = GC_SUCCESS;

    if (!obj) {
        return GC_SUCCESS;
    }

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, free memory failed.\n");
        return GC_ERROR;
    }
#endif

    hmu = obj_to_hmu(obj);
    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    LOCK_HEAP(heap);

    if (hmu_is_in_heap(hmu, base_addr, end_addr)) {
#if BH_ENABLE_GC_VERIFY != 0
        hmu_verify(heap, hmu);
#endif
        ut = hmu_get_ut(hmu);
        if (ut == HMU_VO) {
            if (hmu_is_vo_freed(hmu)) {
                bh_assert(0);
                ret = GC_ERROR;
                goto out;
            }

            size = hmu_get_size(hmu);

            heap->total_free_size += size;
#if GC_STAT_DATA != 0
            heap->total_size_freed += size;
#endif

            if (!hmu_get_pinuse(hmu)) {
                prev = (hmu_t *)((char *)hmu - *((int *)hmu - 1));

                if (hmu_is_in_heap(prev, base_addr, end_addr)
                    && hmu_get_ut(prev) == HMU_FC) {
                    size += hmu_get_size(prev);
                    hmu = prev;
                    if (!unlink_hmu(heap, prev)) {
                        ret = GC_ERROR;
                        goto out;
                    }
                }
            }

            next = (hmu_t *)((char *)hmu + size);
            if (hmu_is_in_heap(next, base_addr, end_addr)) {
                if (hmu_get_ut(next) == HMU_FC) {
                    size += hmu_get_size(next);
                    if (!unlink_hmu(heap, next)) {
                        ret = GC_ERROR;
                        goto out;
                    }
                    next = (hmu_t *)((char *)hmu + size);
                }
            }

            if (!gci_add_fc(heap, hmu, size)) {
                ret = GC_ERROR;
                goto out;
            }

            if (hmu_is_in_heap(next, base_addr, end_addr)) {
                hmu_unmark_pinuse(next);
            }
        }
        else {
            ret = GC_ERROR;
            goto out;
        }
        ret = GC_SUCCESS;
        goto out;
    }

out:
    UNLOCK_HEAP(heap);
    return ret;
}

void
gc_dump_heap_stats(gc_heap_t *heap)
{
    os_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
    os_printf("total free: %" PRIu32 ", current: %" PRIu32
              ", highmark: %" PRIu32 "\n",
              heap->total_free_size, heap->current_size, heap->highmark_size);
#if GC_STAT_DATA != 0
    os_printf("total size allocated: %" PRIu64 ", total size freed: %" PRIu64
              ", total occupied: %" PRIu64 "\n",
              heap->total_size_allocated, heap->total_size_freed,
              heap->total_size_allocated - heap->total_size_freed);
#endif
}

uint32
gc_get_heap_highmark_size(gc_heap_t *heap)
{
    return heap->highmark_size;
}
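
/*
 * Dump every HMU in the heap. Each line prints: index, offset from the heap
 * base, unit type, pinuse flag, mark flag, a state letter ('V' = VO,
 * 'W'/'w' = marked/unmarked WO, 'F' = free chunk, 'U' = unknown) and the
 * object size.
 */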
void
gci_dump(gc_heap_t *heap)
{
    hmu_t *cur = NULL, *end = NULL;
    hmu_type_t ut;
    gc_size_t size;
    int i = 0, p, mark;
    char inuse = 'U';

    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)((char *)heap->base_addr + heap->current_size);

    while (cur < end) {
        ut = hmu_get_ut(cur);
        size = hmu_get_size(cur);
        p = hmu_get_pinuse(cur);
        mark = hmu_is_wo_marked(cur);

        if (ut == HMU_VO)
            inuse = 'V';
        else if (ut == HMU_WO)
            inuse = hmu_is_wo_marked(cur) ? 'W' : 'w';
        else if (ut == HMU_FC)
            inuse = 'F';

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (size == 0 || size > (uint32)((uint8 *)end - (uint8 *)cur)) {
            LOG_ERROR("[GC_ERROR]Heap is corrupted, heap dump failed.\n");
            heap->is_heap_corrupted = true;
            return;
        }
#endif

        os_printf("#%d %08" PRIx32 " %" PRIx32 " %d %d"
                  " %c %" PRId32 "\n",
                  i, (uint32)((char *)cur - (char *)heap->base_addr),
                  (uint32)ut, p, mark, inuse, (int32)hmu_obj_size(size));
#if BH_ENABLE_GC_VERIFY != 0
        if (inuse == 'V') {
            gc_object_prefix_t *prefix = (gc_object_prefix_t *)(cur + 1);
            os_printf("#%s:%d\n", prefix->file_name, prefix->line_no);
        }
#endif

        cur = (hmu_t *)((char *)cur + size);
        i++;
    }

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (cur != end) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, heap dump failed.\n");
        heap->is_heap_corrupted = true;
    }
#else
    bh_assert(cur == end);
#endif
}

#if WASM_ENABLE_GC != 0
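
/**
 * Binary-search the sorted extra info node array (keyed by object address).
 * If the object is found, its node is returned and *p_index receives its
 * position; otherwise NULL is returned and *p_index receives the position
 * where a node for @obj would need to be inserted, which is how
 * insert_extra_info_node() below uses it.
 */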
extra_info_node_t *
gc_search_extra_info_node(gc_handle_t handle, gc_object_t obj,
                          gc_size_t *p_index)
{
    gc_heap_t *vheap = (gc_heap_t *)handle;
    int32 low = 0, high = vheap->extra_info_node_cnt - 1;
    int32 mid;
    extra_info_node_t *node;

    if (!vheap->extra_info_nodes)
        return NULL;

    while (low <= high) {
        mid = (low + high) / 2;
        node = vheap->extra_info_nodes[mid];

        if (obj == node->obj) {
            if (p_index) {
                *p_index = mid;
            }
            return node;
        }
        else if (obj < node->obj) {
            high = mid - 1;
        }
        else {
            low = mid + 1;
        }
    }

    if (p_index) {
        *p_index = low;
    }
    return NULL;
}

static bool
insert_extra_info_node(gc_heap_t *vheap, extra_info_node_t *node)
{
    gc_size_t index;
    extra_info_node_t *orig_node;

    if (!vheap->extra_info_nodes) {
        vheap->extra_info_nodes = vheap->extra_info_normal_nodes;
        vheap->extra_info_node_capacity =
            sizeof(vheap->extra_info_normal_nodes)
            / sizeof(extra_info_node_t *);
        vheap->extra_info_nodes[0] = node;
        vheap->extra_info_node_cnt = 1;
        return true;
    }

    /* extend array */
    if (vheap->extra_info_node_cnt == vheap->extra_info_node_capacity) {
        extra_info_node_t **new_nodes = NULL;
        gc_size_t new_capacity = vheap->extra_info_node_capacity * 3 / 2;
        gc_size_t total_size = sizeof(extra_info_node_t *) * new_capacity;

        new_nodes = (extra_info_node_t **)BH_MALLOC(total_size);
        if (!new_nodes) {
            LOG_ERROR("alloc extra info nodes failed");
            return false;
        }

        bh_memcpy_s(new_nodes, total_size, vheap->extra_info_nodes,
                    sizeof(extra_info_node_t *) * vheap->extra_info_node_cnt);
        if (vheap->extra_info_nodes != vheap->extra_info_normal_nodes) {
            BH_FREE(vheap->extra_info_nodes);
        }
        vheap->extra_info_nodes = new_nodes;
        vheap->extra_info_node_capacity = new_capacity;
    }

    orig_node = gc_search_extra_info_node(vheap, node->obj, &index);
    if (orig_node) {
        /* replace the old node */
        vheap->extra_info_nodes[index] = node;
        BH_FREE(orig_node);
    }
    else {
        bh_memmove_s(vheap->extra_info_nodes + index + 1,
                     (vheap->extra_info_node_capacity - index - 1)
                         * sizeof(extra_info_node_t *),
                     vheap->extra_info_nodes + index,
                     (vheap->extra_info_node_cnt - index)
                         * sizeof(extra_info_node_t *));
        vheap->extra_info_nodes[index] = node;
        vheap->extra_info_node_cnt += 1;
    }

    return true;
}

bool
gc_set_finalizer(gc_handle_t handle, gc_object_t obj, gc_finalizer_t cb,
                 void *data)
{
    extra_info_node_t *node = NULL;
    gc_heap_t *vheap = (gc_heap_t *)handle;

    node = (extra_info_node_t *)BH_MALLOC(sizeof(extra_info_node_t));
    if (!node) {
        LOG_ERROR("alloc a new extra info node failed");
        return GC_FALSE;
    }
    memset(node, 0, sizeof(extra_info_node_t));

    node->finalizer = cb;
    node->obj = obj;
    node->data = data;

    LOCK_HEAP(vheap);
    if (!insert_extra_info_node(vheap, node)) {
        BH_FREE(node);
        UNLOCK_HEAP(vheap);
        return GC_FALSE;
    }
    UNLOCK_HEAP(vheap);

    gct_vm_set_extra_info_flag(obj, true);
    return GC_TRUE;
}

void
gc_unset_finalizer(gc_handle_t handle, gc_object_t obj)
{
    gc_size_t index;
    gc_heap_t *vheap = (gc_heap_t *)handle;
    extra_info_node_t *node;

    LOCK_HEAP(vheap);
    node = gc_search_extra_info_node(vheap, obj, &index);
    if (!node) {
        UNLOCK_HEAP(vheap);
        return;
    }

    BH_FREE(node);
    bh_memmove_s(
        vheap->extra_info_nodes + index,
        (vheap->extra_info_node_capacity - index) * sizeof(extra_info_node_t *),
        vheap->extra_info_nodes + index + 1,
        (vheap->extra_info_node_cnt - index - 1) * sizeof(extra_info_node_t *));
    vheap->extra_info_node_cnt -= 1;
    UNLOCK_HEAP(vheap);

    gct_vm_set_extra_info_flag(obj, false);
}
#endif