/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "ems_gc_internal.h"

#if WASM_ENABLE_GC != 0
#define LOCK_HEAP(heap)                                                \
    do {                                                               \
        if (!heap->is_doing_reclaim)                                   \
            /* If the heap is doing reclaim, it must have been locked, \
               we should not lock the heap again. */                   \
            os_mutex_lock(&heap->lock);                                \
    } while (0)
#define UNLOCK_HEAP(heap)                                              \
    do {                                                               \
        if (!heap->is_doing_reclaim)                                   \
            /* If the heap is doing reclaim, it must have been locked, \
               and will be unlocked after reclaim, we should not       \
               unlock the heap again. */                               \
            os_mutex_unlock(&heap->lock);                              \
    } while (0)
#else
#define LOCK_HEAP(heap) os_mutex_lock(&heap->lock)
#define UNLOCK_HEAP(heap) os_mutex_unlock(&heap->lock)
#endif
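/*
 * Expository note (added, not part of the original file): the conditional
 * locking above matters because, with WASM_ENABLE_GC enabled, reclaim runs
 * with heap->lock already held and heap->is_doing_reclaim set, so allocator
 * paths re-entered during reclaim must skip the lock to avoid self-deadlock.
 * A minimal sketch of the pattern used throughout this file, assuming a
 * valid gc_heap_t *heap:
 */
#if 0 /* example only */
static void
example_locked_update(gc_heap_t *heap)
{
    LOCK_HEAP(heap);
    /* ... mutate the free lists / free tree here ... */
    UNLOCK_HEAP(heap);
}
#endif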
static inline bool
hmu_is_in_heap(void *hmu, gc_uint8 *heap_base_addr, gc_uint8 *heap_end_addr)
{
    gc_uint8 *addr = (gc_uint8 *)hmu;
    return (addr >= heap_base_addr && addr < heap_end_addr) ? true : false;
}
/**
 * Remove a node from the tree it belongs to
 *
 * @param p the node to remove, must not be NULL and must not be the ROOT
 *        node; the node will be removed from the tree, and the left, right
 *        and parent pointers of node @p will be set to NULL. Other fields
 *        won't be touched. The tree will be re-organized so that the
 *        ordering conditions are still satisfied.
 */
static bool
remove_tree_node(gc_heap_t *heap, hmu_tree_node_t *p)
{
    hmu_tree_node_t *q = NULL, **slot = NULL, *parent;
    hmu_tree_node_t *root = heap->kfc_tree_root;
    gc_uint8 *base_addr = heap->base_addr;
    gc_uint8 *end_addr = base_addr + heap->current_size;

    bh_assert(p);

    parent = p->parent;
    if (!parent || p == root /* p can not be the ROOT node */
        || !hmu_is_in_heap(p, base_addr, end_addr)
        || (parent != root && !hmu_is_in_heap(parent, base_addr, end_addr))) {
        goto fail;
    }

    /* get the slot which holds the pointer to node p */
    if (p == p->parent->right) {
        /* Don't use `slot = &p->parent->right` to avoid compiler warning */
        slot = (hmu_tree_node_t **)((uint8 *)p->parent
                                    + offsetof(hmu_tree_node_t, right));
    }
    else if (p == p->parent->left) {
        /* p should be a child of its parent */
        /* Don't use `slot = &p->parent->left` to avoid compiler warning */
        slot = (hmu_tree_node_t **)((uint8 *)p->parent
                                    + offsetof(hmu_tree_node_t, left));
    }
    else {
        goto fail;
    }

    /**
     * algorithm used to remove node p:
     * case 1: if p has no left child, replace p with its right child
     * case 2: if p has no right child, replace p with its left child
     * case 3: otherwise, find p's predecessor, remove it from the tree
     *         and replace p with it.
     * Using the predecessor keeps the left <= root < right condition.
     */

    if (!p->left) {
        /* move the right child up */
        *slot = p->right;
        if (p->right) {
            if (!hmu_is_in_heap(p->right, base_addr, end_addr)) {
                goto fail;
            }
            p->right->parent = p->parent;
        }

        p->left = p->right = p->parent = NULL;
        return true;
    }

    if (!p->right) {
        /* move the left child up */
        *slot = p->left;
        if (!hmu_is_in_heap(p->left, base_addr, end_addr)) {
            goto fail;
        }
        /* p->left can never be NULL unless it is corrupted. */
        p->left->parent = p->parent;
        p->left = p->right = p->parent = NULL;
        return true;
    }

    /* both left & right children exist, find p's predecessor first */
    q = p->left;
    if (!hmu_is_in_heap(q, base_addr, end_addr)) {
        goto fail;
    }
    while (q->right) {
        q = q->right;
        if (!hmu_is_in_heap(q, base_addr, end_addr)) {
            goto fail;
        }
    }

    /* remove q from the tree */
    if (!remove_tree_node(heap, q))
        return false;

    *slot = q;
    q->parent = p->parent;
    q->left = p->left;
    q->right = p->right;
    if (q->left) {
        if (!hmu_is_in_heap(q->left, base_addr, end_addr)) {
            goto fail;
        }
        q->left->parent = q;
    }
    if (q->right) {
        if (!hmu_is_in_heap(q->right, base_addr, end_addr)) {
            goto fail;
        }
        q->right->parent = q;
    }

    p->left = p->right = p->parent = NULL;
    return true;

fail:
    heap->is_heap_corrupted = true;
    return false;
}
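/*
 * Expository note (added): case 3 above is textbook BST deletion via the
 * in-order predecessor (the rightmost node of the left subtree), which
 * preserves the left <= root < right ordering on chunk sizes. A minimal
 * standalone version on a hypothetical key-based node type; note the
 * file's version relinks the predecessor node itself instead of copying a
 * key, since the tree nodes are the free chunks themselves:
 */
#if 0 /* example only */
struct node {
    int key;
    struct node *left, *right;
};

static struct node *
bst_delete(struct node *root, int key)
{
    if (!root)
        return NULL;
    if (key < root->key)
        root->left = bst_delete(root->left, key);
    else if (key > root->key)
        root->right = bst_delete(root->right, key);
    else if (!root->left)
        return root->right; /* case 1: no left child */
    else if (!root->right)
        return root->left;  /* case 2: no right child */
    else {
        struct node *pred = root->left; /* case 3: predecessor */
        while (pred->right)
            pred = pred->right;
        root->key = pred->key; /* copy, then delete the predecessor */
        root->left = bst_delete(root->left, pred->key);
    }
    return root;
}
#endif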
static bool
unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
{
    gc_uint8 *base_addr, *end_addr;
    gc_size_t size;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);

    if (hmu_get_ut(hmu) != HMU_FC) {
        heap->is_heap_corrupted = true;
        return false;
    }

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;
    size = hmu_get_size(hmu);

    if (HMU_IS_FC_NORMAL(size)) {
        uint32 node_idx = size >> 3;
        hmu_normal_node_t *node_prev = NULL, *node_next;
        hmu_normal_node_t *node = heap->kfc_normal_list[node_idx].next;

        while (node) {
            if (!hmu_is_in_heap(node, base_addr, end_addr)) {
                heap->is_heap_corrupted = true;
                return false;
            }
            node_next = get_hmu_normal_node_next(node);
            if ((hmu_t *)node == hmu) {
                if (!node_prev) /* list head */
                    heap->kfc_normal_list[node_idx].next = node_next;
                else
                    set_hmu_normal_node_next(node_prev, node_next);
                break;
            }
            node_prev = node;
            node = node_next;
        }

        if (!node) {
            os_printf("[GC_ERROR]couldn't find the node in the normal list\n");
        }
    }
    else {
        if (!remove_tree_node(heap, (hmu_tree_node_t *)hmu))
            return false;
    }
    return true;
}
static void
hmu_set_free_size(hmu_t *hmu)
{
    gc_size_t size;
    bh_assert(hmu && hmu_get_ut(hmu) == HMU_FC);

    size = hmu_get_size(hmu);
    *((uint32 *)((char *)hmu + size) - 1) = size;
}
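/*
 * Expository note (added): the store above is a classic boundary tag.
 * Writing the chunk size into the last 4 bytes of a free chunk lets the
 * successor chunk locate the start of a free predecessor without any back
 * pointer; gc_free_vo() below relies on it when coalescing:
 */
#if 0 /* example only, excerpt of the free path */
/* Inside gc_free_vo(), when the preceding chunk is not in use
   (pinuse bit clear), its size sits right before `hmu`: */
prev = (hmu_t *)((char *)hmu - *((int *)hmu - 1));
#endif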
/**
 * Add a free chunk back to the KFC
 *
 * @param heap should not be NULL and it should be a valid heap
 * @param hmu should not be NULL and it should be an HMU of length @size
 *        inside @heap; hmu should be 8-byte aligned
 * @param size should be positive and a multiple of 8
 *
 * The hmu with size @size will be added into the KFC as a new FC.
 */
bool
gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
{
    gc_uint8 *base_addr, *end_addr;
    hmu_normal_node_t *np = NULL;
    hmu_tree_node_t *root = NULL, *tp = NULL, *node = NULL;
    uint32 node_idx;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(((gc_uint32)(uintptr_t)hmu_to_obj(hmu) & 7) == 0);
    bh_assert(size > 0
              && ((gc_uint8 *)hmu) + size
                     <= heap->base_addr + heap->current_size);
    bh_assert(!(size & 7));

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    hmu_set_ut(hmu, HMU_FC);
    hmu_set_size(hmu, size);
    hmu_set_free_size(hmu);

    if (HMU_IS_FC_NORMAL(size)) {
        np = (hmu_normal_node_t *)hmu;
        if (!hmu_is_in_heap(np, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return false;
        }

        node_idx = size >> 3;
        set_hmu_normal_node_next(np, heap->kfc_normal_list[node_idx].next);
        heap->kfc_normal_list[node_idx].next = np;
        return true;
    }

    /* big block */
    node = (hmu_tree_node_t *)hmu;
    node->size = size;
    node->left = node->right = node->parent = NULL;

    /* find the proper node to link this new node to */
    root = heap->kfc_tree_root;
    tp = root;
    bh_assert(tp->size < size);
    while (1) {
        if (tp->size < size) {
            if (!tp->right) {
                tp->right = node;
                node->parent = tp;
                break;
            }
            tp = tp->right;
        }
        else { /* tp->size >= size */
            if (!tp->left) {
                tp->left = node;
                node->parent = tp;
                break;
            }
            tp = tp->left;
        }
        if (!hmu_is_in_heap(tp, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return false;
        }
    }
    return true;
}
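/*
 * Expository note (added): the KFC keeps small chunks in segregated
 * singly-linked lists indexed by size >> 3, and chunks too large for the
 * lists in the size-ordered tree walked above. A worked example, assuming
 * HMU_IS_FC_NORMAL() accepts the sizes used:
 */
#if 0 /* example only, excerpt of the normal-list path */
/* a 32-byte free chunk lands in kfc_normal_list[32 >> 3], i.e. slot 4;
   a 40-byte chunk in slot 5. Every chunk in slot N is exactly N * 8
   bytes, so allocation from a slot needs no further size check. */
node_idx = size >> 3;
set_hmu_normal_node_next(np, heap->kfc_normal_list[node_idx].next);
heap->kfc_normal_list[node_idx].next = np;
#endif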
/**
 * Find a proper hmu for the required memory size
 *
 * @param heap should not be NULL and should be a valid heap
 * @param size should cover the header and should be 8-byte aligned
 *
 * Note: GC will not be performed here.
 *       Heap extension will not be performed here.
 *
 * @return the hmu allocated on success, which will be aligned to 8 bytes,
 *         NULL otherwise
 */
static hmu_t *
alloc_hmu(gc_heap_t *heap, gc_size_t size)
{
    gc_uint8 *base_addr, *end_addr;
    hmu_normal_list_t *normal_head = NULL;
    hmu_normal_node_t *p = NULL;
    uint32 node_idx = 0, init_node_idx = 0;
    hmu_tree_node_t *root = NULL, *tp = NULL, *last_tp = NULL;
    hmu_t *next, *rest;
    uintptr_t tp_ret;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

#if WASM_ENABLE_GC != 0
    /* While doing reclaim, the gc must not alloc memory again. */
    bh_assert(!heap->is_doing_reclaim);
#endif

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    if (size < GC_SMALLEST_SIZE)
        size = GC_SMALLEST_SIZE;

    /* check the normal list at first */
    if (HMU_IS_FC_NORMAL(size)) {
        /* find a non-empty slot in normal_node_list with a good size */
        init_node_idx = (size >> 3);
        for (node_idx = init_node_idx; node_idx < HMU_NORMAL_NODE_CNT;
             node_idx++) {
            normal_head = heap->kfc_normal_list + node_idx;
            if (normal_head->next)
                break;
            normal_head = NULL;
        }

        /* found in the normal list */
        if (normal_head) {
            bh_assert(node_idx >= init_node_idx);

            p = normal_head->next;
            if (!hmu_is_in_heap(p, base_addr, end_addr)) {
                heap->is_heap_corrupted = true;
                return NULL;
            }
            normal_head->next = get_hmu_normal_node_next(p);
            if (((gc_int32)(uintptr_t)hmu_to_obj(p) & 7) != 0) {
                heap->is_heap_corrupted = true;
                return NULL;
            }

            if ((gc_size_t)node_idx != (uint32)init_node_idx
                /* with a bigger size */
                && ((gc_size_t)node_idx << 3) >= size + GC_SMALLEST_SIZE) {
                rest = (hmu_t *)(((char *)p) + size);
                if (!gci_add_fc(heap, rest, (node_idx << 3) - size)) {
                    return NULL;
                }
                hmu_mark_pinuse(rest);
            }
            else {
                size = node_idx << 3;
                next = (hmu_t *)((char *)p + size);
                if (hmu_is_in_heap(next, base_addr, end_addr))
                    hmu_mark_pinuse(next);
            }

            heap->total_free_size -= size;
            if ((heap->current_size - heap->total_free_size)
                > heap->highmark_size)
                heap->highmark_size =
                    heap->current_size - heap->total_free_size;

            hmu_set_size((hmu_t *)p, size);
            return (hmu_t *)p;
        }
    }

    /* need to find a node in the tree */
    root = heap->kfc_tree_root;

    /* find the best node */
    bh_assert(root);
    tp = root->right;
    while (tp) {
        if (!hmu_is_in_heap(tp, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return NULL;
        }

        if (tp->size < size) {
            tp = tp->right;
            continue;
        }

        /* record the last node with size equal to or bigger than the
           given size */
        last_tp = tp;
        tp = tp->left;
    }

    if (last_tp) {
        bh_assert(last_tp->size >= size);

        /* alloc in last_tp: remove node last_tp from the tree */
        if (!remove_tree_node(heap, last_tp))
            return NULL;

        if (last_tp->size >= size + GC_SMALLEST_SIZE) {
            rest = (hmu_t *)((char *)last_tp + size);
            if (!gci_add_fc(heap, rest, last_tp->size - size))
                return NULL;
            hmu_mark_pinuse(rest);
        }
        else {
            size = last_tp->size;
            next = (hmu_t *)((char *)last_tp + size);
            if (hmu_is_in_heap(next, base_addr, end_addr))
                hmu_mark_pinuse(next);
        }

        heap->total_free_size -= size;
        if ((heap->current_size - heap->total_free_size) > heap->highmark_size)
            heap->highmark_size = heap->current_size - heap->total_free_size;

        hmu_set_size((hmu_t *)last_tp, size);
        tp_ret = (uintptr_t)last_tp;
        return (hmu_t *)tp_ret;
    }

    return NULL;
}
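/*
 * Expository note (added): both lookup paths above split an oversized
 * chunk only when the remainder can stand alone as a free chunk. A worked
 * example with hypothetical numbers, assuming GC_SMALLEST_SIZE is 16:
 */
#if 0 /* example only */
/* request: size = 24; best-fit tree node found: last_tp->size = 64.
   64 >= 24 + 16, so split:
     - the first 24 bytes become the allocation,
     - the trailing 40 bytes re-enter the KFC via gci_add_fc(), and
       hmu_mark_pinuse(rest) records that their predecessor (the new
       allocation) is in use.
   Had last_tp->size been 32 (< 24 + 16), all 32 bytes would be handed
   out to avoid leaving an unusable 8-byte sliver. */
#endif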
#if WASM_ENABLE_GC != 0
static int
do_gc_heap(gc_heap_t *heap)
{
    int ret = GC_SUCCESS;

    if (heap->is_reclaim_enabled) {
        UNLOCK_HEAP(heap);
        ret = gci_gc_heap(heap);
        LOCK_HEAP(heap);
    }
    return ret;
}
#endif
/**
 * Find a proper HMU with the given size
 *
 * @param heap should not be NULL and should be a valid heap
 * @param size should cover the header and should be 8-byte aligned
 *
 * Note: This function will try several ways to satisfy the allocation
 * request:
 *   1. Find a proper HMU among the available HMUs.
 *   2. Trigger GC if step 1 failed.
 *   3. Search the available HMUs again.
 *   4. Return NULL if step 3 failed.
 *
 * @return the hmu allocated on success, which will be aligned to 8 bytes,
 *         NULL otherwise
 */
static hmu_t *
alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
{
    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

#if WASM_ENABLE_GC != 0
#if GC_IN_EVERY_ALLOCATION != 0
    if (GC_SUCCESS != do_gc_heap(heap))
        return NULL;
#else
    if (heap->total_free_size < heap->gc_threshold) {
        if (GC_SUCCESS != do_gc_heap(heap))
            return NULL;
    }
    else {
        hmu_t *ret = NULL;
        if ((ret = alloc_hmu(heap, size))) {
            return ret;
        }
        if (GC_SUCCESS != do_gc_heap(heap))
            return NULL;
    }
#endif
#endif

    return alloc_hmu(heap, size);
}
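/*
 * Expository note (added): with WASM_ENABLE_GC on and
 * GC_IN_EVERY_ALLOCATION off, the policy above is "collect eagerly when
 * free space drops below gc_threshold, otherwise collect only after a
 * failed allocation". Restated, mirroring the #else branch:
 */
#if 0 /* example only */
hmu_t *ret;
if (heap->total_free_size < heap->gc_threshold) {
    /* low on free space: collect first, then allocate below */
    do_gc_heap(heap);
}
else if ((ret = alloc_hmu(heap, size)) != NULL) {
    return ret; /* fast path: no GC at all */
}
else {
    do_gc_heap(heap); /* allocation failed: collect, then retry below */
}
return alloc_hmu(heap, size);
#endif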
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo(void *vheap, gc_size_t size)
#else
gc_object_t
gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = (gc_object_t)NULL;
    gc_size_t tot_size = 0, tot_size_unaligned;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size */
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }

    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);
    if (tot_size > tot_size_unaligned)
        /* clear the buffer appended by GC_ALIGN_8() */
        memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);

finish:
    UNLOCK_HEAP(heap);
    return ret;
}
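/*
 * Usage sketch (added): gc_alloc_vo()/gc_free_vo() behave like
 * malloc/free on a private heap. A minimal sketch, assuming a heap handle
 * obtained from the heap-creation API declared in ems_gc.h (the function
 * and handle names below are hypothetical):
 */
#if 0 /* example only */
void
example_vo_usage(gc_handle_t handle)
{
    gc_object_t buf = gc_alloc_vo(handle, 128);
    if (buf) {
        /* ... use the 128-byte object ... */
        gc_free_vo(handle, buf);
    }
}
#endif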
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_realloc_vo(void *vheap, void *ptr, gc_size_t size)
#else
gc_object_t
gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
                       int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL, *hmu_old = NULL, *hmu_next;
    gc_object_t ret = (gc_object_t)NULL, obj_old = (gc_object_t)ptr;
    gc_size_t tot_size, tot_size_unaligned, tot_size_old = 0, tot_size_next;
    gc_size_t obj_size, obj_size_old;
    gc_uint8 *base_addr, *end_addr;
    hmu_type_t ut;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size */
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }

    if (obj_old) {
        hmu_old = obj_to_hmu(obj_old);
        tot_size_old = hmu_get_size(hmu_old);
        if (tot_size <= tot_size_old)
            /* the current node already meets the requirement */
            return obj_old;
    }

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    LOCK_HEAP(heap);

    if (hmu_old) {
        hmu_next = (hmu_t *)((char *)hmu_old + tot_size_old);
        if (hmu_is_in_heap(hmu_next, base_addr, end_addr)) {
            ut = hmu_get_ut(hmu_next);
            tot_size_next = hmu_get_size(hmu_next);
            if (ut == HMU_FC && tot_size <= tot_size_old + tot_size_next) {
                /* the current node and the next node meet the requirement */
                if (!unlink_hmu(heap, hmu_next)) {
                    UNLOCK_HEAP(heap);
                    return NULL;
                }
                hmu_set_size(hmu_old, tot_size);
                memset((char *)hmu_old + tot_size_old, 0,
                       tot_size - tot_size_old);
#if BH_ENABLE_GC_VERIFY != 0
                hmu_init_prefix_and_suffix(hmu_old, tot_size, file, line);
#endif
                if (tot_size < tot_size_old + tot_size_next) {
                    hmu_next = (hmu_t *)((char *)hmu_old + tot_size);
                    tot_size_next = tot_size_old + tot_size_next - tot_size;
                    if (!gci_add_fc(heap, hmu_next, tot_size_next)) {
                        UNLOCK_HEAP(heap);
                        return NULL;
                    }
                    hmu_mark_pinuse(hmu_next);
                }
                UNLOCK_HEAP(heap);
                return obj_old;
            }
        }
    }

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);

finish:
    if (ret) {
        obj_size = tot_size - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
        memset(ret, 0, obj_size);
        if (obj_old) {
            obj_size_old =
                tot_size_old - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
            bh_memcpy_s(ret, obj_size, obj_old, obj_size_old);
        }
    }

    UNLOCK_HEAP(heap);

    if (ret && obj_old)
        gc_free_vo(vheap, obj_old);

    return ret;
}
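/*
 * Expository note (added): gc_realloc_vo() tries three strategies in
 * order: return the object unchanged if it is already big enough, grow in
 * place by absorbing the adjacent free chunk (splitting off any
 * remainder), and only then fall back to allocate-copy-free. The fallback
 * zeroes the new object before copying, so bytes past the old size come
 * back as zero. A sketch with a hypothetical handle and sizes:
 */
#if 0 /* example only */
gc_object_t p = gc_alloc_vo(handle, 64);
gc_object_t q = gc_realloc_vo(handle, p, 256);
/* On success q may or may not equal p; if it differs, p has already
   been freed and must not be touched. The bytes of q past the old 64
   are zero. */
#endif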
#if GC_MANUALLY != 0
void
gc_free_wo(void *vheap, void *ptr)
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    gc_object_t *obj = (gc_object_t *)ptr;
    hmu_t *hmu = obj_to_hmu(obj);

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(obj);
    bh_assert((gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(hmu_get_ut(hmu) == HMU_WO);

    hmu_unmark_wo(hmu);
    (void)heap;
}
#endif
/* see ems_gc.h for the description */
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_wo(void *vheap, gc_size_t size)
#else
gc_object_t
gc_alloc_wo_internal(void *vheap, gc_size_t size, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = (gc_object_t)NULL;
    gc_size_t tot_size = 0, tot_size_unaligned;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size */
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }

    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    /* Do we need to memset the memory to 0? */
    /* memset((char *)hmu + sizeof(*hmu), 0, tot_size - sizeof(*hmu)); */

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_WO);
#if GC_MANUALLY != 0
    hmu_mark_wo(hmu);
#else
    hmu_unmark_wo(hmu);
#endif

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);
    if (tot_size > tot_size_unaligned)
        /* clear the buffer appended by GC_ALIGN_8() */
        memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);

finish:
    UNLOCK_HEAP(heap);
    return ret;
}
/**
 * Do some checking to see if the given pointer is a possible valid heap
 *
 * @return GC_TRUE if all checks passed, GC_FALSE otherwise
 */
int
gci_is_heap_valid(gc_heap_t *heap)
{
    if (!heap)
        return GC_FALSE;
    if (heap->heap_id != (gc_handle_t)heap)
        return GC_FALSE;

    return GC_TRUE;
}
#if BH_ENABLE_GC_VERIFY == 0
int
gc_free_vo(void *vheap, gc_object_t obj)
#else
int
gc_free_vo_internal(void *vheap, gc_object_t obj, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    gc_uint8 *base_addr, *end_addr;
    hmu_t *hmu = NULL;
    hmu_t *prev = NULL;
    hmu_t *next = NULL;
    gc_size_t size = 0;
    hmu_type_t ut;
    int ret = GC_SUCCESS;

    if (!obj) {
        return GC_SUCCESS;
    }

    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, free memory failed.\n");
        return GC_ERROR;
    }

    hmu = obj_to_hmu(obj);
    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    LOCK_HEAP(heap);

    if (hmu_is_in_heap(hmu, base_addr, end_addr)) {
#if BH_ENABLE_GC_VERIFY != 0
        hmu_verify(heap, hmu);
#endif
        ut = hmu_get_ut(hmu);
        if (ut == HMU_VO) {
            if (hmu_is_vo_freed(hmu)) {
                bh_assert(0);
                ret = GC_ERROR;
                goto out;
            }

            size = hmu_get_size(hmu);

            heap->total_free_size += size;
#if GC_STAT_DATA != 0
            heap->total_size_freed += size;
#endif

            if (!hmu_get_pinuse(hmu)) {
                prev = (hmu_t *)((char *)hmu - *((int *)hmu - 1));
                if (hmu_is_in_heap(prev, base_addr, end_addr)
                    && hmu_get_ut(prev) == HMU_FC) {
                    size += hmu_get_size(prev);
                    hmu = prev;
                    if (!unlink_hmu(heap, prev)) {
                        ret = GC_ERROR;
                        goto out;
                    }
                }
            }

            next = (hmu_t *)((char *)hmu + size);
            if (hmu_is_in_heap(next, base_addr, end_addr)) {
                if (hmu_get_ut(next) == HMU_FC) {
                    size += hmu_get_size(next);
                    if (!unlink_hmu(heap, next)) {
                        ret = GC_ERROR;
                        goto out;
                    }
                    next = (hmu_t *)((char *)hmu + size);
                }
            }

            if (!gci_add_fc(heap, hmu, size)) {
                ret = GC_ERROR;
                goto out;
            }

            if (hmu_is_in_heap(next, base_addr, end_addr)) {
                hmu_unmark_pinuse(next);
            }
        }
        else {
            ret = GC_ERROR;
            goto out;
        }
        ret = GC_SUCCESS;
        goto out;
    }

out:
    UNLOCK_HEAP(heap);
    return ret;
}
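/*
 * Expository note (added): freeing coalesces with both neighbors before
 * the chunk re-enters the KFC. The pinuse bit says whether the
 * predecessor is allocated; when it is clear, the predecessor's boundary
 * tag (see hmu_set_free_size() above) gives its size. A worked example
 * with hypothetical chunk sizes:
 */
#if 0 /* example only */
/* heap layout:  [FC 32][VO 48 being freed][FC 64] ...
   1. pinuse of the VO chunk is clear -> read the 32 stored just before
      it, step back, unlink the 32-byte FC: size = 80
   2. the chunk at offset +80 is an FC -> unlink it too: size = 144
   3. gci_add_fc() inserts one 144-byte FC, and the chunk after it gets
      its pinuse bit cleared so a later free can coalesce backwards. */
#endif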
void
gc_dump_heap_stats(gc_heap_t *heap)
{
    os_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
    os_printf("total free: %" PRIu32 ", current: %" PRIu32
              ", highmark: %" PRIu32 "\n",
              heap->total_free_size, heap->current_size, heap->highmark_size);
#if GC_STAT_DATA != 0
    os_printf("total size allocated: %" PRIu64 ", total size freed: %" PRIu64
              ", total occupied: %" PRIu64 "\n",
              heap->total_size_allocated, heap->total_size_freed,
              heap->total_size_allocated - heap->total_size_freed);
#endif
}

uint32
gc_get_heap_highmark_size(gc_heap_t *heap)
{
    return heap->highmark_size;
}
void
gci_dump(gc_heap_t *heap)
{
    hmu_t *cur = NULL, *end = NULL;
    hmu_type_t ut;
    gc_size_t size;
    int i = 0, p, mark;
    char inuse = 'U';

    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)((char *)heap->base_addr + heap->current_size);

    while (cur < end) {
        ut = hmu_get_ut(cur);
        size = hmu_get_size(cur);
        p = hmu_get_pinuse(cur);
        mark = hmu_is_wo_marked(cur);

        if (ut == HMU_VO)
            inuse = 'V';
        else if (ut == HMU_WO)
            inuse = hmu_is_wo_marked(cur) ? 'W' : 'w';
        else if (ut == HMU_FC)
            inuse = 'F';

        if (size == 0 || size > (uint32)((uint8 *)end - (uint8 *)cur)) {
            os_printf("[GC_ERROR]Heap is corrupted, heap dump failed.\n");
            heap->is_heap_corrupted = true;
            return;
        }

        os_printf("#%d %08" PRIx32 " %" PRIx32 " %d %d"
                  " %c %" PRId32 "\n",
                  i, (int32)((char *)cur - (char *)heap->base_addr), (int32)ut,
                  p, mark, inuse, (int32)hmu_obj_size(size));
#if BH_ENABLE_GC_VERIFY != 0
        if (inuse == 'V') {
            gc_object_prefix_t *prefix = (gc_object_prefix_t *)(cur + 1);
            os_printf("#%s:%d\n", prefix->file_name, prefix->line_no);
        }
#endif

        cur = (hmu_t *)((char *)cur + size);
        i++;
    }

    if (cur != end) {
        os_printf("[GC_ERROR]Heap is corrupted, heap dump failed.\n");
        heap->is_heap_corrupted = true;
    }
}
#if WASM_ENABLE_GC != 0
extra_info_node_t *
gc_search_extra_info_node(gc_handle_t handle, gc_object_t obj,
                          gc_size_t *p_index)
{
    gc_heap_t *vheap = (gc_heap_t *)handle;
    int32 low = 0, high = vheap->extra_info_node_cnt - 1;
    int32 mid;
    extra_info_node_t *node;

    if (!vheap->extra_info_nodes)
        return NULL;

    while (low <= high) {
        mid = (low + high) / 2;
        node = vheap->extra_info_nodes[mid];

        if (obj == node->obj) {
            if (p_index) {
                *p_index = mid;
            }
            return node;
        }
        else if (obj < node->obj) {
            high = mid - 1;
        }
        else {
            low = mid + 1;
        }
    }

    if (p_index) {
        *p_index = low;
    }
    return NULL;
}
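/*
 * Expository note (added): on a miss the binary search still writes `low`
 * into *p_index, which is exactly the position at which the object would
 * keep the array sorted; insert_extra_info_node() below relies on this to
 * shift and insert in one pass. A sketch, with hypothetical handle and
 * object:
 */
#if 0 /* example only */
gc_size_t idx;
extra_info_node_t *n = gc_search_extra_info_node(handle, obj, &idx);
if (!n) {
    /* obj is not present; idx is the sorted insertion position, e.g.
       an address between nodes[1]->obj and nodes[2]->obj yields 2. */
}
#endif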
static bool
insert_extra_info_node(gc_heap_t *vheap, extra_info_node_t *node)
{
    gc_size_t index;
    extra_info_node_t *orig_node;

    if (!vheap->extra_info_nodes) {
        vheap->extra_info_nodes = vheap->extra_info_normal_nodes;
        vheap->extra_info_node_capacity =
            sizeof(vheap->extra_info_normal_nodes)
            / sizeof(extra_info_node_t *);
        vheap->extra_info_nodes[0] = node;
        vheap->extra_info_node_cnt = 1;
        return true;
    }

    /* extend the array */
    if (vheap->extra_info_node_cnt == vheap->extra_info_node_capacity) {
        extra_info_node_t **new_nodes = NULL;
        gc_size_t new_capacity = vheap->extra_info_node_capacity * 3 / 2;
        gc_size_t total_size = sizeof(extra_info_node_t *) * new_capacity;

        new_nodes = (extra_info_node_t **)BH_MALLOC(total_size);
        if (!new_nodes) {
            LOG_ERROR("alloc extra info nodes failed");
            return false;
        }

        bh_memcpy_s(new_nodes, total_size, vheap->extra_info_nodes,
                    sizeof(extra_info_node_t *) * vheap->extra_info_node_cnt);
        if (vheap->extra_info_nodes != vheap->extra_info_normal_nodes) {
            BH_FREE(vheap->extra_info_nodes);
        }
        vheap->extra_info_nodes = new_nodes;
        vheap->extra_info_node_capacity = new_capacity;
    }

    orig_node = gc_search_extra_info_node(vheap, node->obj, &index);
    if (orig_node) {
        /* replace the old node */
        vheap->extra_info_nodes[index] = node;
        BH_FREE(orig_node);
    }
    else {
        bh_memmove_s(vheap->extra_info_nodes + index + 1,
                     (vheap->extra_info_node_capacity - index - 1)
                         * sizeof(extra_info_node_t *),
                     vheap->extra_info_nodes + index,
                     (vheap->extra_info_node_cnt - index)
                         * sizeof(extra_info_node_t *));
        vheap->extra_info_nodes[index] = node;
        vheap->extra_info_node_cnt += 1;
    }

    return true;
}
bool
gc_set_finalizer(gc_handle_t handle, gc_object_t obj, gc_finalizer_t cb,
                 void *data)
{
    extra_info_node_t *node = NULL;
    gc_heap_t *vheap = (gc_heap_t *)handle;

    node = (extra_info_node_t *)BH_MALLOC(sizeof(extra_info_node_t));
    if (!node) {
        LOG_ERROR("alloc a new extra info node failed");
        return GC_FALSE;
    }
    memset(node, 0, sizeof(extra_info_node_t));

    node->finalizer = cb;
    node->obj = obj;
    node->data = data;

    LOCK_HEAP(vheap);
    if (!insert_extra_info_node(vheap, node)) {
        BH_FREE(node);
        UNLOCK_HEAP(vheap);
        return GC_FALSE;
    }
    UNLOCK_HEAP(vheap);

    gct_vm_set_extra_info_flag(obj, true);
    return GC_TRUE;
}
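/*
 * Usage sketch (added): registering a finalizer so the GC can run a
 * callback when the object is reclaimed. The callback name, its body and
 * the parameter shape below are hypothetical; see gc_finalizer_t in
 * ems_gc.h for the exact signature:
 */
#if 0 /* example only */
static void
on_reclaimed(void *obj, void *data)
{
    /* release whatever external resource is tied to obj */
    (void)obj;
    (void)data;
}

void
example_set_finalizer(gc_handle_t handle, gc_object_t obj, void *resource)
{
    if (!gc_set_finalizer(handle, obj, (gc_finalizer_t)on_reclaimed,
                          resource)) {
        /* out of memory: no finalizer was registered */
    }
    /* gc_unset_finalizer(handle, obj) removes it again. */
}
#endif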
void
gc_unset_finalizer(gc_handle_t handle, gc_object_t obj)
{
    gc_size_t index;
    gc_heap_t *vheap = (gc_heap_t *)handle;
    extra_info_node_t *node;

    LOCK_HEAP(vheap);
    node = gc_search_extra_info_node(vheap, obj, &index);
    if (!node) {
        UNLOCK_HEAP(vheap);
        return;
    }

    BH_FREE(node);
    bh_memmove_s(
        vheap->extra_info_nodes + index,
        (vheap->extra_info_node_capacity - index) * sizeof(extra_info_node_t *),
        vheap->extra_info_nodes + index + 1,
        (vheap->extra_info_node_cnt - index - 1) * sizeof(extra_info_node_t *));
    vheap->extra_info_node_cnt -= 1;
    UNLOCK_HEAP(vheap);

    gct_vm_set_extra_info_flag(obj, false);
}
#endif