ems_alloc.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "ems_gc_internal.h"
#if WASM_ENABLE_GC != 0
/* With GC enabled, locking must be reentrancy-aware: a reclaim pass runs
   with the heap lock already held, so both macros skip the mutex while
   heap->is_doing_reclaim is set to avoid recursive locking. */
#define LOCK_HEAP(heap) \
    do { \
        if (!heap->is_doing_reclaim) \
            /* If the heap is doing reclaim, it must have been locked, \
               we should not lock the heap again. */ \
            os_mutex_lock(&heap->lock); \
    } while (0)
#define UNLOCK_HEAP(heap) \
    do { \
        if (!heap->is_doing_reclaim) \
            /* If the heap is doing reclaim, it must have been locked, \
               and will be unlocked after reclaim, we should not \
               unlock the heap again. */ \
            os_mutex_unlock(&heap->lock); \
    } while (0)
#else
/* No GC: there is no reclaim phase, lock and unlock unconditionally. */
#define LOCK_HEAP(heap) os_mutex_lock(&heap->lock)
#define UNLOCK_HEAP(heap) os_mutex_unlock(&heap->lock)
#endif
  26. static inline bool
  27. hmu_is_in_heap(void *hmu, gc_uint8 *heap_base_addr, gc_uint8 *heap_end_addr)
  28. {
  29. gc_uint8 *addr = (gc_uint8 *)hmu;
  30. return (addr >= heap_base_addr && addr < heap_end_addr) ? true : false;
  31. }
/**
 * Remove a node from the tree it belongs to
 *
 * @param p the node to remove, can not be NULL, can not be the ROOT node
 *        the node will be removed from the tree, and the left, right and
 *        parent pointers of the node @p will be set to be NULL. Other fields
 *        won't be touched. The tree will be re-organized so that the order
 *        conditions are still satisified.
 *
 * @return true on success, false if a pointer that should lie inside the
 *         heap does not (the heap is then flagged as corrupted)
 */
static bool
remove_tree_node(gc_heap_t *heap, hmu_tree_node_t *p)
{
    hmu_tree_node_t *q = NULL, **slot = NULL, *parent;
    hmu_tree_node_t *root = &heap->kfc_tree_root;
    gc_uint8 *base_addr = heap->base_addr;
    gc_uint8 *end_addr = base_addr + heap->current_size;

    bh_assert(p);

    parent = p->parent;
    /* every node except the root must have an in-heap parent; the root
       itself lives inside gc_heap_t, not in the heap area */
    if (!parent || p == root /* p can not be the ROOT node */
        || !hmu_is_in_heap(p, base_addr, end_addr)
        || (parent != root && !hmu_is_in_heap(parent, base_addr, end_addr))) {
        goto fail;
    }

    /* get the slot which holds pointer to node p */
    if (p == p->parent->right) {
        slot = &p->parent->right;
    }
    else if (p == p->parent->left) {
        /* p should be a child of its parent */
        slot = &p->parent->left;
    }
    else {
        /* parent does not point back at p: tree is corrupted */
        goto fail;
    }

    /**
     * algorithms used to remove node p
     * case 1: if p has no left child, replace p with its right child
     * case 2: if p has no right child, replace p with its left child
     * case 3: otherwise, find p's predecessor, remove it from the tree
     *         and replace p with it.
     * use predecessor can keep the left <= root < right condition.
     */

    if (!p->left) {
        /* case 1: move right child up */
        *slot = p->right;
        if (p->right) {
            if (!hmu_is_in_heap(p->right, base_addr, end_addr)) {
                goto fail;
            }
            p->right->parent = p->parent;
        }

        p->left = p->right = p->parent = NULL;
        return true;
    }

    if (!p->right) {
        /* case 2: move left child up */
        *slot = p->left;
        if (!hmu_is_in_heap(p->left, base_addr, end_addr)) {
            goto fail;
        }
        /* p->left can never be NULL unless it is corrupted. */
        p->left->parent = p->parent;

        p->left = p->right = p->parent = NULL;
        return true;
    }

    /* case 3: both left & right exist, find p's predecessor at first */
    q = p->left;
    if (!hmu_is_in_heap(q, base_addr, end_addr)) {
        goto fail;
    }
    /* predecessor = right-most node of the left subtree */
    while (q->right) {
        q = q->right;
        if (!hmu_is_in_heap(q, base_addr, end_addr)) {
            goto fail;
        }
    }

    /* remove q from the tree; q has no right child, so this recursion
       terminates in case 1 or case 2 above */
    if (!remove_tree_node(heap, q))
        return false;

    /* splice q into p's former position and re-link both children */
    *slot = q;
    q->parent = p->parent;
    q->left = p->left;
    q->right = p->right;
    if (q->left) {
        if (!hmu_is_in_heap(q->left, base_addr, end_addr)) {
            goto fail;
        }
        q->left->parent = q;
    }
    if (q->right) {
        if (!hmu_is_in_heap(q->right, base_addr, end_addr)) {
            goto fail;
        }
        q->right->parent = q;
    }

    p->left = p->right = p->parent = NULL;

    return true;
fail:
    /* any out-of-heap pointer means the free-chunk tree is corrupted */
    heap->is_heap_corrupted = true;
    return false;
}
  133. static bool
  134. unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
  135. {
  136. gc_uint8 *base_addr, *end_addr;
  137. gc_size_t size;
  138. bh_assert(gci_is_heap_valid(heap));
  139. bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
  140. && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
  141. if (hmu_get_ut(hmu) != HMU_FC) {
  142. heap->is_heap_corrupted = true;
  143. return false;
  144. }
  145. base_addr = heap->base_addr;
  146. end_addr = base_addr + heap->current_size;
  147. size = hmu_get_size(hmu);
  148. if (HMU_IS_FC_NORMAL(size)) {
  149. uint32 node_idx = size >> 3;
  150. hmu_normal_node_t *node_prev = NULL, *node_next;
  151. hmu_normal_node_t *node = heap->kfc_normal_list[node_idx].next;
  152. while (node) {
  153. if (!hmu_is_in_heap(node, base_addr, end_addr)) {
  154. heap->is_heap_corrupted = true;
  155. return false;
  156. }
  157. node_next = get_hmu_normal_node_next(node);
  158. if ((hmu_t *)node == hmu) {
  159. if (!node_prev) /* list head */
  160. heap->kfc_normal_list[node_idx].next = node_next;
  161. else
  162. set_hmu_normal_node_next(node_prev, node_next);
  163. break;
  164. }
  165. node_prev = node;
  166. node = node_next;
  167. }
  168. if (!node) {
  169. os_printf("[GC_ERROR]couldn't find the node in the normal list\n");
  170. }
  171. }
  172. else {
  173. if (!remove_tree_node(heap, (hmu_tree_node_t *)hmu))
  174. return false;
  175. }
  176. return true;
  177. }
  178. static void
  179. hmu_set_free_size(hmu_t *hmu)
  180. {
  181. gc_size_t size;
  182. bh_assert(hmu && hmu_get_ut(hmu) == HMU_FC);
  183. size = hmu_get_size(hmu);
  184. *((uint32 *)((char *)hmu + size) - 1) = size;
  185. }
/**
 * Add free chunk back to KFC
 *
 * @param heap should not be NULL and it should be a valid heap
 * @param hmu should not be NULL and it should be a HMU of length @size inside
 *        @heap hmu should be 8-bytes aligned
 * @param size should be positive and multiple of 8
 *        hmu with size @size will be added into KFC as a new FC.
 *
 * @return true on success, false if the heap is found corrupted
 */
bool
gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
{
    gc_uint8 *base_addr, *end_addr;
    hmu_normal_node_t *np = NULL;
    hmu_tree_node_t *root = NULL, *tp = NULL, *node = NULL;
    uint32 node_idx;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(((gc_uint32)(uintptr_t)hmu_to_obj(hmu) & 7) == 0);
    bh_assert(size > 0
              && ((gc_uint8 *)hmu) + size
                     <= heap->base_addr + heap->current_size);
    bh_assert(!(size & 7));

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    hmu_set_ut(hmu, HMU_FC);
    hmu_set_size(hmu, size);
    /* stash the size at the chunk's tail for backward coalescing */
    hmu_set_free_size(hmu);

    if (HMU_IS_FC_NORMAL(size)) {
        /* normal-sized chunk: push onto the size-indexed free list */
        np = (hmu_normal_node_t *)hmu;
        if (!hmu_is_in_heap(np, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return false;
        }

        node_idx = size >> 3;
        set_hmu_normal_node_next(np, heap->kfc_normal_list[node_idx].next);
        heap->kfc_normal_list[node_idx].next = np;
        return true;
    }

    /* big block */
    node = (hmu_tree_node_t *)hmu;
    node->size = size;
    node->left = node->right = node->parent = NULL;

    /* find proper node to link this new node to */
    root = &heap->kfc_tree_root;
    tp = root;
    /* the root is a sentinel whose size is below any real chunk size */
    bh_assert(tp->size < size);
    while (1) {
        if (tp->size < size) {
            if (!tp->right) {
                tp->right = node;
                node->parent = tp;
                break;
            }
            tp = tp->right;
        }
        else { /* tp->size >= size */
            if (!tp->left) {
                tp->left = node;
                node->parent = tp;
                break;
            }
            tp = tp->left;
        }

        /* validate the child we just descended to before dereferencing
           it on the next iteration */
        if (!hmu_is_in_heap(tp, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return false;
        }
    }

    return true;
}
/**
 * Find a proper hmu for required memory size
 *
 * @param heap should not be NULL and should be a valid heap
 * @param size should cover the header and should be 8 bytes aligned
 *        GC will not be performed here.
 *        Heap extension will not be performed here.
 *
 * @return hmu allocated if success, which will be aligned to 8 bytes,
 *         NULL otherwise
 */
static hmu_t *
alloc_hmu(gc_heap_t *heap, gc_size_t size)
{
    gc_uint8 *base_addr, *end_addr;
    hmu_normal_list_t *normal_head = NULL;
    hmu_normal_node_t *p = NULL;
    uint32 node_idx = 0, init_node_idx = 0;
    hmu_tree_node_t *root = NULL, *tp = NULL, *last_tp = NULL;
    hmu_t *next, *rest;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

#if WASM_ENABLE_GC != 0
    /* In doing reclaim, gc must not alloc memory again. */
    bh_assert(!heap->is_doing_reclaim);
#endif

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    if (size < GC_SMALLEST_SIZE)
        size = GC_SMALLEST_SIZE;

    /* check normal list at first*/
    if (HMU_IS_FC_NORMAL(size)) {
        /* find a non-empty slot in normal_node_list with good size*/
        init_node_idx = (size >> 3);
        for (node_idx = init_node_idx; node_idx < HMU_NORMAL_NODE_CNT;
             node_idx++) {
            normal_head = heap->kfc_normal_list + node_idx;
            if (normal_head->next)
                break;
            normal_head = NULL;
        }

        /* found in normal list*/
        if (normal_head) {
            bh_assert(node_idx >= init_node_idx);

            /* pop the list head */
            p = normal_head->next;
            if (!hmu_is_in_heap(p, base_addr, end_addr)) {
                heap->is_heap_corrupted = true;
                return NULL;
            }
            normal_head->next = get_hmu_normal_node_next(p);
            /* the object payload must stay 8-byte aligned */
            if (((gc_int32)(uintptr_t)hmu_to_obj(p) & 7) != 0) {
                heap->is_heap_corrupted = true;
                return NULL;
            }

            if ((gc_size_t)node_idx != (uint32)init_node_idx
                /* with bigger size*/
                && ((gc_size_t)node_idx << 3) >= size + GC_SMALLEST_SIZE) {
                /* chunk is large enough to split: return the tail to
                   the KFC as a new free chunk */
                rest = (hmu_t *)(((char *)p) + size);
                if (!gci_add_fc(heap, rest, (node_idx << 3) - size)) {
                    return NULL;
                }
                hmu_mark_pinuse(rest);
            }
            else {
                /* take the whole chunk and tell the following chunk
                   that its predecessor is now in use */
                size = node_idx << 3;
                next = (hmu_t *)((char *)p + size);
                if (hmu_is_in_heap(next, base_addr, end_addr))
                    hmu_mark_pinuse(next);
            }

            heap->total_free_size -= size;
            /* track the peak of allocated bytes */
            if ((heap->current_size - heap->total_free_size)
                > heap->highmark_size)
                heap->highmark_size =
                    heap->current_size - heap->total_free_size;

            hmu_set_size((hmu_t *)p, size);
            return (hmu_t *)p;
        }
    }

    /* need to find a node in tree*/
    root = &heap->kfc_tree_root;

    /* find the best node*/
    bh_assert(root);
    tp = root->right;
    while (tp) {
        if (!hmu_is_in_heap(tp, base_addr, end_addr)) {
            heap->is_heap_corrupted = true;
            return NULL;
        }
        if (tp->size < size) {
            tp = tp->right;
            continue;
        }

        /* record the last node with size equal to or bigger than given size*/
        last_tp = tp;
        tp = tp->left;
    }

    if (last_tp) {
        bh_assert(last_tp->size >= size);

        /* alloc in last_p*/

        /* remove node last_p from tree*/
        if (!remove_tree_node(heap, last_tp))
            return NULL;

        if (last_tp->size >= size + GC_SMALLEST_SIZE) {
            /* split: return the remainder to the KFC */
            rest = (hmu_t *)((char *)last_tp + size);
            if (!gci_add_fc(heap, rest, last_tp->size - size))
                return NULL;
            hmu_mark_pinuse(rest);
        }
        else {
            /* take the whole chunk */
            size = last_tp->size;
            next = (hmu_t *)((char *)last_tp + size);
            if (hmu_is_in_heap(next, base_addr, end_addr))
                hmu_mark_pinuse(next);
        }

        heap->total_free_size -= size;
        if ((heap->current_size - heap->total_free_size) > heap->highmark_size)
            heap->highmark_size = heap->current_size - heap->total_free_size;

        hmu_set_size((hmu_t *)last_tp, size);
        return (hmu_t *)last_tp;
    }

    return NULL;
}
  380. #if WASM_ENABLE_GC != 0
  381. static int
  382. do_gc_heap(gc_heap_t *heap)
  383. {
  384. int ret = GC_SUCCESS;
  385. if (heap->is_reclaim_enabled) {
  386. UNLOCK_HEAP(heap);
  387. ret = gci_gc_heap(heap);
  388. LOCK_HEAP(heap);
  389. }
  390. return ret;
  391. }
  392. #endif
/**
 * Find a proper HMU with given size
 *
 * @param heap should not be NULL and should be a valid heap
 * @param size should cover the header and should be 8 bytes aligned
 *
 * Note: This function will try several ways to satisfy the allocation request:
 *       1. Find a proper on available HMUs.
 *       2. GC will be triggered if 1 failed.
 *       3. Find a proper on available HMUS.
 *       4. Return NULL if 3 failed
 *
 * @return hmu allocated if success, which will be aligned to 8 bytes,
 *         NULL otherwise
 */
static hmu_t *
alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
{
    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

#if WASM_ENABLE_GC != 0
#if GC_IN_EVERY_ALLOCATION != 0
    /* stress mode: run a GC cycle before every single allocation */
    if (GC_SUCCESS != do_gc_heap(heap))
        return NULL;
    return alloc_hmu(heap, size);
#else
    if (heap->total_free_size < heap->gc_threshold) {
        /* free memory is low: collect first, then fall through to the
           common allocation attempt below */
        if (GC_SUCCESS != do_gc_heap(heap))
            return NULL;
    }
    else {
        hmu_t *ret = NULL;
        /* plenty of free memory: try allocating directly and only run
           GC when that first attempt fails */
        if ((ret = alloc_hmu(heap, size))) {
            return ret;
        }
        if (GC_SUCCESS != do_gc_heap(heap))
            return NULL;
    }
#endif
#endif
    return alloc_hmu(heap, size);
}
/**
 * Allocate a VO from the heap. A VO chunk is explicitly released via
 * gc_free_vo rather than reclaimed by GC tracing (see hmu_unfree_vo and
 * the HMU_VO handling in gc_free_vo). With BH_ENABLE_GC_VERIFY the
 * internal variant additionally records the allocation site.
 *
 * @param vheap the heap handle
 * @param size requested payload size in bytes
 *
 * @return object pointer on success, NULL on overflow, corruption or OOM
 */
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo(void *vheap, gc_size_t size)
#else
gc_object_t
gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = (gc_object_t)NULL;
    gc_size_t tot_size = 0, tot_size_unaligned;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size*/
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }

    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);
    if (tot_size > tot_size_unaligned)
        /* clear buffer appended by GC_ALIGN_8() */
        memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);

finish:
    UNLOCK_HEAP(heap);

    return ret;
}
/**
 * Re-allocate a VO to at least @size payload bytes.
 * Fast paths: (1) the existing chunk is already large enough; (2) the
 * chunk immediately after it is free and together they cover the new
 * size, so the chunk is grown in place. Otherwise a new chunk is
 * allocated, the old contents are copied over, and the old chunk freed.
 * NOTE(review): tot_size_old is read before LOCK_HEAP is taken —
 * presumably callers never realloc a live object concurrently; confirm.
 *
 * @return the (possibly moved) object pointer, or NULL on failure
 *         (in which case the original object is left intact)
 */
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_realloc_vo(void *vheap, void *ptr, gc_size_t size)
#else
gc_object_t
gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
                       int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL, *hmu_old = NULL, *hmu_next;
    gc_object_t ret = (gc_object_t)NULL, obj_old = (gc_object_t)ptr;
    gc_size_t tot_size, tot_size_unaligned, tot_size_old = 0, tot_size_next;
    gc_size_t obj_size, obj_size_old;
    gc_uint8 *base_addr, *end_addr;
    hmu_type_t ut;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size*/
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }

    if (obj_old) {
        hmu_old = obj_to_hmu(obj_old);
        tot_size_old = hmu_get_size(hmu_old);
        if (tot_size <= tot_size_old)
            /* current node already meets requirement */
            return obj_old;
    }

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    LOCK_HEAP(heap);

    if (hmu_old) {
        hmu_next = (hmu_t *)((char *)hmu_old + tot_size_old);
        if (hmu_is_in_heap(hmu_next, base_addr, end_addr)) {
            ut = hmu_get_ut(hmu_next);
            tot_size_next = hmu_get_size(hmu_next);
            if (ut == HMU_FC && tot_size <= tot_size_old + tot_size_next) {
                /* current node and next node meets requirement:
                   grow in place by absorbing the adjacent free chunk */
                if (!unlink_hmu(heap, hmu_next)) {
                    UNLOCK_HEAP(heap);
                    return NULL;
                }
                hmu_set_size(hmu_old, tot_size);
                /* zero the newly appended region */
                memset((char *)hmu_old + tot_size_old, 0,
                       tot_size - tot_size_old);
#if BH_ENABLE_GC_VERIFY != 0
                hmu_init_prefix_and_suffix(hmu_old, tot_size, file, line);
#endif
                if (tot_size < tot_size_old + tot_size_next) {
                    /* return the unused remainder of the absorbed
                       chunk to the KFC */
                    hmu_next = (hmu_t *)((char *)hmu_old + tot_size);
                    tot_size_next = tot_size_old + tot_size_next - tot_size;
                    if (!gci_add_fc(heap, hmu_next, tot_size_next)) {
                        UNLOCK_HEAP(heap);
                        return NULL;
                    }
                }
                UNLOCK_HEAP(heap);
                return obj_old;
            }
        }
    }

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);

finish:

    if (ret) {
        /* zero the new payload, then copy the old contents over */
        obj_size = tot_size - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
        memset(ret, 0, obj_size);
        if (obj_old) {
            obj_size_old =
                tot_size_old - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
            bh_memcpy_s(ret, obj_size, obj_old, obj_size_old);
        }
    }

    UNLOCK_HEAP(heap);

    /* free the old chunk only after the copy succeeded */
    if (ret && obj_old)
        gc_free_vo(vheap, obj_old);

    return ret;
}
#if GC_MANUALLY != 0
/**
 * Manually release a WO when GC runs in manual mode: clearing the mark
 * makes the chunk eligible for reclaim (gc_alloc_wo marks new WOs in
 * this mode). The chunk itself is not returned to the free lists here.
 *
 * @param vheap the heap handle
 * @param ptr a WO object pointer inside the heap, must not be NULL
 */
void
gc_free_wo(void *vheap, void *ptr)
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    gc_object_t *obj = (gc_object_t *)ptr;
    hmu_t *hmu = obj_to_hmu(obj);

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(obj);
    bh_assert((gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(hmu_get_ut(hmu) == HMU_WO);

    hmu_unmark_wo(hmu);
    /* heap is only used by the asserts above in release builds */
    (void)heap;
}
#endif
/* see ems_gc.h for description*/
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_wo(void *vheap, gc_size_t size)
#else
gc_object_t
gc_alloc_wo_internal(void *vheap, gc_size_t size, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = (gc_object_t)NULL;
    gc_size_t tot_size = 0, tot_size_unaligned;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size*/
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }

    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    /* Do we need to memset the memory to 0? */
    /* memset((char *)hmu + sizeof(*hmu), 0, tot_size - sizeof(*hmu)); */

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the required size, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    hmu_set_ut(hmu, HMU_WO);
#if GC_MANUALLY != 0
    /* manual mode: start marked, survives until gc_free_wo clears it */
    hmu_mark_wo(hmu);
#else
    /* automatic mode: start unmarked, GC tracing decides liveness */
    hmu_unmark_wo(hmu);
#endif

#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

    ret = hmu_to_obj(hmu);
    if (tot_size > tot_size_unaligned)
        /* clear buffer appended by GC_ALIGN_8() */
        memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);

finish:
    UNLOCK_HEAP(heap);

    return ret;
}
  650. /**
  651. * Do some checking to see if given pointer is a possible valid heap
  652. * @return GC_TRUE if all checking passed, GC_FALSE otherwise
  653. */
  654. int
  655. gci_is_heap_valid(gc_heap_t *heap)
  656. {
  657. if (!heap)
  658. return GC_FALSE;
  659. if (heap->heap_id != (gc_handle_t)heap)
  660. return GC_FALSE;
  661. return GC_TRUE;
  662. }
/**
 * Explicitly free a VO chunk: the chunk is coalesced with adjacent free
 * chunks (backwards via the tail-size written by hmu_set_free_size,
 * forwards via the following header) and returned to the KFC.
 * NOTE(review): a non-NULL pointer outside the heap range is silently
 * treated as success — presumably intentional for foreign pointers;
 * confirm against callers.
 *
 * @return GC_SUCCESS on success (or NULL/out-of-heap input),
 *         GC_ERROR on double free, non-VO chunk or corruption
 */
#if BH_ENABLE_GC_VERIFY == 0
int
gc_free_vo(void *vheap, gc_object_t obj)
#else
int
gc_free_vo_internal(void *vheap, gc_object_t obj, const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    gc_uint8 *base_addr, *end_addr;
    hmu_t *hmu = NULL;
    hmu_t *prev = NULL;
    hmu_t *next = NULL;
    gc_size_t size = 0;
    hmu_type_t ut;
    int ret = GC_SUCCESS;

    if (!obj) {
        /* freeing NULL is a no-op */
        return GC_SUCCESS;
    }

    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, free memory failed.\n");
        return GC_ERROR;
    }

    hmu = obj_to_hmu(obj);

    base_addr = heap->base_addr;
    end_addr = base_addr + heap->current_size;

    LOCK_HEAP(heap);

    if (hmu_is_in_heap(hmu, base_addr, end_addr)) {
#if BH_ENABLE_GC_VERIFY != 0
        hmu_verify(heap, hmu);
#endif
        ut = hmu_get_ut(hmu);
        if (ut == HMU_VO) {
            if (hmu_is_vo_freed(hmu)) {
                /* double free */
                bh_assert(0);
                ret = GC_ERROR;
                goto out;
            }

            size = hmu_get_size(hmu);

            heap->total_free_size += size;
#if GC_STAT_DATA != 0
            heap->total_size_freed += size;
#endif

            if (!hmu_get_pinuse(hmu)) {
                /* previous chunk is free: its size sits in the 4 bytes
                   right before this header, merge backwards */
                prev = (hmu_t *)((char *)hmu - *((int *)hmu - 1));

                if (hmu_is_in_heap(prev, base_addr, end_addr)
                    && hmu_get_ut(prev) == HMU_FC) {
                    size += hmu_get_size(prev);
                    hmu = prev;
                    if (!unlink_hmu(heap, prev)) {
                        ret = GC_ERROR;
                        goto out;
                    }
                }
            }

            next = (hmu_t *)((char *)hmu + size);
            if (hmu_is_in_heap(next, base_addr, end_addr)) {
                /* following chunk is free: merge forwards */
                if (hmu_get_ut(next) == HMU_FC) {
                    size += hmu_get_size(next);
                    if (!unlink_hmu(heap, next)) {
                        ret = GC_ERROR;
                        goto out;
                    }
                    next = (hmu_t *)((char *)hmu + size);
                }
            }

            /* hand the (possibly merged) chunk back to the KFC */
            if (!gci_add_fc(heap, hmu, size)) {
                ret = GC_ERROR;
                goto out;
            }

            /* tell the chunk after the new free chunk that its
               predecessor is no longer in use */
            if (hmu_is_in_heap(next, base_addr, end_addr)) {
                hmu_unmark_pinuse(next);
            }
        }
        else {
            /* only VO chunks may be freed explicitly */
            ret = GC_ERROR;
            goto out;
        }
        ret = GC_SUCCESS;
        goto out;
    }

out:
    UNLOCK_HEAP(heap);
    return ret;
}
/**
 * Print a short summary of the heap's usage counters: free/current/
 * highmark sizes, plus cumulative alloc/free totals when GC_STAT_DATA
 * is enabled.
 */
void
gc_dump_heap_stats(gc_heap_t *heap)
{
    os_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
    os_printf("total free: %" PRIu32 ", current: %" PRIu32
              ", highmark: %" PRIu32 "\n",
              heap->total_free_size, heap->current_size, heap->highmark_size);
#if GC_STAT_DATA != 0
    os_printf("total size allocated: %" PRIu64 ", total size freed: %" PRIu64
              ", total occupied: %" PRIu64 "\n",
              heap->total_size_allocated, heap->total_size_freed,
              heap->total_size_allocated - heap->total_size_freed);
#endif
}
/**
 * Return the heap's high-water mark of allocated bytes
 * (peak of current_size - total_free_size, maintained in alloc_hmu).
 */
uint32
gc_get_heap_highmark_size(gc_heap_t *heap)
{
    return heap->highmark_size;
}
  767. void
  768. gci_dump(gc_heap_t *heap)
  769. {
  770. hmu_t *cur = NULL, *end = NULL;
  771. hmu_type_t ut;
  772. gc_size_t size;
  773. int i = 0, p, mark;
  774. char inuse = 'U';
  775. cur = (hmu_t *)heap->base_addr;
  776. end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
  777. while (cur < end) {
  778. ut = hmu_get_ut(cur);
  779. size = hmu_get_size(cur);
  780. p = hmu_get_pinuse(cur);
  781. mark = hmu_is_wo_marked(cur);
  782. if (ut == HMU_VO)
  783. inuse = 'V';
  784. else if (ut == HMU_WO)
  785. inuse = hmu_is_wo_marked(cur) ? 'W' : 'w';
  786. else if (ut == HMU_FC)
  787. inuse = 'F';
  788. if (size == 0 || size > (uint32)((uint8 *)end - (uint8 *)cur)) {
  789. os_printf("[GC_ERROR]Heap is corrupted, heap dump failed.\n");
  790. heap->is_heap_corrupted = true;
  791. return;
  792. }
  793. os_printf("#%d %08" PRIx32 " %" PRIx32 " %d %d"
  794. " %c %" PRId32 "\n",
  795. i, (int32)((char *)cur - (char *)heap->base_addr), (int32)ut,
  796. p, mark, inuse, (int32)hmu_obj_size(size));
  797. #if BH_ENABLE_GC_VERIFY != 0
  798. if (inuse == 'V') {
  799. gc_object_prefix_t *prefix = (gc_object_prefix_t *)(cur + 1);
  800. os_printf("#%s:%d\n", prefix->file_name, prefix->line_no);
  801. }
  802. #endif
  803. cur = (hmu_t *)((char *)cur + size);
  804. i++;
  805. }
  806. if (cur != end) {
  807. os_printf("[GC_ERROR]Heap is corrupted, heap dump failed.\n");
  808. heap->is_heap_corrupted = true;
  809. }
  810. }