ems_alloc.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "ems_gc_internal.h"
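
/* Check whether @hmu lies inside the current address range of @heap */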
static int hmu_is_in_heap(gc_heap_t *heap, hmu_t *hmu)
{
    return heap && hmu && (gc_uint8 *)hmu >= heap->base_addr
           && (gc_uint8 *)hmu < heap->base_addr + heap->current_size;
}

/* Remove a node from the tree it belongs to */
/* @p cannot be NULL */
/* @p cannot be the ROOT node */
/* Node @p will be removed from the tree and its left, right and parent */
/* pointers will be set to NULL. Other fields will not be touched. */
/* The tree will be re-organized so that the order conditions are still satisfied. */
static void remove_tree_node(hmu_tree_node_t *p)
{
    hmu_tree_node_t *q = NULL, **slot = NULL;

    bh_assert(p);
    bh_assert(p->parent); /* @p cannot be the ROOT node */

    /* get the slot which holds the pointer to node p */
    if (p == p->parent->right) {
        slot = &p->parent->right;
    } else {
        bh_assert(p == p->parent->left); /* @p should be a child of its parent */
        slot = &p->parent->left;
    }

    /* algorithms used to remove node p */
    /* case 1: if p has no left child, replace p with its right child */
    /* case 2: if p has no right child, replace p with its left child */
    /* case 3: otherwise, find p's predecessor, remove it from the tree */
    /*         and replace p with it. Using the predecessor keeps the */
    /*         left <= root < right condition. */

    if (!p->left) {
        /* move right child up */
        *slot = p->right;
        if (p->right)
            p->right->parent = p->parent;

        p->left = p->right = p->parent = NULL;
        return;
    }

    if (!p->right) {
        /* move left child up */
        *slot = p->left;
        p->left->parent = p->parent; /* p->left can never be NULL. */

        p->left = p->right = p->parent = NULL;
        return;
    }

    /* both left & right exist, find p's predecessor at first */
    q = p->left;
    while (q->right)
        q = q->right;
    remove_tree_node(q); /* remove from the tree */

    *slot = q;
    q->parent = p->parent;
    q->left = p->left;
    q->right = p->right;
    if (q->left)
        q->left->parent = q;
    if (q->right)
        q->right->parent = q;

    p->left = p->right = p->parent = NULL;
}
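
/* Unlink a free chunk from the KFC pool: a normal-sized chunk is removed */
/* from its singly-linked free list, a big chunk from the size-ordered tree. */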
static void unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
{
    gc_size_t size;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(hmu_get_ut(hmu) == HMU_FC);

    size = hmu_get_size(hmu);

    if (HMU_IS_FC_NORMAL(size)) {
        uint32 node_idx = size >> 3;
        hmu_normal_node_t *node_prev = &heap->kfc_normal_list[node_idx];
        hmu_normal_node_t *node =
            get_hmu_normal_node_next(&heap->kfc_normal_list[node_idx]);
        while (node) {
            if ((hmu_t *)node == hmu) {
                set_hmu_normal_node_next(node_prev,
                                         get_hmu_normal_node_next(node));
                break;
            }
            node_prev = node;
            node = get_hmu_normal_node_next(node);
        }

        if (!node) {
            os_printf("[GC_ERROR] couldn't find the node in the normal list\n");
        }
    } else {
        remove_tree_node((hmu_tree_node_t *)hmu);
    }
}
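
/* Store the chunk size in the last word of a free chunk (a footer), so that */
/* the chunk that follows can locate this chunk's head when merging backwards. */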
static void hmu_set_free_size(hmu_t *hmu)
{
    gc_size_t size;

    bh_assert(hmu && hmu_get_ut(hmu) == HMU_FC);

    size = hmu_get_size(hmu);
    *((uint32 *)((char *)hmu + size) - 1) = size;
}

/* Add a free chunk back to the KFC */
/* @heap should not be NULL and it should be a valid heap */
/* @hmu should not be NULL and it should be an HMU of length @size inside @heap */
/* @hmu should be aligned to 8 */
/* @size should be positive and a multiple of 8 */
/* @hmu with size @size will be added into the KFC as a new FC. */
void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
{
    hmu_normal_node_t *np = NULL;
    hmu_tree_node_t *root = NULL, *tp = NULL, *node = NULL;
    uint32 node_idx;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
              && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
    bh_assert(((gc_uint32)(uintptr_t)hmu_to_obj(hmu) & 7) == 0);
    bh_assert(size > 0
              && ((gc_uint8 *)hmu) + size
                 <= heap->base_addr + heap->current_size);
    bh_assert(!(size & 7));

    hmu_set_ut(hmu, HMU_FC);
    hmu_set_size(hmu, size);
    hmu_set_free_size(hmu);

    if (HMU_IS_FC_NORMAL(size)) {
        np = (hmu_normal_node_t *)hmu;

        node_idx = size >> 3;
        np->next = heap->kfc_normal_list[node_idx].next;
        set_hmu_normal_node_next(&heap->kfc_normal_list[node_idx], np);
        return;
    }

    /* big block */
    node = (hmu_tree_node_t *)hmu;
    node->size = size;
    node->left = node->right = node->parent = NULL;

    /* find the proper node to link this new node to */
    root = &heap->kfc_tree_root;
    tp = root;
    bh_assert(tp->size < size);
    while (1) {
        if (tp->size < size) {
            if (!tp->right) {
                tp->right = node;
                node->parent = tp;
                break;
            }
            tp = tp->right;
        } else { /* tp->size >= size */
            if (!tp->left) {
                tp->left = node;
                node->parent = tp;
                break;
            }
            tp = tp->left;
        }
    }
}

/* Find a proper hmu for the required memory size */
/* @heap should not be NULL and it should be a valid heap */
/* @size should cover the header and it should be 8-byte aligned */
/* GC will not be performed here. */
/* Heap extension will not be performed here. */
/* A proper HMU will be returned, big enough to hold the header plus the */
/* given size, and aligned to 8 bytes. */
/* NULL will be returned if there is no proper HMU. */
static hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
{
    hmu_normal_node_t *node = NULL, *p = NULL;
    uint32 node_idx = 0, init_node_idx = 0;
    hmu_tree_node_t *root = NULL, *tp = NULL, *last_tp = NULL;
    hmu_t *next, *rest;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

    if (size < GC_SMALLEST_SIZE)
        size = GC_SMALLEST_SIZE;

    /* check the normal list first */
    if (HMU_IS_FC_NORMAL(size)) {
        /* find a non-empty slot in normal_node_list with a good size */
        init_node_idx = (size >> 3);
        for (node_idx = init_node_idx; node_idx < HMU_NORMAL_NODE_CNT;
             node_idx++) {
            node = heap->kfc_normal_list + node_idx;
            if (get_hmu_normal_node_next(node))
                break;
            node = NULL;
        }

        /* found one in the normal list */
        if (node) {
            bh_assert(node_idx >= init_node_idx);

            p = get_hmu_normal_node_next(node);
            node->next = p->next;
            bh_assert(((gc_int32)(uintptr_t)hmu_to_obj(p) & 7) == 0);

            if ((gc_size_t)node_idx != (uint32)init_node_idx
                && ((gc_size_t)node_idx << 3) >= size + GC_SMALLEST_SIZE) {
                /* the chunk is bigger than needed: split off the rest */
                rest = (hmu_t *)(((char *)p) + size);
                gci_add_fc(heap, rest, (node_idx << 3) - size);
                hmu_mark_pinuse(rest);
            } else {
                size = node_idx << 3;
                next = (hmu_t *)((char *)p + size);
                if (hmu_is_in_heap(heap, next))
                    hmu_mark_pinuse(next);
            }

#if GC_STAT_DATA != 0
            heap->total_free_size -= size;
            if ((heap->current_size - heap->total_free_size)
                > heap->highmark_size)
                heap->highmark_size =
                    heap->current_size - heap->total_free_size;
#endif

            hmu_set_size((hmu_t *)p, size);
            return (hmu_t *)p;
        }
    }

    /* need to find a node in the tree */
    root = &heap->kfc_tree_root;

    /* find the best node */
    bh_assert(root);
    tp = root->right;
    while (tp) {
        if (tp->size < size) {
            tp = tp->right;
            continue;
        }

        /* record the last node with size equal to or bigger than the given size */
        last_tp = tp;
        tp = tp->left;
    }

    if (last_tp) {
        bh_assert(last_tp->size >= size);

        /* alloc in last_tp */

        /* remove node last_tp from the tree */
        remove_tree_node(last_tp);

        if (last_tp->size >= size + GC_SMALLEST_SIZE) {
            rest = (hmu_t *)((char *)last_tp + size);
            gci_add_fc(heap, rest, last_tp->size - size);
            hmu_mark_pinuse(rest);
        } else {
            size = last_tp->size;
            next = (hmu_t *)((char *)last_tp + size);
            if (hmu_is_in_heap(heap, next))
                hmu_mark_pinuse(next);
        }

#if GC_STAT_DATA != 0
        heap->total_free_size -= size;
        if ((heap->current_size - heap->total_free_size) > heap->highmark_size)
            heap->highmark_size = heap->current_size - heap->total_free_size;
#endif

        hmu_set_size((hmu_t *)last_tp, size);
        return (hmu_t *)last_tp;
    }

    return NULL;
}

/* Find a proper HMU for the given size */
/* @heap should not be NULL and it should be a valid heap */
/* @size should cover the header and it should be 8-byte aligned */
/* This function will try several ways to satisfy the allocation request: */
/* 1. Try to find a proper HMU among the available ones. */
/* 2. Trigger GC if step 1 failed. */
/* 3. Try again to find a proper HMU. */
/* 4. Return NULL if step 3 failed. */
/* A proper HMU will be returned, big enough to hold the header plus the */
/* given size, and aligned to 8 bytes. */
/* NULL will be returned if there is no proper HMU. */
static hmu_t *alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
{
    hmu_t *ret = NULL;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

#ifdef GC_IN_EVERY_ALLOCATION
    gci_gc_heap(heap);
    ret = alloc_hmu(heap, size);
#else
# if GC_STAT_DATA != 0
    if (heap->gc_threshold < heap->total_free_size)
        ret = alloc_hmu(heap, size);
# else
    ret = alloc_hmu(heap, size);
# endif
    if (ret)
        return ret;

    /*gci_gc_heap(heap);*/ /* disable gc claim currently */
    ret = alloc_hmu(heap, size);
#endif

    return ret;
}
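
/* Running totals of bytes handed out and returned, kept for statistics */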
unsigned long g_total_malloc = 0;
unsigned long g_total_free = 0;
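
/* Allocate a block of @size bytes from @heap as an HMU_VO object. */
/* The heap lock is held during allocation; returns the object pointer, */
/* or NULL on failure (no fitting chunk, or integer overflow in the size). */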
gc_object_t _gc_alloc_vo_i_heap(void *vheap,
                                gc_size_t size ALLOC_EXTRA_PARAMETERS)
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = (gc_object_t)NULL;
    gc_size_t tot_size = 0, tot_size_unaligned;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size */
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

    gct_vm_mutex_lock(&heap->lock);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto FINISH;

    g_total_malloc += tot_size;

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if defined(GC_VERIFY)
    hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
#endif

    ret = hmu_to_obj(hmu);
    if (tot_size > tot_size_unaligned)
        /* clear buffer appended by GC_ALIGN_8() */
        memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);

#if BH_ENABLE_MEMORY_PROFILING != 0
    os_printf("HEAP.ALLOC: heap: %p, size: %u\n", heap, size);
#endif

FINISH:
    gct_vm_mutex_unlock(&heap->lock);
    return ret;
}
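
/* Re-allocate @ptr from @heap to hold at least @size bytes. If the existing */
/* chunk is already big enough it is returned as-is; otherwise a new HMU_VO */
/* block is allocated and zeroed, the old contents are copied over, and the */
/* old object is freed. Returns NULL on failure. */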
gc_object_t _gc_realloc_vo_i_heap(void *vheap, void *ptr,
                                  gc_size_t size ALLOC_EXTRA_PARAMETERS)
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL, *hmu_old = NULL;
    gc_object_t ret = (gc_object_t)NULL, obj_old = (gc_object_t)ptr;
    gc_size_t tot_size, tot_size_unaligned, tot_size_old = 0;
    gc_size_t obj_size, obj_size_old;

    /* hmu header + prefix + obj + suffix */
    tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
    /* aligned size */
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size)
        /* integer overflow */
        return NULL;

    if (obj_old) {
        hmu_old = obj_to_hmu(obj_old);
        tot_size_old = hmu_get_size(hmu_old);
        if (tot_size <= tot_size_old)
            /* the current node already meets the requirement */
            return obj_old;
    }

    gct_vm_mutex_lock(&heap->lock);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto FINISH;

    g_total_malloc += tot_size;

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if defined(GC_VERIFY)
    hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
#endif

    ret = hmu_to_obj(hmu);

#if BH_ENABLE_MEMORY_PROFILING != 0
    os_printf("HEAP.ALLOC: heap: %p, size: %u\n", heap, size);
#endif

FINISH:
    gct_vm_mutex_unlock(&heap->lock);

    if (ret) {
        obj_size = tot_size - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
        memset(ret, 0, obj_size);
        if (obj_old) {
            obj_size_old =
                tot_size_old - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
            bh_memcpy_s(ret, obj_size, obj_old, obj_size_old);
            gc_free_h(vheap, obj_old);
        }
    }

    return ret;
}

/* Do some checking to see if the given pointer is a possible valid heap */
/* Return GC_TRUE if all checks passed */
/* Return GC_FALSE otherwise */
int gci_is_heap_valid(gc_heap_t *heap)
{
    if (!heap)
        return GC_FALSE;
    if (heap->heap_id != (gc_handle_t)heap)
        return GC_FALSE;

    return GC_TRUE;
}
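
/* Free an object previously allocated from @heap. The freed space is merged */
/* with the preceding and following free chunks, if any, before being put */
/* back into the KFC. Returns GC_SUCCESS, or GC_ERROR for an invalid or */
/* already-freed object. */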
int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    hmu_t *prev = NULL;
    hmu_t *next = NULL;
    gc_size_t size = 0;
    hmu_type_t ut;
    int ret = GC_SUCCESS;

    if (!obj) {
        return GC_SUCCESS;
    }

    hmu = obj_to_hmu(obj);

    gct_vm_mutex_lock(&heap->lock);

    if ((gc_uint8 *)hmu >= heap->base_addr
        && (gc_uint8 *)hmu < heap->base_addr + heap->current_size) {
#ifdef GC_VERIFY
        hmu_verify(hmu);
#endif
        ut = hmu_get_ut(hmu);
        if (ut == HMU_VO) {
            if (hmu_is_vo_freed(hmu)) {
                bh_assert(0);
                ret = GC_ERROR;
                goto out;
            }

            size = hmu_get_size(hmu);

            g_total_free += size;

#if GC_STAT_DATA != 0
            heap->total_free_size += size;
#endif
#if BH_ENABLE_MEMORY_PROFILING != 0
            os_printf("HEAP.FREE, heap: %p, size: %u\n", heap, size);
#endif
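
            /* P-in-use cleared: the chunk in front of this one is free and
               its size sits in its footer word right before this hmu; use
               it to locate the previous chunk and merge backwards */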
            if (!hmu_get_pinuse(hmu)) {
                prev = (hmu_t *)((char *)hmu - *((int *)hmu - 1));
                if (hmu_is_in_heap(heap, prev) && hmu_get_ut(prev) == HMU_FC) {
                    size += hmu_get_size(prev);
                    hmu = prev;
                    unlink_hmu(heap, prev);
                }
            }

            /* merge forwards with the following chunk if it is also free */
            next = (hmu_t *)((char *)hmu + size);
            if (hmu_is_in_heap(heap, next)) {
                if (hmu_get_ut(next) == HMU_FC) {
                    size += hmu_get_size(next);
                    unlink_hmu(heap, next);
                    next = (hmu_t *)((char *)hmu + size);
                }
            }

            gci_add_fc(heap, hmu, size);

            if (hmu_is_in_heap(heap, next)) {
                hmu_unmark_pinuse(next);
            }
        } else {
            ret = GC_ERROR;
            goto out;
        }
        ret = GC_SUCCESS;
        goto out;
    }

out:
    gct_vm_mutex_unlock(&heap->lock);
    return ret;
}
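
/* Print statistics for @heap along with the global malloc/free counters */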
void gc_dump_heap_stats(gc_heap_t *heap)
{
    os_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
    os_printf("total free: %u, current: %u, highmark: %u, gc cnt: %u\n",
              heap->total_free_size, heap->current_size, heap->highmark_size,
              heap->total_gc_count);
    os_printf("g_total_malloc=%lu, g_total_free=%lu, occupied=%lu\n",
              g_total_malloc, g_total_free, g_total_malloc - g_total_free);
}

#ifdef GC_TEST
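/* Dump every hmu in @heap into @buf: offset, type, P-in-use bit, mark bit, */
/* a one-letter kind tag (V/J/j/F) and the object size (GC_TEST builds only) */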
void gci_dump(char *buf, gc_heap_t *heap)
{
    hmu_t *cur = NULL, *end = NULL;
    hmu_type_t ut;
    gc_size_t size;
    int i = 0;
    int p;
    char inuse = '?'; /* fallback tag for an unexpected hmu type */
    int mark;

    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)((char *)heap->base_addr + heap->current_size);

    while (cur < end) {
        ut = hmu_get_ut(cur);
        size = hmu_get_size(cur);
        p = hmu_get_pinuse(cur);
        mark = hmu_is_jo_marked(cur);

        if (ut == HMU_VO)
            inuse = 'V';
        else if (ut == HMU_JO)
            inuse = hmu_is_jo_marked(cur) ? 'J' : 'j';
        else if (ut == HMU_FC)
            inuse = 'F';

        bh_assert(size > 0);

        buf += sprintf(buf, "#%d %08x %x %x %d %c %d\n", i,
                       (uint32)((char *)cur - (char *)heap->base_addr), ut, p,
                       mark, inuse, hmu_obj_size(size));

        cur = (hmu_t *)((char *)cur + size);
        i++;
    }

    bh_assert(cur == end);
}
#endif