/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ems_gc_internal.h"

#if !defined(NVALGRIND)
#include <valgrind/memcheck.h>
#endif
  20. static int hmu_is_in_heap(gc_heap_t* heap, hmu_t* hmu)
  21. {
  22. return heap && hmu && (gc_uint8*) hmu >= heap->base_addr
  23. && (gc_uint8*) hmu < heap->base_addr + heap->current_size;
  24. }
/* Remove node @p from the free-chunk tree it belongs to.
 *
 * @p must not be NULL and must not be the ROOT node (it needs a parent so
 * there is a slot to unlink it from).
 *
 * On return, @p's left/right/parent pointers are set to NULL; its other
 * fields are untouched.  The tree is re-linked so that the ordering
 * condition (left <= root < right) is still satisfied.
 */
BH_STATIC void remove_tree_node(hmu_tree_node_t *p)
{
    hmu_tree_node_t *q = NULL, **slot = NULL;

    bh_assert(p);
    bh_assert(p->parent); /* @p can not be the ROOT node */

    /* get the slot in the parent which holds the pointer to node p */
    if (p == p->parent->right) {
        slot = &p->parent->right;
    } else {
        bh_assert(p == p->parent->left); /* @p should be a child of its parent */
        slot = &p->parent->left;
    }

    /* algorithm used to remove node p:
     * case 1: p has no left child  -> replace p with its right child
     * case 2: p has no right child -> replace p with its left child
     * case 3: otherwise, find p's in-order predecessor, remove it from the
     *         tree (it has no right child, so the recursion terminates in
     *         case 1/2) and put it in p's place.
     * Using the predecessor keeps the left <= root < right condition. */
    if (!p->left) {
        /* case 1: move right child up */
        *slot = p->right;
        if (p->right)
            p->right->parent = p->parent;

        p->left = p->right = p->parent = NULL;
        return;
    }

    if (!p->right) {
        /* case 2: move left child up */
        *slot = p->left;
        p->left->parent = p->parent; /* p->left can never be NULL here */

        p->left = p->right = p->parent = NULL;
        return;
    }

    /* case 3: both children exist; find p's predecessor (rightmost node of
     * the left subtree) first */
    q = p->left;
    while (q->right)
        q = q->right;

    /* detach q, then splice it into p's position; note the recursive call
     * may update p->left, which is re-read below */
    remove_tree_node(q);
    *slot = q;
    q->parent = p->parent;
    q->left = p->left;
    q->right = p->right;
    if (q->left)
        q->left->parent = q;
    if (q->right)
        q->right->parent = q;

    p->left = p->right = p->parent = NULL;
}
  78. static void unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
  79. {
  80. gc_size_t size;
  81. bh_assert(gci_is_heap_valid(heap));
  82. bh_assert(
  83. hmu && (gc_uint8*) hmu >= heap->base_addr
  84. && (gc_uint8*) hmu < heap->base_addr + heap->current_size);
  85. bh_assert(hmu_get_ut(hmu) == HMU_FC);
  86. size = hmu_get_size(hmu);
  87. if (HMU_IS_FC_NORMAL(size)) {
  88. int node_idx = size >> 3;
  89. hmu_normal_node_t* node = heap->kfc_normal_list[node_idx].next;
  90. hmu_normal_node_t** p = &(heap->kfc_normal_list[node_idx].next);
  91. while (node) {
  92. if ((hmu_t*) node == hmu) {
  93. *p = node->next;
  94. break;
  95. }
  96. p = &(node->next);
  97. node = node->next;
  98. }
  99. if (!node) {
  100. bh_printf("[GC_ERROR]couldn't find the node in the normal list");
  101. }
  102. } else {
  103. remove_tree_node((hmu_tree_node_t *) hmu);
  104. }
  105. }
  106. static void hmu_set_free_size(hmu_t *hmu)
  107. {
  108. gc_size_t size;
  109. bh_assert(hmu && hmu_get_ut(hmu) == HMU_FC);
  110. size = hmu_get_size(hmu);
  111. *((int*) ((char*) hmu + size) - 1) = size;
  112. }
  113. /* Add free chunk back to KFC*/
  114. /* @heap should not be NULL and it should be a valid heap*/
  115. /* @hmu should not be NULL and it should be a HMU of length @size inside @heap*/
  116. /* @hmu should be aligned to 8*/
  117. /* @size should be positive and multiple of 8*/
  118. /* @hmu with size @size will be added into KFC as a new FC.*/
  119. void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
  120. {
  121. hmu_normal_node_t *np = NULL;
  122. hmu_tree_node_t *root = NULL, *tp = NULL, *node = NULL;
  123. int node_idx;
  124. bh_assert(gci_is_heap_valid(heap));
  125. bh_assert(
  126. hmu && (gc_uint8*) hmu >= heap->base_addr
  127. && (gc_uint8*) hmu < heap->base_addr + heap->current_size);
  128. bh_assert(((gc_uint32)(uintptr_t)hmu_to_obj(hmu) & 7) == 0);
  129. bh_assert(
  130. size > 0
  131. && ((gc_uint8*) hmu) + size
  132. <= heap->base_addr + heap->current_size);
  133. bh_assert(!(size & 7));
  134. hmu_set_ut(hmu, HMU_FC);
  135. hmu_set_size(hmu, size);
  136. hmu_set_free_size(hmu);
  137. if (HMU_IS_FC_NORMAL(size)) {
  138. np = (hmu_normal_node_t*) hmu;
  139. node_idx = size >> 3;
  140. np->next = heap->kfc_normal_list[node_idx].next;
  141. heap->kfc_normal_list[node_idx].next = np;
  142. return;
  143. }
  144. /* big block*/
  145. node = (hmu_tree_node_t*) hmu;
  146. node->size = size;
  147. node->left = node->right = node->parent = NULL;
  148. /* find proper node to link this new node to*/
  149. root = &heap->kfc_tree_root;
  150. tp = root;
  151. bh_assert(tp->size < size);
  152. while (1) {
  153. if (tp->size < size) {
  154. if (!tp->right) {
  155. tp->right = node;
  156. node->parent = tp;
  157. break;
  158. }
  159. tp = tp->right;
  160. } else /* tp->size >= size*/
  161. {
  162. if (!tp->left) {
  163. tp->left = node;
  164. node->parent = tp;
  165. break;
  166. }
  167. tp = tp->left;
  168. }
  169. }
  170. }
/* Find a proper hmu for the required memory size.
 *
 * @heap must be a valid heap; @size must cover the header and be 8-byte
 * aligned.  No GC and no heap extension are performed here.
 *
 * Returns an HMU (aligned to 8, large enough for header + payload), or
 * NULL if no suitable free chunk exists.  The winning chunk may be split,
 * with the remainder returned to the KFC. */
BH_STATIC hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
{
    hmu_normal_node_t *node = NULL, *p = NULL;
    int node_idx = 0, init_node_idx = 0;
    hmu_tree_node_t *root = NULL, *tp = NULL, *last_tp = NULL;
    hmu_t *next, *rest;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

    if (size < GC_SMALLEST_SIZE)
        size = GC_SMALLEST_SIZE;

    /* check the normal (small-chunk) lists first */
    if (HMU_IS_FC_NORMAL(size)) {
        /* find the first non-empty slot whose chunk size >= size
         * (slot k holds chunks of exactly k*8 bytes) */
        init_node_idx = (int) (size >> 3);
        for (node_idx = init_node_idx; node_idx < HMU_NORMAL_NODE_CNT;
                node_idx++) {
            node = heap->kfc_normal_list + node_idx;
            if (node->next)
                break;
            node = NULL; /* stays NULL when every remaining slot is empty */
        }

        /* node is NULL when nothing was found in the normal lists; fall
         * through to the tree search below */
        if (node) {
            bh_assert(node_idx >= init_node_idx);

            /* pop the head chunk of the chosen list */
            p = node->next;
            node->next = p->next;
            bh_assert(((gc_int32)(uintptr_t)hmu_to_obj(p) & 7) == 0);

            if ((gc_size_t) node_idx != init_node_idx
                    && ((gc_size_t)node_idx << 3) >= size + GC_SMALLEST_SIZE) {
                /* chunk is bigger than requested and the remainder is at
                 * least GC_SMALLEST_SIZE: split it and return the rest */
                rest = (hmu_t*) (((char *) p) + size);
                gci_add_fc(heap, rest, (node_idx << 3) - size);
                hmu_mark_pinuse(rest);
            } else {
                /* use the whole chunk; the physically-next chunk's
                 * predecessor is now in use */
                size = node_idx << 3;
                next = (hmu_t*) ((char*) p + size);
                if (hmu_is_in_heap(heap, next))
                    hmu_mark_pinuse(next);
            }

#if GC_STAT_DATA != 0
            heap->total_free_size -= size;
            if ((heap->current_size - heap->total_free_size)
                    > heap->highmark_size)
                heap->highmark_size = heap->current_size
                        - heap->total_free_size;
#endif

            hmu_set_size((hmu_t* ) p, size);
            return (hmu_t*) p;
        }
    }

    /* need to find a node in the tree */
    root = &heap->kfc_tree_root;

    /* best-fit search: walk down, remembering the last (i.e. smallest)
     * node whose size is >= the requested size */
    bh_assert(root);
    tp = root->right;
    while (tp) {
        if (tp->size < size) {
            tp = tp->right;
            continue;
        }

        /* record the last node with size equal to or bigger than given size */
        last_tp = tp;
        tp = tp->left;
    }

    if (last_tp) {
        bh_assert(last_tp->size >= size);

        /* allocate from last_tp: first remove it from the tree */
        remove_tree_node(last_tp);

        if (last_tp->size >= size + GC_SMALLEST_SIZE) {
            /* split and return the remainder to the KFC */
            rest = (hmu_t*) ((char*) last_tp + size);
            gci_add_fc(heap, rest, last_tp->size - size);
            hmu_mark_pinuse(rest);
        } else {
            /* use the whole chunk */
            size = last_tp->size;
            next = (hmu_t*) ((char*) last_tp + size);
            if (hmu_is_in_heap(heap, next))
                hmu_mark_pinuse(next);
        }

#if GC_STAT_DATA != 0
        heap->total_free_size -= size;
        if ((heap->current_size - heap->total_free_size) > heap->highmark_size)
            heap->highmark_size = heap->current_size - heap->total_free_size;
#endif

        hmu_set_size((hmu_t* ) last_tp, size);
        return (hmu_t*) last_tp;
    }

    return NULL;
}
/* Find a proper HMU for the given size, trying several strategies:
 *   1. look for a suitable chunk among the available HMUs;
 *   2. trigger GC if step 1 failed (currently disabled below);
 *   3. retry the search;
 *   4. return NULL if step 3 failed.
 *
 * @heap must be a valid heap; @size must cover the header and be 8-byte
 * aligned.  The returned HMU is 8-byte aligned, or NULL on failure. */
BH_STATIC hmu_t* alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
{
    hmu_t *ret = NULL;

    bh_assert(gci_is_heap_valid(heap));
    bh_assert(size > 0 && !(size & 7));

#ifdef GC_IN_EVERY_ALLOCATION
    /* debug configuration: force a full GC before every allocation */
    gci_gc_heap(heap);
    ret = alloc_hmu(heap, size);
#else
# if GC_STAT_DATA != 0
    /* only bother searching when enough free space remains */
    if (heap->gc_threshold < heap->total_free_size)
        ret = alloc_hmu(heap, size);
# else
    ret = alloc_hmu(heap, size);
# endif

    if (ret)
        return ret;

    /*gci_gc_heap(heap);*//* disable gc claim currently */
    ret = alloc_hmu(heap, size);
#endif
    return ret;
}
/* Global accounting counters: bytes handed out by _gc_alloc_vo_i_heap and
 * bytes returned via gc_free_i_heap (reported by gc_dump_heap_stats). */
unsigned long g_total_malloc = 0;
unsigned long g_total_free = 0;
/* Allocate a VO (value object) of @size bytes from heap @vheap.
 *
 * The heap lock is held for the whole allocation.  Returns the object
 * pointer (past the HMU header), or NULL on overflow or when no chunk
 * is available.  ALLOC_EXTRA_PARAMETERS supplies file_name/line_number
 * in GC_VERIFY builds. */
gc_object_t _gc_alloc_vo_i_heap(void *vheap,
        gc_size_t size ALLOC_EXTRA_PARAMETERS)
{
    gc_heap_t* heap = (gc_heap_t*) vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = (gc_object_t) NULL;
    gc_size_t tot_size = 0;

    /* total size: hmu header + prefix + payload + suffix, rounded up to 8 */
    tot_size = GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE);
    if (tot_size < size) /* unsigned wrap-around: requested size too large */
        return NULL;

    gct_vm_mutex_lock(&heap->lock);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto FINISH;

    g_total_malloc += tot_size;

    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if defined(GC_VERIFY)
    hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
#endif

    ret = hmu_to_obj(hmu);

#if BH_ENABLE_MEMORY_PROFILING != 0
    bh_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
#endif

FINISH:
    gct_vm_mutex_unlock(&heap->lock);
    return ret;
}
  329. /* see ems_gc.h for description*/
  330. gc_object_t _gc_alloc_jo_i_heap(void *vheap,
  331. gc_size_t size ALLOC_EXTRA_PARAMETERS)
  332. {
  333. gc_heap_t* heap = (gc_heap_t*) vheap;
  334. gc_object_t ret = (gc_object_t) NULL;
  335. hmu_t *hmu = NULL;
  336. gc_size_t tot_size = 0;
  337. bh_assert(gci_is_heap_valid(heap));
  338. /* align size*/
  339. tot_size = GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE); /* hmu header, prefix, suffix*/
  340. if (tot_size < size)
  341. return NULL;
  342. hmu = alloc_hmu_ex(heap, tot_size);
  343. if (!hmu)
  344. goto FINISH;
  345. /* reset all fields*/
  346. memset((char*) hmu + sizeof(*hmu), 0, tot_size - sizeof(*hmu));
  347. /* hmu->header = 0; */
  348. hmu_set_ut(hmu, HMU_JO);
  349. hmu_unmark_jo(hmu);
  350. #if defined(GC_VERIFY)
  351. hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
  352. #endif
  353. ret = hmu_to_obj(hmu);
  354. #if BH_ENABLE_MEMORY_PROFILING != 0
  355. bh_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
  356. #endif
  357. FINISH:
  358. return ret;
  359. }
  360. /* Do some checking to see if given pointer is a possible valid heap*/
  361. /* Return GC_TRUE if all checking passed*/
  362. /* Return GC_FALSE otherwise*/
  363. int gci_is_heap_valid(gc_heap_t *heap)
  364. {
  365. if (!heap)
  366. return GC_FALSE;
  367. if (heap->heap_id != (gc_handle_t) heap)
  368. return GC_FALSE;
  369. return GC_TRUE;
  370. }
/* Free object @obj back to heap @vheap.
 *
 * Only HMU_VO objects may be freed this way; freeing a freed VO or a
 * non-VO chunk yields GC_ERROR.  Freeing NULL is a no-op returning
 * GC_SUCCESS.  The freed chunk is coalesced with adjacent free chunks
 * (backwards via the size footer written by hmu_set_free_size, forwards
 * via the physically-next chunk) before being re-added to the KFC.
 *
 * NOTE(review): a pointer outside the heap range skips the whole body and
 * still returns GC_SUCCESS — confirm this is the intended contract. */
int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
{
    gc_heap_t* heap = (gc_heap_t*) vheap;
    hmu_t *hmu = NULL;
    hmu_t *prev = NULL;
    hmu_t *next = NULL;
    gc_size_t size = 0;
    hmu_type_t ut;
    int ret = GC_SUCCESS;

    if (!obj) {
        return GC_SUCCESS;
    }

    hmu = obj_to_hmu(obj);

    gct_vm_mutex_lock(&heap->lock);

    if ((gc_uint8 *) hmu >= heap->base_addr
            && (gc_uint8 *) hmu < heap->base_addr + heap->current_size) {
#ifdef GC_VERIFY
        hmu_verify(hmu);
#endif
        ut = hmu_get_ut(hmu);
        if (ut == HMU_VO) {
            if (hmu_is_vo_freed(hmu)) {
                /* double free */
                bh_assert(0);
                ret = GC_ERROR;
                goto out;
            }

            size = hmu_get_size(hmu);

            g_total_free += size;

#if GC_STAT_DATA != 0
            heap->total_free_size += size;
#endif
#if BH_ENABLE_MEMORY_PROFILING != 0
            bh_printf("HEAP.FREE, heap: %p, size: %u\n", heap, size);
#endif

            if (!hmu_get_pinuse(hmu)) {
                /* previous chunk is free: its size footer sits in the int
                 * just before this header, so we can step back to it */
                prev = (hmu_t*) ((char*) hmu - *((int*) hmu - 1));

                if (hmu_is_in_heap(heap, prev) && hmu_get_ut(prev) == HMU_FC) {
                    /* merge backwards */
                    size += hmu_get_size(prev);
                    hmu = prev;
                    unlink_hmu(heap, prev);
                }
            }

            next = (hmu_t*) ((char*) hmu + size);
            if (hmu_is_in_heap(heap, next)) {
                if (hmu_get_ut(next) == HMU_FC) {
                    /* merge forwards, then recompute the following chunk */
                    size += hmu_get_size(next);
                    unlink_hmu(heap, next);
                    next = (hmu_t*) ((char*) hmu + size);
                }
            }

            gci_add_fc(heap, hmu, size);

            if (hmu_is_in_heap(heap, next)) {
                /* the chunk before @next is now free */
                hmu_unmark_pinuse(next);
            }
        } else {
            /* not a VO chunk: refuse to free it */
            ret = GC_ERROR;
            goto out;
        }
        ret = GC_SUCCESS;
        goto out;
    }

out:
    gct_vm_mutex_unlock(&heap->lock);
    return ret;
}
  436. void gc_dump_heap_stats(gc_heap_t *heap)
  437. {
  438. bh_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
  439. bh_printf(
  440. "total malloc: totalfree: %u, current: %u, highmark: %u, gc cnt: %u\n",
  441. heap->total_free_size, heap->current_size, heap->highmark_size,
  442. heap->total_gc_count);
  443. bh_printf("g_total_malloc=%lu, g_total_free=%lu, occupied=%lu\n",
  444. g_total_malloc, g_total_free, g_total_malloc - g_total_free);
  445. }
  446. #ifdef GC_TEST
  447. void gci_dump(char* buf, gc_heap_t *heap)
  448. {
  449. hmu_t *cur = NULL, *end = NULL;
  450. hmu_type_t ut;
  451. gc_size_t size;
  452. int i = 0;
  453. int p;
  454. char inuse;
  455. int mark;
  456. cur = (hmu_t*)heap->base_addr;
  457. end = (hmu_t*)((char*)heap->base_addr + heap->current_size);
  458. while(cur < end)
  459. {
  460. ut = hmu_get_ut(cur);
  461. size = hmu_get_size(cur);
  462. p = hmu_get_pinuse(cur);
  463. mark = hmu_is_jo_marked (cur);
  464. if(ut == HMU_VO)
  465. inuse = 'V';
  466. else if(ut == HMU_JO)
  467. inuse = hmu_is_jo_marked(cur) ? 'J' : 'j';
  468. else if(ut == HMU_FC)
  469. inuse = 'F';
  470. bh_assert(size > 0);
  471. buf += sprintf(buf, "#%d %08x %x %x %d %c %d\n", i, (char*) cur - (char*) heap->base_addr, ut, p, mark, inuse, hmu_obj_size(size));
  472. cur = (hmu_t*)((char *)cur + size);
  473. i++;
  474. }
  475. bh_assert(cur == end);
  476. }
  477. #endif