/* ems_kfc.c */
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "ems_gc_internal.h"
/* Create a GC heap inside a caller-provided buffer.
 *
 * buf/buf_size: the raw memory pool; must be at least 1024 bytes.
 * Returns the heap handle (which is the 8-byte-aligned start of the
 * buffer, where the gc_heap_t header is placed) or NULL on failure.
 * The remainder of the pool after the header becomes one large free
 * chunk registered in the heap's free-chunk tree.
 */
gc_handle_t
gc_init_with_pool(char *buf, gc_size_t buf_size)
{
    char *buf_end = buf + buf_size;
    /* 8-byte align the header placed at the front of the buffer */
    char *buf_aligned = (char*) (((uintptr_t) buf + 7) & (uintptr_t)~7);
    char *base_addr = buf_aligned + sizeof(gc_heap_t);
    gc_heap_t *heap = (gc_heap_t*) buf_aligned;
    gc_size_t heap_max_size;
    hmu_normal_node_t *p = NULL;
    hmu_tree_node_t *root = NULL, *q = NULL;
    int i = 0, ret;

    /* reject pools too small to hold the header plus a usable heap */
    if (buf_size < 1024) {
        os_printf("[GC_ERROR]heap_init_size(%d) < 1024\n", buf_size);
        return NULL;
    }

    /* 8-byte align the managed area and reserve the chunk-head padding;
       the managed size is then rounded down to a multiple of 8 */
    base_addr = (char*) (((uintptr_t) base_addr + 7) & (uintptr_t)~7) + GC_HEAD_PADDING;
    heap_max_size = (uint32)(buf_end - base_addr) & (uint32)~7;

    memset(heap, 0, sizeof *heap);
    memset(base_addr, 0, heap_max_size);

    ret = os_mutex_init(&heap->lock);
    if (ret != BHT_OK) {
        os_printf("[GC_ERROR]failed to init lock\n");
        return NULL;
    }

    /* init all data structures */
    heap->current_size = heap_max_size;
    heap->base_addr = (gc_uint8*)base_addr;
    heap->heap_id = (gc_handle_t)heap;
    heap->total_free_size = heap->current_size;
    heap->highmark_size = 0;

    for (i = 0; i < HMU_NORMAL_NODE_CNT; i++) {
        /* make normal node look like a FC*/
        p = &heap->kfc_normal_list[i];
        memset(p, 0, sizeof *p);
        hmu_set_ut(&p->hmu_header, HMU_FC);
        hmu_set_size(&p->hmu_header, sizeof *p);
    }

    /* the embedded tree root is a minimal free chunk; per the assert
       below it must be smaller than any chunk stored in the tree */
    root = &heap->kfc_tree_root;
    memset(root, 0, sizeof *root);
    root->size = sizeof *root;
    hmu_set_ut(&root->hmu_header, HMU_FC);
    hmu_set_size(&root->hmu_header, sizeof *root);

    /* the whole managed area starts out as a single free tree chunk,
       attached as the root's right child */
    q = (hmu_tree_node_t *) heap->base_addr;
    memset(q, 0, sizeof *q);
    hmu_set_ut(&q->hmu_header, HMU_FC);
    hmu_set_size(&q->hmu_header, heap->current_size);
    hmu_mark_pinuse(&q->hmu_header);
    root->right = q;
    q->parent = root;
    q->size = heap->current_size;

    bh_assert(root->size <= HMU_FC_NORMAL_MAX_SIZE
              && HMU_FC_NORMAL_MAX_SIZE < q->size);

#if BH_ENABLE_MEMORY_PROFILING != 0
    os_printf("heap is successfully initialized with max_size=%u.\n",
              heap_max_size);
#endif
    return heap;
}
  64. int
  65. gc_destroy_with_pool(gc_handle_t handle)
  66. {
  67. gc_heap_t *heap = (gc_heap_t *) handle;
  68. os_mutex_destroy(&heap->lock);
  69. memset(heap->base_addr, 0, heap->current_size);
  70. memset(heap, 0, sizeof(gc_heap_t));
  71. return GC_SUCCESS;
  72. }
  73. static void
  74. adjust_ptr(uint8 **p_ptr, intptr_t offset)
  75. {
  76. if (*p_ptr)
  77. *p_ptr += offset;
  78. }
  79. int
  80. gc_migrate(gc_handle_t handle, gc_handle_t handle_old)
  81. {
  82. gc_heap_t *heap = (gc_heap_t *) handle;
  83. intptr_t offset = (uint8*)handle - (uint8*)handle_old;
  84. hmu_t *cur = NULL, *end = NULL;
  85. hmu_tree_node_t *tree_node;
  86. gc_size_t size;
  87. os_mutex_init(&heap->lock);
  88. if (offset == 0)
  89. return 0;
  90. heap->heap_id = (gc_handle_t)heap;
  91. heap->base_addr += offset;
  92. adjust_ptr((uint8**)&heap->kfc_tree_root.left, offset);
  93. adjust_ptr((uint8**)&heap->kfc_tree_root.right, offset);
  94. adjust_ptr((uint8**)&heap->kfc_tree_root.parent, offset);
  95. cur = (hmu_t*)heap->base_addr;
  96. end = (hmu_t*)((char*)heap->base_addr + heap->current_size);
  97. while (cur < end) {
  98. size = hmu_get_size(cur);
  99. bh_assert(size > 0);
  100. if (!HMU_IS_FC_NORMAL(size)) {
  101. tree_node = (hmu_tree_node_t *)cur;
  102. adjust_ptr((uint8**)&tree_node->left, offset);
  103. adjust_ptr((uint8**)&tree_node->right, offset);
  104. adjust_ptr((uint8**)&tree_node->parent, offset);
  105. }
  106. cur = (hmu_t*)((char *)cur + size);
  107. }
  108. bh_assert(cur == end);
  109. return 0;
  110. }
  111. int
  112. gc_reinit_lock(gc_handle_t handle)
  113. {
  114. gc_heap_t *heap = (gc_heap_t *) handle;
  115. return os_mutex_init(&heap->lock);
  116. }
  117. void
  118. gc_destroy_lock(gc_handle_t handle)
  119. {
  120. gc_heap_t *heap = (gc_heap_t *) handle;
  121. os_mutex_destroy(&heap->lock);
  122. }
  123. #if BH_ENABLE_GC_VERIFY != 0
  124. void
  125. gci_verify_heap(gc_heap_t *heap)
  126. {
  127. hmu_t *cur = NULL, *end = NULL;
  128. bh_assert(heap && gci_is_heap_valid(heap));
  129. cur = (hmu_t *)heap->base_addr;
  130. end = (hmu_t *)(heap->base_addr + heap->current_size);
  131. while(cur < end)
  132. {
  133. hmu_verify(cur);
  134. cur = (hmu_t *)((gc_uint8*)cur + hmu_get_size(cur));
  135. }
  136. bh_assert(cur == end);
  137. }
  138. #endif
  139. void *
  140. gc_heap_stats(void *heap_arg, uint32* stats, int size)
  141. {
  142. int i;
  143. gc_heap_t *heap = (gc_heap_t *) heap_arg;
  144. for (i = 0; i < size; i++) {
  145. switch (i) {
  146. case GC_STAT_TOTAL:
  147. stats[i] = heap->current_size;
  148. break;
  149. case GC_STAT_FREE:
  150. stats[i] = heap->total_free_size;
  151. break;
  152. case GC_STAT_HIGHMARK:
  153. stats[i] = heap->highmark_size;
  154. break;
  155. default:
  156. break;
  157. }
  158. }
  159. return heap;
  160. }