ems_kfc.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "ems_gc_internal.h"
static gc_handle_t
gc_init_internal(gc_heap_t *heap, char *base_addr, gc_size_t heap_max_size)
{
    hmu_tree_node_t *root = NULL, *q = NULL;
    int ret;

    memset(heap, 0, sizeof *heap);

    ret = os_mutex_init(&heap->lock);
    if (ret != BHT_OK) {
        os_printf("[GC_ERROR]failed to init lock\n");
        return NULL;
    }

    /* init all data structures */
    heap->current_size = heap_max_size;
    heap->base_addr = (gc_uint8 *)base_addr;
    heap->heap_id = (gc_handle_t)heap;

    heap->total_free_size = heap->current_size;
    heap->highmark_size = 0;

    /* the KFC tree root lives inside the heap struct itself, not in the pool */
    root = heap->kfc_tree_root = (hmu_tree_node_t *)heap->kfc_tree_root_buf;
    memset(root, 0, sizeof *root);
    root->size = sizeof *root;
    hmu_set_ut(&root->hmu_header, HMU_FC);
    hmu_set_size(&root->hmu_header, sizeof *root);

    /* the whole pool starts out as a single free chunk under the tree root */
    q = (hmu_tree_node_t *)heap->base_addr;
    memset(q, 0, sizeof *q);
    hmu_set_ut(&q->hmu_header, HMU_FC);
    hmu_set_size(&q->hmu_header, heap->current_size);

    ASSERT_TREE_NODE_ALIGNED_ACCESS(q);
    ASSERT_TREE_NODE_ALIGNED_ACCESS(root);

    hmu_mark_pinuse(&q->hmu_header);
    root->right = q;
    q->parent = root;
    q->size = heap->current_size;

    bh_assert(root->size <= HMU_FC_NORMAL_MAX_SIZE);

    return heap;
}
gc_handle_t
gc_init_with_pool(char *buf, gc_size_t buf_size)
{
    char *buf_end = buf + buf_size;
    char *buf_aligned = (char *)(((uintptr_t)buf + 7) & (uintptr_t)~7);
    char *base_addr = buf_aligned + sizeof(gc_heap_t);
    gc_heap_t *heap = (gc_heap_t *)buf_aligned;
    gc_size_t heap_max_size;

    if (buf_size < APP_HEAP_SIZE_MIN) {
        os_printf("[GC_ERROR]heap init buf size (%" PRIu32 ") < %" PRIu32 "\n",
                  buf_size, (uint32)APP_HEAP_SIZE_MIN);
        return NULL;
    }

    base_addr =
        (char *)(((uintptr_t)base_addr + 7) & (uintptr_t)~7) + GC_HEAD_PADDING;
    heap_max_size = (uint32)(buf_end - base_addr) & (uint32)~7;

#if WASM_ENABLE_MEMORY_TRACING != 0
    os_printf("Heap created, total size: %u\n", buf_size);
    os_printf("    heap struct size: %u\n", (uint32)sizeof(gc_heap_t));
    os_printf("    actual heap size: %u\n", heap_max_size);
    os_printf("    padding bytes: %u\n",
              (uint32)(buf_size - sizeof(gc_heap_t) - heap_max_size));
#endif
    return gc_init_internal(heap, base_addr, heap_max_size);
}
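
/*
 * Illustrative usage sketch (not part of the upstream file): set up a heap
 * over a single caller-provided buffer and tear it down again. The buffer
 * name and its 128 KB size are example values; the buffer only has to be
 * at least APP_HEAP_SIZE_MIN bytes, and gc_init_with_pool() carves the
 * gc_heap_t struct out of the front of the same buffer.
 */
#if 0 /* example only */
static char example_pool[128 * 1024];

static void
example_init_with_pool(void)
{
    gc_handle_t handle =
        gc_init_with_pool(example_pool, (gc_size_t)sizeof(example_pool));

    if (!handle) {
        os_printf("heap init failed\n");
        return;
    }
    /* ... allocate and free through the ems allocator here ... */
    gc_destroy_with_pool(handle);
}
#endif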
gc_handle_t
gc_init_with_struct_and_pool(char *struct_buf, gc_size_t struct_buf_size,
                             char *pool_buf, gc_size_t pool_buf_size)
{
    gc_heap_t *heap = (gc_heap_t *)struct_buf;
    char *base_addr = pool_buf + GC_HEAD_PADDING;
    char *pool_buf_end = pool_buf + pool_buf_size;
    gc_size_t heap_max_size;

    if ((((uintptr_t)struct_buf) & 7) != 0) {
        os_printf("[GC_ERROR]heap init struct buf not 8-byte aligned\n");
        return NULL;
    }

    if (struct_buf_size < sizeof(gc_handle_t)) {
        os_printf("[GC_ERROR]heap init struct buf size (%" PRIu32 ") < %zu\n",
                  struct_buf_size, sizeof(gc_handle_t));
        return NULL;
    }

    if ((((uintptr_t)pool_buf) & 7) != 0) {
        os_printf("[GC_ERROR]heap init pool buf not 8-byte aligned\n");
        return NULL;
    }

    if (pool_buf_size < APP_HEAP_SIZE_MIN) {
        os_printf("[GC_ERROR]heap init buf size (%" PRIu32 ") < %u\n",
                  pool_buf_size, (uint32)APP_HEAP_SIZE_MIN);
        return NULL;
    }

    heap_max_size = (uint32)(pool_buf_end - base_addr) & (uint32)~7;

#if WASM_ENABLE_MEMORY_TRACING != 0
    os_printf("Heap created, total size: %u\n",
              struct_buf_size + pool_buf_size);
    os_printf("    heap struct size: %u\n", (uint32)sizeof(gc_heap_t));
    os_printf("    actual heap size: %u\n", heap_max_size);
    os_printf("    padding bytes: %u\n", pool_buf_size - heap_max_size);
#endif
    return gc_init_internal(heap, base_addr, heap_max_size);
}
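
/*
 * Illustrative usage sketch (not part of the upstream file): keep the heap
 * control structure and the pool in two separate buffers, both 8-byte
 * aligned as the checks above require. Names and sizes are example values;
 * gc_get_heap_struct_size() (defined below) reports how large the struct
 * buffer really has to be.
 */
#if 0 /* example only */
/* uint64 arrays guarantee the 8-byte alignment checked above */
static uint64 example_struct_buf[128];
static uint64 example_pool_buf[64 * 1024 / sizeof(uint64)];

static void
example_init_with_struct_and_pool(void)
{
    gc_handle_t handle;

    if (sizeof(example_struct_buf) < gc_get_heap_struct_size())
        return; /* struct buffer too small to hold gc_heap_t */

    handle = gc_init_with_struct_and_pool(
        (char *)example_struct_buf, (gc_size_t)sizeof(example_struct_buf),
        (char *)example_pool_buf, (gc_size_t)sizeof(example_pool_buf));
    if (handle)
        gc_destroy_with_pool(handle);
}
#endif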
int
gc_destroy_with_pool(gc_handle_t handle)
{
    gc_heap_t *heap = (gc_heap_t *)handle;
    int ret = GC_SUCCESS;

#if BH_ENABLE_GC_VERIFY != 0
    hmu_t *cur = (hmu_t *)heap->base_addr;
    hmu_t *end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
    if (
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        !heap->is_heap_corrupted &&
#endif
        (hmu_t *)((char *)cur + hmu_get_size(cur)) != end) {
        os_printf("Memory leak detected:\n");
        gci_dump(heap);
        ret = GC_ERROR;
    }
#endif
    os_mutex_destroy(&heap->lock);
    memset(heap, 0, sizeof(gc_heap_t));
    return ret;
}
uint32
gc_get_heap_struct_size(void)
{
    return sizeof(gc_heap_t);
}
static void
adjust_ptr(uint8 **p_ptr, intptr_t offset)
{
    if (*p_ptr)
        *p_ptr = (uint8 *)((intptr_t)(*p_ptr) + offset);
}
int
gc_migrate(gc_handle_t handle, char *pool_buf_new, gc_size_t pool_buf_size)
{
    gc_heap_t *heap = (gc_heap_t *)handle;
    char *base_addr_new = pool_buf_new + GC_HEAD_PADDING;
    char *pool_buf_end = pool_buf_new + pool_buf_size;
    intptr_t offset = (uint8 *)base_addr_new - (uint8 *)heap->base_addr;
    hmu_t *cur = NULL, *end = NULL;
    hmu_tree_node_t *tree_node;
    uint8 **p_left, **p_right, **p_parent;
    gc_size_t heap_max_size, size;

    if ((((uintptr_t)pool_buf_new) & 7) != 0) {
        os_printf("[GC_ERROR]heap migrate pool buf not 8-byte aligned\n");
        return GC_ERROR;
    }

    heap_max_size = (uint32)(pool_buf_end - base_addr_new) & (uint32)~7;
    if (pool_buf_end < base_addr_new || heap_max_size < heap->current_size) {
        os_printf("[GC_ERROR]heap migrate invalid pool buf size\n");
        return GC_ERROR;
    }

    if (offset == 0)
        return 0;

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
        return GC_ERROR;
    }
#endif

    heap->base_addr = (uint8 *)base_addr_new;

    ASSERT_TREE_NODE_ALIGNED_ACCESS(heap->kfc_tree_root);

    p_left = (uint8 **)((uint8 *)heap->kfc_tree_root
                        + offsetof(hmu_tree_node_t, left));
    p_right = (uint8 **)((uint8 *)heap->kfc_tree_root
                         + offsetof(hmu_tree_node_t, right));
    p_parent = (uint8 **)((uint8 *)heap->kfc_tree_root
                          + offsetof(hmu_tree_node_t, parent));

    adjust_ptr(p_left, offset);
    adjust_ptr(p_right, offset);
    adjust_ptr(p_parent, offset);

    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
    while (cur < end) {
        size = hmu_get_size(cur);
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
        if (size <= 0 || size > (uint32)((uint8 *)end - (uint8 *)cur)) {
            os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
            heap->is_heap_corrupted = true;
            return GC_ERROR;
        }
#endif
        if (hmu_get_ut(cur) == HMU_FC && !HMU_IS_FC_NORMAL(size)) {
            tree_node = (hmu_tree_node_t *)cur;

            ASSERT_TREE_NODE_ALIGNED_ACCESS(tree_node);

            p_left = (uint8 **)((uint8 *)tree_node
                                + offsetof(hmu_tree_node_t, left));
            p_right = (uint8 **)((uint8 *)tree_node
                                 + offsetof(hmu_tree_node_t, right));
            p_parent = (uint8 **)((uint8 *)tree_node
                                  + offsetof(hmu_tree_node_t, parent));

            adjust_ptr(p_left, offset);
            adjust_ptr(p_right, offset);
            if (tree_node->parent != heap->kfc_tree_root)
                /* The root node belongs to the heap structure; it is a
                   fixed part of it and isn't relocated. */
                adjust_ptr(p_parent, offset);
        }
        cur = (hmu_t *)((char *)cur + size);
    }

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (cur != end) {
        os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
        heap->is_heap_corrupted = true;
        return GC_ERROR;
    }
#else
    bh_assert(cur == end);
#endif

    return 0;
}
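
/*
 * Illustrative usage sketch (not part of the upstream file): relocate the
 * heap into a larger pool. gc_migrate() only rewrites the internal
 * pointers; since the loop above walks the HMUs starting from the new base
 * address, the caller must have copied the old pool contents into the new
 * (8-byte aligned) buffer beforehand. The helper name and parameters are
 * example values only.
 */
#if 0 /* example only */
static int
example_migrate(gc_handle_t handle, char *old_pool, gc_size_t old_size,
                char *new_pool, gc_size_t new_size)
{
    /* move the heap data first, then let gc_migrate fix up the pointers */
    memcpy(new_pool, old_pool, old_size);
    return gc_migrate(handle, new_pool, new_size);
}
#endif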
bool
gc_is_heap_corrupted(gc_handle_t handle)
{
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    gc_heap_t *heap = (gc_heap_t *)handle;
    return heap->is_heap_corrupted ? true : false;
#else
    return false;
#endif
}
#if BH_ENABLE_GC_VERIFY != 0
void
gci_verify_heap(gc_heap_t *heap)
{
    hmu_t *cur = NULL, *end = NULL;

    bh_assert(heap && gci_is_heap_valid(heap));
    cur = (hmu_t *)heap->base_addr;
    end = (hmu_t *)(heap->base_addr + heap->current_size);
    while (cur < end) {
        hmu_verify(heap, cur);
        cur = (hmu_t *)((gc_uint8 *)cur + hmu_get_size(cur));
    }
    bh_assert(cur == end);
}
#endif
void *
gc_heap_stats(void *heap_arg, uint32 *stats, int size)
{
    int i;
    gc_heap_t *heap = (gc_heap_t *)heap_arg;

    for (i = 0; i < size; i++) {
        switch (i) {
            case GC_STAT_TOTAL:
                stats[i] = heap->current_size;
                break;
            case GC_STAT_FREE:
                stats[i] = heap->total_free_size;
                break;
            case GC_STAT_HIGHMARK:
                stats[i] = heap->highmark_size;
                break;
            default:
                break;
        }
    }

    return heap;
}
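
/*
 * Illustrative usage sketch (not part of the upstream file): query heap
 * statistics. This assumes the GC_STAT_* values are small consecutive
 * indices starting at GC_STAT_TOTAL, as the switch above suggests, so an
 * array of GC_STAT_HIGHMARK + 1 entries covers all three fields.
 */
#if 0 /* example only */
static void
example_dump_stats(gc_handle_t handle)
{
    uint32 stats[GC_STAT_HIGHMARK + 1] = { 0 };

    gc_heap_stats(handle, stats, GC_STAT_HIGHMARK + 1);
    os_printf("total: %u, free: %u, highmark: %u\n",
              stats[GC_STAT_TOTAL], stats[GC_STAT_FREE],
              stats[GC_STAT_HIGHMARK]);
}
#endif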