/* ems_kfc.c */
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "ems_gc_internal.h"
  6. static gc_handle_t
  7. gc_init_internal(gc_heap_t *heap, char *base_addr, gc_size_t heap_max_size)
  8. {
  9. hmu_tree_node_t *root = NULL, *q = NULL;
  10. int ret;
  11. memset(heap, 0, sizeof *heap);
  12. memset(base_addr, 0, heap_max_size);
  13. ret = os_mutex_init(&heap->lock);
  14. if (ret != BHT_OK) {
  15. os_printf("[GC_ERROR]failed to init lock\n");
  16. return NULL;
  17. }
  18. /* init all data structures*/
  19. heap->current_size = heap_max_size;
  20. heap->base_addr = (gc_uint8 *)base_addr;
  21. heap->heap_id = (gc_handle_t)heap;
  22. heap->total_free_size = heap->current_size;
  23. heap->highmark_size = 0;
  24. root = &heap->kfc_tree_root;
  25. memset(root, 0, sizeof *root);
  26. root->size = sizeof *root;
  27. hmu_set_ut(&root->hmu_header, HMU_FC);
  28. hmu_set_size(&root->hmu_header, sizeof *root);
  29. q = (hmu_tree_node_t *)heap->base_addr;
  30. memset(q, 0, sizeof *q);
  31. hmu_set_ut(&q->hmu_header, HMU_FC);
  32. hmu_set_size(&q->hmu_header, heap->current_size);
  33. hmu_mark_pinuse(&q->hmu_header);
  34. root->right = q;
  35. q->parent = root;
  36. q->size = heap->current_size;
  37. bh_assert(root->size <= HMU_FC_NORMAL_MAX_SIZE);
  38. return heap;
  39. }
  40. gc_handle_t
  41. gc_init_with_pool(char *buf, gc_size_t buf_size)
  42. {
  43. char *buf_end = buf + buf_size;
  44. char *buf_aligned = (char *)(((uintptr_t)buf + 7) & (uintptr_t)~7);
  45. char *base_addr = buf_aligned + sizeof(gc_heap_t);
  46. gc_heap_t *heap = (gc_heap_t *)buf_aligned;
  47. gc_size_t heap_max_size;
  48. if (buf_size < APP_HEAP_SIZE_MIN) {
  49. os_printf("[GC_ERROR]heap init buf size (%" PRIu32 ") < %" PRIu32 "\n",
  50. buf_size, (uint32)APP_HEAP_SIZE_MIN);
  51. return NULL;
  52. }
  53. base_addr =
  54. (char *)(((uintptr_t)base_addr + 7) & (uintptr_t)~7) + GC_HEAD_PADDING;
  55. heap_max_size = (uint32)(buf_end - base_addr) & (uint32)~7;
  56. #if WASM_ENABLE_MEMORY_TRACING != 0
  57. os_printf("Heap created, total size: %u\n", buf_size);
  58. os_printf(" heap struct size: %u\n", sizeof(gc_heap_t));
  59. os_printf(" actual heap size: %u\n", heap_max_size);
  60. os_printf(" padding bytes: %u\n",
  61. buf_size - sizeof(gc_heap_t) - heap_max_size);
  62. #endif
  63. return gc_init_internal(heap, base_addr, heap_max_size);
  64. }
  65. gc_handle_t
  66. gc_init_with_struct_and_pool(char *struct_buf, gc_size_t struct_buf_size,
  67. char *pool_buf, gc_size_t pool_buf_size)
  68. {
  69. gc_heap_t *heap = (gc_heap_t *)struct_buf;
  70. char *base_addr = pool_buf + GC_HEAD_PADDING;
  71. char *pool_buf_end = pool_buf + pool_buf_size;
  72. gc_size_t heap_max_size;
  73. if ((((uintptr_t)struct_buf) & 7) != 0) {
  74. os_printf("[GC_ERROR]heap init struct buf not 8-byte aligned\n");
  75. return NULL;
  76. }
  77. if (struct_buf_size < sizeof(gc_handle_t)) {
  78. os_printf("[GC_ERROR]heap init struct buf size (%" PRIu32 ") < %zu\n",
  79. struct_buf_size, sizeof(gc_handle_t));
  80. return NULL;
  81. }
  82. if ((((uintptr_t)pool_buf) & 7) != 0) {
  83. os_printf("[GC_ERROR]heap init pool buf not 8-byte aligned\n");
  84. return NULL;
  85. }
  86. if (pool_buf_size < APP_HEAP_SIZE_MIN) {
  87. os_printf("[GC_ERROR]heap init buf size (%" PRIu32 ") < %u\n",
  88. pool_buf_size, APP_HEAP_SIZE_MIN);
  89. return NULL;
  90. }
  91. heap_max_size = (uint32)(pool_buf_end - base_addr) & (uint32)~7;
  92. #if WASM_ENABLE_MEMORY_TRACING != 0
  93. os_printf("Heap created, total size: %u\n",
  94. struct_buf_size + pool_buf_size);
  95. os_printf(" heap struct size: %u\n", sizeof(gc_heap_t));
  96. os_printf(" actual heap size: %u\n", heap_max_size);
  97. os_printf(" padding bytes: %u\n", pool_buf_size - heap_max_size);
  98. #endif
  99. return gc_init_internal(heap, base_addr, heap_max_size);
  100. }
  101. int
  102. gc_destroy_with_pool(gc_handle_t handle)
  103. {
  104. gc_heap_t *heap = (gc_heap_t *)handle;
  105. #if BH_ENABLE_GC_VERIFY != 0
  106. hmu_t *cur = (hmu_t *)heap->base_addr;
  107. hmu_t *end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
  108. if (!heap->is_heap_corrupted
  109. && (hmu_t *)((char *)cur + hmu_get_size(cur)) != end) {
  110. os_printf("Memory leak detected:\n");
  111. gci_dump(heap);
  112. #if WASM_ENABLE_SPEC_TEST != 0
  113. while (1) {
  114. }
  115. #endif
  116. }
  117. #endif
  118. os_mutex_destroy(&heap->lock);
  119. memset(heap->base_addr, 0, heap->current_size);
  120. memset(heap, 0, sizeof(gc_heap_t));
  121. return GC_SUCCESS;
  122. }
  123. uint32
  124. gc_get_heap_struct_size()
  125. {
  126. return sizeof(gc_heap_t);
  127. }
  128. static void
  129. adjust_ptr(uint8 **p_ptr, intptr_t offset)
  130. {
  131. if (*p_ptr)
  132. *p_ptr += offset;
  133. }
  134. int
  135. gc_migrate(gc_handle_t handle, char *pool_buf_new, gc_size_t pool_buf_size)
  136. {
  137. gc_heap_t *heap = (gc_heap_t *)handle;
  138. char *base_addr_new = pool_buf_new + GC_HEAD_PADDING;
  139. char *pool_buf_end = pool_buf_new + pool_buf_size;
  140. intptr_t offset = (uint8 *)base_addr_new - (uint8 *)heap->base_addr;
  141. hmu_t *cur = NULL, *end = NULL;
  142. hmu_tree_node_t *tree_node;
  143. gc_size_t heap_max_size, size;
  144. if ((((uintptr_t)pool_buf_new) & 7) != 0) {
  145. os_printf("[GC_ERROR]heap migrate pool buf not 8-byte aligned\n");
  146. return GC_ERROR;
  147. }
  148. heap_max_size = (uint32)(pool_buf_end - base_addr_new) & (uint32)~7;
  149. if (pool_buf_end < base_addr_new || heap_max_size < heap->current_size) {
  150. os_printf("[GC_ERROR]heap migrate invlaid pool buf size\n");
  151. return GC_ERROR;
  152. }
  153. if (offset == 0)
  154. return 0;
  155. if (heap->is_heap_corrupted) {
  156. os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
  157. return GC_ERROR;
  158. }
  159. heap->base_addr = (uint8 *)base_addr_new;
  160. adjust_ptr((uint8 **)&heap->kfc_tree_root.left, offset);
  161. adjust_ptr((uint8 **)&heap->kfc_tree_root.right, offset);
  162. adjust_ptr((uint8 **)&heap->kfc_tree_root.parent, offset);
  163. cur = (hmu_t *)heap->base_addr;
  164. end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
  165. while (cur < end) {
  166. size = hmu_get_size(cur);
  167. if (size <= 0 || size > (uint32)((uint8 *)end - (uint8 *)cur)) {
  168. os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
  169. heap->is_heap_corrupted = true;
  170. return GC_ERROR;
  171. }
  172. if (hmu_get_ut(cur) == HMU_FC && !HMU_IS_FC_NORMAL(size)) {
  173. tree_node = (hmu_tree_node_t *)cur;
  174. adjust_ptr((uint8 **)&tree_node->left, offset);
  175. adjust_ptr((uint8 **)&tree_node->right, offset);
  176. if (tree_node->parent != &heap->kfc_tree_root)
  177. /* The root node belongs to heap structure,
  178. it is fixed part and isn't changed. */
  179. adjust_ptr((uint8 **)&tree_node->parent, offset);
  180. }
  181. cur = (hmu_t *)((char *)cur + size);
  182. }
  183. if (cur != end) {
  184. os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
  185. heap->is_heap_corrupted = true;
  186. return GC_ERROR;
  187. }
  188. return 0;
  189. }
  190. bool
  191. gc_is_heap_corrupted(gc_handle_t handle)
  192. {
  193. gc_heap_t *heap = (gc_heap_t *)handle;
  194. return heap->is_heap_corrupted ? true : false;
  195. }
  196. #if BH_ENABLE_GC_VERIFY != 0
  197. void
  198. gci_verify_heap(gc_heap_t *heap)
  199. {
  200. hmu_t *cur = NULL, *end = NULL;
  201. bh_assert(heap && gci_is_heap_valid(heap));
  202. cur = (hmu_t *)heap->base_addr;
  203. end = (hmu_t *)(heap->base_addr + heap->current_size);
  204. while (cur < end) {
  205. hmu_verify(heap, cur);
  206. cur = (hmu_t *)((gc_uint8 *)cur + hmu_get_size(cur));
  207. }
  208. bh_assert(cur == end);
  209. }
  210. #endif
  211. void *
  212. gc_heap_stats(void *heap_arg, uint32 *stats, int size)
  213. {
  214. int i;
  215. gc_heap_t *heap = (gc_heap_t *)heap_arg;
  216. for (i = 0; i < size; i++) {
  217. switch (i) {
  218. case GC_STAT_TOTAL:
  219. stats[i] = heap->current_size;
  220. break;
  221. case GC_STAT_FREE:
  222. stats[i] = heap->total_free_size;
  223. break;
  224. case GC_STAT_HIGHMARK:
  225. stats[i] = heap->highmark_size;
  226. break;
  227. default:
  228. break;
  229. }
  230. }
  231. return heap;
  232. }