/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#ifndef _EMS_GC_INTERNAL_H
#define _EMS_GC_INTERNAL_H

#ifdef __cplusplus
extern "C" {
#endif

#include "bh_platform.h"
#include "ems_gc.h"
  12. /* HMU (heap memory unit) basic block type */
  13. typedef enum hmu_type_enum {
  14. HMU_TYPE_MIN = 0,
  15. HMU_TYPE_MAX = 3,
  16. HMU_WO = 3, /* WASM Object */
  17. HMU_VO = 2, /* VM Object */
  18. HMU_FC = 1,
  19. HMU_FM = 0
  20. } hmu_type_t;
/* A heap memory unit: the 32-bit header word that precedes the payload
   of every chunk in the heap. The header packs the unit type, flag bits
   and the chunk size — see the HMU_* bit-field macros in this header. */
typedef struct hmu_struct {
    gc_uint32 header;
} hmu_t;
#if BH_ENABLE_GC_VERIFY != 0

/* Verify build: every object is wrapped with a debug prefix and suffix
   whose padding slots hold a known pattern, so heap over/under-writes
   can be detected by hmu_verify(). */

#if UINTPTR_MAX > UINT32_MAX
/* 2 prefix paddings for 64-bit pointer */
#define GC_OBJECT_PREFIX_PADDING_CNT 2
#else
/* 3 prefix paddings for 32-bit pointer */
#define GC_OBJECT_PREFIX_PADDING_CNT 3
#endif
#define GC_OBJECT_SUFFIX_PADDING_CNT 4
/* Pattern expected in every padding slot. */
#define GC_OBJECT_PADDING_VALUE (0x12345678)

/* Debug data placed immediately before the user payload: the allocation
   site (file/line) and size, then the padding words. The per-pointer-width
   padding count presumably keeps the payload 8-byte aligned — confirm. */
typedef struct gc_object_prefix {
    const char *file_name;
    gc_int32 line_no;
    gc_int32 size;
    gc_uint32 padding[GC_OBJECT_PREFIX_PADDING_CNT];
} gc_object_prefix_t;

/* Padding words placed immediately after the user payload. */
typedef struct gc_object_suffix {
    gc_uint32 padding[GC_OBJECT_SUFFIX_PADDING_CNT];
} gc_object_suffix_t;

#define OBJ_PREFIX_SIZE (sizeof(gc_object_prefix_t))
#define OBJ_SUFFIX_SIZE (sizeof(gc_object_suffix_t))

/* Record the allocation site and fill the paddings of the object held
   by `hmu` (implemented in the ems_gc sources). */
void
hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
                           const char *file_name, int line_no);

/* Check the prefix/suffix paddings of `hmu` are intact (implementation
   in the ems_gc sources). */
void
hmu_verify(void *vheap, hmu_t *hmu);

/* Step over the debug prefix/suffix to reach the payload / next data. */
#define SKIP_OBJ_PREFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_PREFIX_SIZE))
#define SKIP_OBJ_SUFFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_SUFFIX_SIZE))
/* Per-object overhead: header plus debug prefix/suffix. */
#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)

#else /* else of BH_ENABLE_GC_VERIFY */

/* Non-verify build: no debug paddings; the skip macros become no-ops
   (they add 0) and the only per-object overhead is the header. */
#define OBJ_PREFIX_SIZE 0
#define OBJ_SUFFIX_SIZE 0
#define SKIP_OBJ_PREFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_PREFIX_SIZE))
#define SKIP_OBJ_SUFFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_SUFFIX_SIZE))
#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)

#endif /* end of BH_ENABLE_GC_VERIFY */
/* Payload size of a chunk whose total size is `s`. */
#define hmu_obj_size(s) ((s)-OBJ_EXTRA_SIZE)

/* Round `s` up to the next multiple of 8. */
#define GC_ALIGN_8(s) (((uint32)(s) + 7) & (uint32)~7)

/* Smallest chunk the allocator deals in: header + paddings + an 8-byte
   minimum payload, rounded up to an 8-byte boundary. */
#define GC_SMALLEST_SIZE \
    GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + 8)

/* Total chunk size needed for a request of `x` payload bytes: payload
   is at least 8 bytes, and the result is 8-byte aligned. */
#define GC_GET_REAL_SIZE(x) \
    GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE \
               + (((x) > 8) ? (x) : 8))
  67. /**
  68. * hmu bit operation
  69. */
  70. #define SETBIT(v, offset) (v) |= ((uint32)1 << (offset))
  71. #define GETBIT(v, offset) ((v) & ((uint32)1 << (offset)) ? 1 : 0)
  72. #define CLRBIT(v, offset) (v) &= (~((uint32)1 << (offset)))
  73. /* clang-format off */
  74. #define SETBITS(v, offset, size, value) \
  75. do { \
  76. (v) &= ~((((uint32)1 << size) - 1) << offset); \
  77. (v) |= ((uint32)value << offset); \
  78. } while (0)
  79. #define CLRBITS(v, offset, size) \
  80. (v) &= ~((((uint32)1 << size) - 1) << offset)
  81. #define GETBITS(v, offset, size) \
  82. (((v) & (((((uint32)1 << size) - 1) << offset))) >> offset)
  83. /* clang-format on */
  84. /**
  85. * gc object layout definition
  86. */
  87. #define HMU_SIZE (sizeof(hmu_t))
  88. #define hmu_to_obj(hmu) (gc_object_t)(SKIP_OBJ_PREFIX((hmu_t *)(hmu) + 1))
  89. #define obj_to_hmu(obj) ((hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1)
  90. #define HMU_UT_SIZE 2
  91. #define HMU_UT_OFFSET 30
  92. /* clang-format off */
  93. #define hmu_get_ut(hmu) \
  94. GETBITS((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE)
  95. #define hmu_set_ut(hmu, type) \
  96. SETBITS((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE, type)
  97. #define hmu_is_ut_valid(tp) \
  98. (tp >= HMU_TYPE_MIN && tp <= HMU_TYPE_MAX)
  99. /* clang-format on */
/* P (previous-in-use) flag at header bit 29: set when the chunk
   immediately before this one in the heap is in use. */
#define HMU_P_OFFSET 29

#define hmu_mark_pinuse(hmu) SETBIT((hmu)->header, HMU_P_OFFSET)
#define hmu_unmark_pinuse(hmu) CLRBIT((hmu)->header, HMU_P_OFFSET)
#define hmu_get_pinuse(hmu) GETBIT((hmu)->header, HMU_P_OFFSET)

/* WASM-object chunks: a 27-bit VT field at bits [26..0] (not decoded in
   this header — see the ems_gc sources) and the MB (mark) bit at bit 28,
   toggled by the mark/unmark macros below. */
#define HMU_WO_VT_SIZE 27
#define HMU_WO_VT_OFFSET 0
#define HMU_WO_MB_OFFSET 28

#define hmu_mark_wo(hmu) SETBIT((hmu)->header, HMU_WO_MB_OFFSET)
#define hmu_unmark_wo(hmu) CLRBIT((hmu)->header, HMU_WO_MB_OFFSET)
#define hmu_is_wo_marked(hmu) GETBIT((hmu)->header, HMU_WO_MB_OFFSET)

/**
 * The hmu size is divisible by 8, its lowest 3 bits are 0, so we only
 * store its higher bits of bit [29..3], and bit [2..0] are not stored.
 * After that, the maximal heap size can be enlarged from (1<<27) = 128MB
 * to (1<<27) * 8 = 1GB.
 */
#define HMU_SIZE_SIZE 27
#define HMU_SIZE_OFFSET 0

/* VM-object chunks: FB (freed) flag at header bit 28. */
#define HMU_VO_FB_OFFSET 28

#define hmu_is_vo_freed(hmu) GETBIT((hmu)->header, HMU_VO_FB_OFFSET)
#define hmu_unfree_vo(hmu) CLRBIT((hmu)->header, HMU_VO_FB_OFFSET)

/* The stored size field holds size >> 3 (see the comment above). */
#define hmu_get_size(hmu) \
    (GETBITS((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE) << 3)
#define hmu_set_size(hmu, size) \
    SETBITS((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE, ((size) >> 3))
/**
 * HMU free chunk management
 */

/* Number of size-segregated "normal" free lists (overridable at build
   time); presumably indexed by size/8 — confirm in the ems_gc sources. */
#ifndef HMU_NORMAL_NODE_CNT
#define HMU_NORMAL_NODE_CNT 32
#endif

/* Largest chunk size handled by the normal lists; bigger free chunks
   go into the size tree (kfc_tree_root). */
#define HMU_FC_NORMAL_MAX_SIZE ((HMU_NORMAL_NODE_CNT - 1) << 3)
#define HMU_IS_FC_NORMAL(size) ((size) < HMU_FC_NORMAL_MAX_SIZE)

#if HMU_FC_NORMAL_MAX_SIZE >= GC_MAX_HEAP_SIZE
#error "Too small GC_MAX_HEAP_SIZE"
#endif
/* Node of a normal free list. The next link is stored as a
   self-relative byte offset (see get/set_hmu_normal_node_next below);
   an offset of 0 marks the end of the list. */
typedef struct hmu_normal_node {
    hmu_t hmu_header;
    gc_int32 next_offset;
} hmu_normal_node_t;

/* Head of one normal free list. */
typedef struct hmu_normal_list {
    hmu_normal_node_t *next;
} hmu_normal_list_t;
  144. static inline hmu_normal_node_t *
  145. get_hmu_normal_node_next(hmu_normal_node_t *node)
  146. {
  147. return node->next_offset
  148. ? (hmu_normal_node_t *)((uint8 *)node + node->next_offset)
  149. : NULL;
  150. }
  151. static inline void
  152. set_hmu_normal_node_next(hmu_normal_node_t *node, hmu_normal_node_t *next)
  153. {
  154. if (next) {
  155. bh_assert((uint8 *)next - (uint8 *)node < INT32_MAX);
  156. node->next_offset = (gc_int32)(intptr_t)((uint8 *)next - (uint8 *)node);
  157. }
  158. else {
  159. node->next_offset = 0;
  160. }
  161. }
/* Node of the free-chunk size tree (kfc_tree): a binary search tree
   ordered by chunk size (size[left] <= size[cur] < size[right]), with
   parent links so nodes can be unlinked in place. */
typedef struct hmu_tree_node {
    hmu_t hmu_header;
    gc_size_t size;
    struct hmu_tree_node *left;
    struct hmu_tree_node *right;
    struct hmu_tree_node *parent;
} hmu_tree_node_t;
/* A private gc heap instance. All sizes are in bytes. */
typedef struct gc_heap_struct {
    /* for double checking */
    gc_handle_t heap_id;
    /* start of the heap's memory region */
    gc_uint8 *base_addr;
    gc_size_t current_size;
    korp_mutex lock;
    /* free lists for small ("normal") free chunks */
    hmu_normal_list_t kfc_normal_list[HMU_NORMAL_NODE_CNT];
    /* order in kfc_tree is: size[left] <= size[cur] < size[right] */
    hmu_tree_node_t kfc_tree_root;
#if WASM_ENABLE_GC != 0
    /* for rootset enumeration of private heap */
    void *root_set;
#if WASM_ENABLE_THREAD_MGR == 0
    /* exec_env of current wasm module instance */
    void *exec_env;
#else
    /* thread cluster of current module instances */
    void *cluster;
#endif
    /* whether the fast mode of marking process that requires
       additional memory fails. When the fast mode fails, the
       marking process can still be done in the slow mode, which
       doesn't need additional memory (by walking through all
       blocks and marking successors of marked nodes until no new
       node is marked). TODO: slow mode is not implemented. */
    unsigned is_fast_marking_failed : 1;
    /* whether the heap is doing reclaim */
    unsigned is_doing_reclaim : 1;
    /* whether the heap can do reclaim */
    unsigned is_reclaim_enabled : 1;
#endif
    /* whether heap is corrupted, e.g. the hmu nodes are modified
       by user */
    bool is_heap_corrupted;
    gc_size_t init_size;
    gc_size_t highmark_size;
    gc_size_t total_free_size;
#if WASM_ENABLE_GC != 0
    /* GC trigger threshold, recomputed by gc_update_threshold() */
    gc_size_t gc_threshold;
    /* per-mille factor for the threshold (default 300, i.e. 30%) */
    gc_size_t gc_threshold_factor;
    gc_size_t total_gc_count;
    gc_size_t total_gc_time;
#endif
#if GC_STAT_DATA != 0
    gc_uint64 total_size_allocated;
    gc_uint64 total_size_freed;
#endif
} gc_heap_t;
  217. #if WASM_ENABLE_GC != 0
  218. #define GC_DEFAULT_THRESHOLD_FACTOR 300
  219. static inline void
  220. gc_update_threshold(gc_heap_t *heap)
  221. {
  222. heap->gc_threshold =
  223. heap->total_free_size * heap->gc_threshold_factor / 1000;
  224. }
/* Thin aliases binding this gc module to the platform mutex API and the
   wasm runtime's GC hooks, keeping the ems_gc sources platform-neutral. */
#define gct_vm_mutex_init os_mutex_init
#define gct_vm_mutex_destroy os_mutex_destroy
#define gct_vm_mutex_lock os_mutex_lock
#define gct_vm_mutex_unlock os_mutex_unlock
#define gct_vm_gc_prepare wasm_runtime_gc_prepare
#define gct_vm_gc_finished wasm_runtime_gc_finalize
#define gct_vm_begin_rootset_enumeration wasm_runtime_traverse_gc_rootset
#define gct_vm_get_wasm_object_ref_list wasm_runtime_get_wasm_object_ref_list
#endif /* end of WASM_ENABLE_GC != 0 */
/**
 * MISC internal used APIs
 */

/* Put the free chunk `hmu` of `size` bytes into the heap's free-chunk
   containers (normal lists / size tree); return value semantics are
   defined in the ems_gc implementation — presumably false on failure. */
bool
gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size);

/* Non-zero when `heap` passes the internal sanity checks (see the
   ems_gc implementation for what is verified). */
int
gci_is_heap_valid(gc_heap_t *heap);

/**
 * Verify heap integrity
 */
void
gci_verify_heap(gc_heap_t *heap);

/**
 * Dump heap nodes
 */
void
gci_dump(gc_heap_t *heap);
#ifdef __cplusplus
}
#endif

#endif /* end of _EMS_GC_INTERNAL_H */