@@ -48,7 +48,9 @@ is_table_64bit(WASMModule *module, uint32 table_idx)
        return !!(module->import_tables[table_idx].u.table.table_type.flags
                  & TABLE64_FLAG);
    else
-       return !!(module->tables[table_idx].table_type.flags & TABLE64_FLAG);
+       return !!(module->tables[table_idx - module->import_table_count]
+                     .table_type.flags
+                 & TABLE64_FLAG);

    return false;
}
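
The fix above relies on WAMR's index convention: a module-level table index
counts imported tables first, then module-defined ones, and the defined
tables live in a separate array that must be indexed from zero. A minimal
stand-alone sketch of that convention (with simplified, hypothetical structs,
not the real WASMModule layout):

#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint32_t import_table_count;
    const bool *import_is_64bit;  /* one flag per imported table */
    const bool *defined_is_64bit; /* one flag per module-defined table */
} ModuleSketch;

static bool
table_is_64bit(const ModuleSketch *m, uint32_t table_idx)
{
    if (table_idx < m->import_table_count)
        return m->import_is_64bit[table_idx];
    /* Defined tables are stored in their own array, so the module-level
       index must be rebased before the lookup; the old code indexed the
       defined-table array with the unrebased index and read out of range. */
    return m->defined_is_64bit[table_idx - m->import_table_count];
}
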
@@ -206,9 +208,14 @@ memory_realloc(void *mem_old, uint32 size_old, uint32 size_new, char *error_buf,
{
    uint8 *mem_new;
    bh_assert(size_new > size_old);
+
+   if ((mem_new = wasm_runtime_realloc(mem_old, size_new))) {
+       memset(mem_new + size_old, 0, size_new - size_old);
+       return mem_new;
+   }
+
    if ((mem_new = loader_malloc(size_new, error_buf, error_buf_size))) {
        bh_memcpy_s(mem_new, size_new, mem_old, size_old);
-       memset(mem_new + size_old, 0, size_new - size_old);
        wasm_runtime_free(mem_old);
    }
    return mem_new;
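
The new fast path grows the buffer in place and zeroes only the added tail:
realloc preserves the old bytes but leaves the extension uninitialized, and
on failure the original block is still owned by the caller, which is why the
loader_malloc + copy path remains as a fallback. A stand-alone sketch of the
same pattern on top of libc (a hypothetical helper, not the runtime's
allocator):

#include <stdlib.h>
#include <string.h>

static void *
grow_zeroed(void *old, size_t size_old, size_t size_new)
{
    unsigned char *p = realloc(old, size_new);
    if (!p)
        return NULL; /* 'old' is untouched and must still be freed */
    memset(p + size_old, 0, size_new - size_old); /* zero only the new tail */
    return p;
}
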
@@ -1183,6 +1190,8 @@ load_function_section(const uint8 *buf, const uint8 *buf_end,
            local_count += sub_local_count;
        }

+       bh_assert(p_code_end > p_code && *(p_code_end - 1) == WASM_OP_END);
+
        /* Alloc memory, layout: function structure + local types */
        code_size = (uint32)(p_code_end - p_code);

@@ -2564,7 +2573,7 @@ get_table_elem_type(const WASMModule *module, uint32 table_idx,
                module->import_tables[table_idx].u.table.table_type.elem_type;
        else
            *p_elem_type =
-               module->tables[module->import_table_count + table_idx]
+               module->tables[table_idx - module->import_table_count]
                    .table_type.elem_type;
    }
    return true;
@@ -2725,6 +2734,11 @@ load_from_sections(WASMModule *module, WASMSection *sections,
        section = section->next;
    }

+#if WASM_ENABLE_BULK_MEMORY != 0
+   bh_assert(!has_datacount_section
+             || module->data_seg_count == module->data_seg_count1);
+#endif
+
    module->aux_data_end_global_index = (uint32)-1;
    module->aux_heap_base_global_index = (uint32)-1;
    module->aux_stack_top_global_index = (uint32)-1;
@@ -2734,6 +2748,12 @@ load_from_sections(WASMModule *module, WASMSection *sections,
    for (i = 0; i < module->export_count; i++, export ++) {
        if (export->kind == EXPORT_KIND_GLOBAL) {
            if (!strcmp(export->name, "__heap_base")) {
+               if (export->index < module->import_global_count) {
+                   LOG_DEBUG("Skip the process if __heap_base is imported "
+                             "instead of being a local global");
+                   continue;
+               }
+
                global_index = export->index - module->import_global_count;
                global = module->globals + global_index;
                if (global->type.val_type == VALUE_TYPE_I32
@@ -2748,6 +2768,12 @@ load_from_sections(WASMModule *module, WASMSection *sections,
                }
            }
            else if (!strcmp(export->name, "__data_end")) {
+               if (export->index < module->import_global_count) {
+                   LOG_DEBUG("Skip the process if __data_end is imported "
+                             "instead of being a local global");
+                   continue;
+               }
+
                global_index = export->index - module->import_global_count;
                global = module->globals + global_index;
                if (global->type.val_type == VALUE_TYPE_I32
@@ -2942,9 +2968,7 @@ load_from_sections(WASMModule *module, WASMSection *sections,
    }

    if (!module->possible_memory_grow) {
-       WASMMemoryImport *memory_import;
-       WASMMemory *memory;
-
+#if WASM_ENABLE_SHRUNK_MEMORY != 0
        if (aux_data_end_global && aux_heap_base_global
            && aux_stack_top_global) {
            uint64 init_memory_size;
@@ -2954,7 +2978,8 @@ load_from_sections(WASMModule *module, WASMSection *sections,
             * valid range of uint32 */
            if (shrunk_memory_size <= UINT32_MAX) {
                if (module->import_memory_count) {
-                   memory_import = &module->import_memories[0].u.memory;
+                   WASMMemoryImport *memory_import =
+                       &module->import_memories[0].u.memory;
                    init_memory_size =
                        (uint64)memory_import->mem_type.num_bytes_per_page
                        * memory_import->mem_type.init_page_count;
@@ -2969,7 +2994,7 @@ load_from_sections(WASMModule *module, WASMSection *sections,
                }

                if (module->memory_count) {
-                   memory = &module->memories[0];
+                   WASMMemory *memory = &module->memories[0];
                    init_memory_size = (uint64)memory->num_bytes_per_page
                                       * memory->init_page_count;
                    if (shrunk_memory_size <= init_memory_size) {
@@ -2982,9 +3007,11 @@ load_from_sections(WASMModule *module, WASMSection *sections,
                }
            }
        }
+#endif /* WASM_ENABLE_SHRUNK_MEMORY != 0 */

        if (module->import_memory_count) {
-           memory_import = &module->import_memories[0].u.memory;
+           WASMMemoryImport *memory_import =
+               &module->import_memories[0].u.memory;
            if (memory_import->mem_type.init_page_count < DEFAULT_MAX_PAGES) {
                memory_import->mem_type.num_bytes_per_page *=
                    memory_import->mem_type.init_page_count;
@@ -2998,7 +3025,7 @@ load_from_sections(WASMModule *module, WASMSection *sections,
        }

        if (module->memory_count) {
-           memory = &module->memories[0];
+           WASMMemory *memory = &module->memories[0];
            if (memory->init_page_count < DEFAULT_MAX_PAGES) {
                memory->num_bytes_per_page *= memory->init_page_count;
                if (memory->init_page_count > 0)
@@ -4030,11 +4057,16 @@ typedef struct WASMLoaderContext {
    /* preserved local offset */
    int16 preserved_local_offset;

-   /* const buffer */
-   uint8 *const_buf;
-   uint16 num_const;
-   uint16 const_cell_num;
-   uint32 const_buf_size;
+   /* const buffer for i64 and f64 consts, note that the raw bytes
+    * of i64 and f64 are the same, so we read an i64 value from an
+    * f64 const with its raw bytes, something like `*(int64 *)&f64` */
+   int64 *i64_consts;
+   uint32 i64_const_max_num;
+   uint32 i64_const_num;
+   /* const buffer for i32 and f32 consts */
+   int32 *i32_consts;
+   uint32 i32_const_max_num;
+   uint32 i32_const_num;

    /* processed code */
    uint8 *p_code_compiled;
@@ -4047,12 +4079,6 @@ typedef struct WASMLoaderContext {
#endif
} WASMLoaderContext;

-typedef struct Const {
-    WASMValue value;
-    uint16 slot_index;
-    uint8 value_type;
-} Const;
-
#define CHECK_CSP_PUSH()                                          \
    do {                                                          \
        if (ctx->frame_csp >= ctx->frame_csp_boundary) {          \
@@ -4207,8 +4233,10 @@ wasm_loader_ctx_destroy(WASMLoaderContext *ctx)
#if WASM_ENABLE_FAST_INTERP != 0
    if (ctx->frame_offset_bottom)
        wasm_runtime_free(ctx->frame_offset_bottom);
-   if (ctx->const_buf)
-       wasm_runtime_free(ctx->const_buf);
+   if (ctx->i64_consts)
+       wasm_runtime_free(ctx->i64_consts);
+   if (ctx->i32_consts)
+       wasm_runtime_free(ctx->i32_consts);
#endif
    wasm_runtime_free(ctx);
}
@@ -4242,10 +4270,15 @@ wasm_loader_ctx_init(WASMFunction *func, char *error_buf, uint32 error_buf_size)
        goto fail;
    loader_ctx->frame_offset_boundary = loader_ctx->frame_offset_bottom + 32;

-   loader_ctx->num_const = 0;
-   loader_ctx->const_buf_size = sizeof(Const) * 8;
-   if (!(loader_ctx->const_buf = loader_malloc(loader_ctx->const_buf_size,
-                                               error_buf, error_buf_size)))
+   loader_ctx->i64_const_max_num = 8;
+   if (!(loader_ctx->i64_consts =
+             loader_malloc(sizeof(int64) * loader_ctx->i64_const_max_num,
+                           error_buf, error_buf_size)))
+       goto fail;
+   loader_ctx->i32_const_max_num = 8;
+   if (!(loader_ctx->i32_consts =
+             loader_malloc(sizeof(int32) * loader_ctx->i32_const_max_num,
+                           error_buf, error_buf_size)))
        goto fail;

    if (func->param_cell_num >= (int32)INT16_MAX - func->local_cell_num) {
@@ -5068,107 +5101,116 @@ wasm_loader_push_pop_frame_ref_offset(WASMLoaderContext *ctx, uint8 pop_cnt,
    return true;
}

+static int
+cmp_i64_const(const void *p_i64_const1, const void *p_i64_const2)
+{
+    int64 i64_const1 = *(int64 *)p_i64_const1;
+    int64 i64_const2 = *(int64 *)p_i64_const2;
+
+    return (i64_const1 < i64_const2) ? -1 : (i64_const1 > i64_const2) ? 1 : 0;
+}
+
+static int
+cmp_i32_const(const void *p_i32_const1, const void *p_i32_const2)
+{
+    int32 i32_const1 = *(int32 *)p_i32_const1;
+    int32 i32_const2 = *(int32 *)p_i32_const2;
+
+    return (i32_const1 < i32_const2) ? -1 : (i32_const1 > i32_const2) ? 1 : 0;
+}
+
static bool
wasm_loader_get_const_offset(WASMLoaderContext *ctx, uint8 type, void *value,
                             int16 *offset, char *error_buf,
                             uint32 error_buf_size)
{
-   int8 bytes_to_increase;
-   int16 operand_offset = 0;
-   Const *c;
-
-   /* Search existing constant */
-   for (c = (Const *)ctx->const_buf;
-        (uint8 *)c < ctx->const_buf + ctx->num_const * sizeof(Const); c++) {
-       if ((type == c->value_type)
-           && ((type == VALUE_TYPE_I64 && *(int64 *)value == c->value.i64)
-               || (type == VALUE_TYPE_I32 && *(int32 *)value == c->value.i32)
-#if WASM_ENABLE_REF_TYPES != 0
-               || (type == VALUE_TYPE_FUNCREF
-                   && *(int32 *)value == c->value.i32)
-               || (type == VALUE_TYPE_EXTERNREF
-                   && *(int32 *)value == c->value.i32)
-#endif
-               || (type == VALUE_TYPE_F64
-                   && (0 == memcmp(value, &(c->value.f64), sizeof(float64))))
-               || (type == VALUE_TYPE_F32
-                   && (0
-                       == memcmp(value, &(c->value.f32), sizeof(float32)))))) {
-           operand_offset = c->slot_index;
-           break;
-       }
-       if (c->value_type == VALUE_TYPE_I64 || c->value_type == VALUE_TYPE_F64)
-           operand_offset += 2;
-       else
-           operand_offset += 1;
-   }
+   if (!ctx->p_code_compiled) {
+       /* Treat i64 and f64 as the same by reading i64 value from
+          the raw bytes */
+       if (type == VALUE_TYPE_I64 || type == VALUE_TYPE_F64) {
+           /* No slot left, emit const instead */
+           if (ctx->i64_const_num * 2 + ctx->i32_const_num > INT16_MAX - 2) {
+               *offset = 0;
+               return true;
+           }
+
+           /* Traverse the list if the const num is small */
+           if (ctx->i64_const_num < 10) {
+               for (uint32 i = 0; i < ctx->i64_const_num; i++) {
+                   if (ctx->i64_consts[i] == *(int64 *)value) {
+                       *offset = -1;
+                       return true;
+                   }
+               }
+           }

-   if ((uint8 *)c == ctx->const_buf + ctx->num_const * sizeof(Const)) {
-       /* New constant, append to the const buffer */
-       if ((type == VALUE_TYPE_F64) || (type == VALUE_TYPE_I64)) {
-           bytes_to_increase = 2;
+           if (ctx->i64_const_num >= ctx->i64_const_max_num) {
+               MEM_REALLOC(ctx->i64_consts,
+                           sizeof(int64) * ctx->i64_const_max_num,
+                           sizeof(int64) * (ctx->i64_const_max_num * 2));
+               ctx->i64_const_max_num *= 2;
+           }
+           ctx->i64_consts[ctx->i64_const_num++] = *(int64 *)value;
        }
        else {
-           bytes_to_increase = 1;
-       }
+           /* Treat i32 and f32 as the same by reading i32 value from
+              the raw bytes */
+           bh_assert(type == VALUE_TYPE_I32 || type == VALUE_TYPE_F32);
+
+           /* No slot left, emit const instead */
+           if (ctx->i64_const_num * 2 + ctx->i32_const_num > INT16_MAX - 1) {
+               *offset = 0;
+               return true;
+           }

-       /* The max cell num of const buffer is 32768 since the valid index range
-        * is -32768 ~ -1. Return an invalid index 0 to indicate the buffer is
-        * full */
-       if (ctx->const_cell_num > INT16_MAX - bytes_to_increase + 1) {
-           *offset = 0;
-           return true;
+           /* Traverse the list if the const num is small */
+           if (ctx->i32_const_num < 10) {
+               for (uint32 i = 0; i < ctx->i32_const_num; i++) {
+                   if (ctx->i32_consts[i] == *(int32 *)value) {
+                       *offset = -1;
+                       return true;
+                   }
+               }
+           }
+
+           if (ctx->i32_const_num >= ctx->i32_const_max_num) {
+               MEM_REALLOC(ctx->i32_consts,
+                           sizeof(int32) * ctx->i32_const_max_num,
+                           sizeof(int32) * (ctx->i32_const_max_num * 2));
+               ctx->i32_const_max_num *= 2;
+           }
+           ctx->i32_consts[ctx->i32_const_num++] = *(int32 *)value;
        }

-       if ((uint8 *)c == ctx->const_buf + ctx->const_buf_size) {
-           MEM_REALLOC(ctx->const_buf, ctx->const_buf_size,
-                       ctx->const_buf_size + 4 * sizeof(Const));
-           ctx->const_buf_size += 4 * sizeof(Const);
-           c = (Const *)(ctx->const_buf + ctx->num_const * sizeof(Const));
+       *offset = -1;
+       return true;
+   }
+   else {
+       if (type == VALUE_TYPE_I64 || type == VALUE_TYPE_F64) {
+           int64 key = *(int64 *)value, *i64_const;
+           i64_const = bsearch(&key, ctx->i64_consts, ctx->i64_const_num,
+                               sizeof(int64), cmp_i64_const);
+           if (!i64_const) { /* not found, emit const instead */
+               *offset = 0;
+               return true;
+           }
+           *offset = -(uint32)(ctx->i64_const_num * 2 + ctx->i32_const_num)
+                     + (uint32)(i64_const - ctx->i64_consts) * 2;
        }
-       c->value_type = type;
-       switch (type) {
-           case VALUE_TYPE_F64:
-               bh_memcpy_s(&(c->value.f64), sizeof(WASMValue), value,
-                           sizeof(float64));
-               ctx->const_cell_num += 2;
-               /* The const buf will be reversed, we use the second cell */
-               /* of the i64/f64 const so the finnal offset is corrent */
-               operand_offset++;
-               break;
-           case VALUE_TYPE_I64:
-               c->value.i64 = *(int64 *)value;
-               ctx->const_cell_num += 2;
-               operand_offset++;
-               break;
-           case VALUE_TYPE_F32:
-               bh_memcpy_s(&(c->value.f32), sizeof(WASMValue), value,
-                           sizeof(float32));
-               ctx->const_cell_num++;
-               break;
-           case VALUE_TYPE_I32:
-               c->value.i32 = *(int32 *)value;
-               ctx->const_cell_num++;
-               break;
-#if WASM_ENABLE_REF_TYPES != 0
-           case VALUE_TYPE_EXTERNREF:
-           case VALUE_TYPE_FUNCREF:
-               c->value.i32 = *(int32 *)value;
-               ctx->const_cell_num++;
-               break;
-#endif
-           default:
-               break;
+       else {
+           int32 key = *(int32 *)value, *i32_const;
+           i32_const = bsearch(&key, ctx->i32_consts, ctx->i32_const_num,
+                               sizeof(int32), cmp_i32_const);
+           if (!i32_const) { /* not found, emit const instead */
+               *offset = 0;
+               return true;
+           }
+           *offset = -(uint32)(ctx->i32_const_num)
+                     + (uint32)(i32_const - ctx->i32_consts);
        }
-       c->slot_index = operand_offset;
-       ctx->num_const++;
-       LOG_OP("#### new const [%d]: %ld\n", ctx->num_const,
-              (int64)c->value.i64);
-   }
-   /* use negetive index for const */
-   operand_offset = -(operand_offset + 1);
-   *offset = operand_offset;
-   return true;
+
+       return true;
+   }
fail:
    return false;
}
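
The old linear Const buffer is gone: during the first scan the loader only
appends constants (with a cheap duplicate check for small pools), between
the scans the pools are sorted and deduplicated, and during the second scan
bsearch resolves each constant to a stable negative slot offset. A
stand-alone sketch of the same scheme for the i32 pool (illustrative names,
not the loader's API):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int
cmp_i32(const void *a, const void *b)
{
    int32_t x = *(const int32_t *)a, y = *(const int32_t *)b;
    return (x < y) ? -1 : (x > y) ? 1 : 0;
}

int main(void)
{
    int32_t pool[] = { 7, 42, 7, -1, 42 }; /* collected by the first scan */
    size_t n = sizeof(pool) / sizeof(pool[0]), k = 1;

    qsort(pool, n, sizeof(int32_t), cmp_i32);
    for (size_t i = 1; i < n; i++) /* dedup in place, order preserved */
        if (pool[i] != pool[i - 1])
            pool[k++] = pool[i];

    /* Second scan: mirror the i32 rule above, offset = -num + index */
    int32_t key = 42;
    int32_t *hit = bsearch(&key, pool, k, sizeof(int32_t), cmp_i32);
    printf("offset of 42: %d\n", (int)(-(int)k + (int)(hit - pool)));
    /* For i64/f64 the rule is offset = -(i64_num * 2 + i32_num) + index * 2,
       since each 64-bit const occupies two 4-byte cells. */
    return 0;
}
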
@@ -5559,7 +5601,12 @@ reserve_block_ret(WASMLoaderContext *loader_ctx, uint8 opcode,
        else {
            loader_ctx->frame_offset = frame_offset;
            loader_ctx->dynamic_offset = dynamic_offset;
-           PUSH_OFFSET_TYPE(return_types[i]);
+           if (!(wasm_loader_push_frame_offset(
+                   loader_ctx, return_types[i], disable_emit,
+                   operand_offset, error_buf, error_buf_size))) {
+               wasm_runtime_free(emit_data);
+               goto fail;
+           }
            wasm_loader_emit_backspace(loader_ctx, sizeof(int16));
            loader_ctx->frame_offset = frame_offset_org;
            loader_ctx->dynamic_offset = dynamic_offset_org;
@@ -5590,13 +5637,6 @@ fail:

#endif /* WASM_ENABLE_FAST_INTERP */

-#define RESERVE_BLOCK_RET()                                                  \
-    do {                                                                     \
-        if (!reserve_block_ret(loader_ctx, opcode, disable_emit, error_buf,  \
-                               error_buf_size))                              \
-            goto fail;                                                       \
-    } while (0)
-
#define PUSH_TYPE(type)                                                      \
    do {                                                                     \
        if (!(wasm_loader_push_frame_ref(loader_ctx, type, error_buf,        \
@@ -6033,6 +6073,86 @@ fail:
        }                                                                    \
    } while (0)

+#if WASM_ENABLE_FAST_INTERP == 0
+
+#define pb_read_leb_uint32 read_leb_uint32
+#define pb_read_leb_int32 read_leb_int32
+#define pb_read_leb_int64 read_leb_int64
+#define pb_read_leb_memarg read_leb_memarg
+#define pb_read_leb_mem_offset read_leb_mem_offset
+#define pb_read_leb_memidx read_leb_memidx
+
+#else
+
+/* Read leb without malformed format check */
+static uint64
+read_leb_quick(uint8 **p_buf, uint32 maxbits, bool sign)
+{
+    uint8 *buf = *p_buf;
+    uint64 result = 0, byte = 0;
+    uint32 shift = 0;
+
+    do {
+        byte = *buf++;
+        result |= ((byte & 0x7f) << shift);
+        shift += 7;
+    } while (byte & 0x80);
+
+    if (sign && (shift < maxbits) && (byte & 0x40)) {
+        /* Sign extend */
+        result |= (~((uint64)0)) << shift;
+    }
+
+    *p_buf = buf;
+    return result;
+}
+
+#define pb_read_leb_uint32(p, p_end, res)                 \
+    do {                                                  \
+        if (!loader_ctx->p_code_compiled)                 \
+            /* Enable format check in the first scan */   \
+            read_leb_uint32(p, p_end, res);               \
+        else                                              \
+            /* Disable format check in the second scan */ \
+            res = (uint32)read_leb_quick(&p, 32, false);  \
+    } while (0)
+
+#define pb_read_leb_int32(p, p_end, res)                  \
+    do {                                                  \
+        if (!loader_ctx->p_code_compiled)                 \
+            /* Enable format check in the first scan */   \
+            read_leb_int32(p, p_end, res);                \
+        else                                              \
+            /* Disable format check in the second scan */ \
+            res = (int32)read_leb_quick(&p, 32, true);    \
+    } while (0)
+
+#define pb_read_leb_int64(p, p_end, res)                  \
+    do {                                                  \
+        if (!loader_ctx->p_code_compiled)                 \
+            /* Enable format check in the first scan */   \
+            read_leb_int64(p, p_end, res);                \
+        else                                              \
+            /* Disable format check in the second scan */ \
+            res = (int64)read_leb_quick(&p, 64, true);    \
+    } while (0)
+
+#if WASM_ENABLE_MULTI_MEMORY != 0
+#define pb_read_leb_memarg read_leb_memarg
+#else
+#define pb_read_leb_memarg pb_read_leb_uint32
+#endif
+
+#if WASM_ENABLE_MEMORY64 != 0
+#define pb_read_leb_mem_offset read_leb_mem_offset
+#else
+#define pb_read_leb_mem_offset pb_read_leb_uint32
+#endif
+
+#define pb_read_leb_memidx pb_read_leb_uint32
+
+#endif /* end of WASM_ENABLE_FAST_INTERP != 0 */
+
static bool
wasm_loader_prepare_bytecode(WASMModule *module, WASMFunction *func,
                             uint32 cur_func_idx, char *error_buf,
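
The pb_ variants exist because fast-interp walks each function body twice:
the first scan still uses the checked read_leb_* readers, so by the second
scan the LEB128 encodings are known to be well formed and read_leb_quick can
skip the bounds and malformed-format checks. A stand-alone check of the
decoding it performs (a hypothetical helper mirroring read_leb_quick, not
the loader's code):

#include <stdio.h>
#include <stdint.h>

static uint64_t
leb_quick(const uint8_t **p, uint32_t maxbits, int sign)
{
    const uint8_t *buf = *p;
    uint64_t result = 0, byte;
    uint32_t shift = 0;

    do { /* 7 payload bits per byte, MSB is the continuation bit */
        byte = *buf++;
        result |= (byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);

    if (sign && shift < maxbits && (byte & 0x40))
        result |= (~(uint64_t)0) << shift; /* sign extend */
    *p = buf;
    return result;
}

int main(void)
{
    const uint8_t enc_u[] = { 0xE5, 0x8E, 0x26 }; /* unsigned 624485 */
    const uint8_t enc_s[] = { 0x7F };             /* signed -1 */
    const uint8_t *p = enc_u;

    printf("%llu\n", (unsigned long long)leb_quick(&p, 32, 0));
    p = enc_s;
    printf("%lld\n", (long long)(int64_t)leb_quick(&p, 32, 1));
    return 0;
}
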
@@ -6056,11 +6176,9 @@ wasm_loader_prepare_bytecode(WASMModule *module, WASMFunction *func,
    uint32 segment_index;
#endif
#if WASM_ENABLE_FAST_INTERP != 0
-   uint8 *func_const_end, *func_const = NULL;
    int16 operand_offset = 0;
    uint8 last_op = 0;
    bool disable_emit, preserve_local = false, if_condition_available = true;
-   ;
    float32 f32_const;
    float64 f64_const;

@@ -6111,6 +6229,68 @@ re_scan:
        p = func->code;
        func->code_compiled = loader_ctx->p_code_compiled;
        func->code_compiled_size = loader_ctx->code_compiled_size;
+
+       if (loader_ctx->i64_const_num > 0) {
+           int64 *i64_consts_old = loader_ctx->i64_consts;
+
+           /* Sort the i64 consts */
+           qsort(i64_consts_old, loader_ctx->i64_const_num, sizeof(int64),
+                 cmp_i64_const);
+
+           /* Remove the duplicated i64 consts */
+           uint32 k = 1;
+           for (i = 1; i < loader_ctx->i64_const_num; i++) {
+               if (i64_consts_old[i] != i64_consts_old[i - 1]) {
+                   i64_consts_old[k++] = i64_consts_old[i];
+               }
+           }
+
+           if (k < loader_ctx->i64_const_num) {
+               int64 *i64_consts_new;
+               /* Try to reallocate memory with a smaller size */
+               if ((i64_consts_new =
+                        wasm_runtime_malloc((uint32)sizeof(int64) * k))) {
+                   bh_memcpy_s(i64_consts_new, (uint32)sizeof(int64) * k,
+                               i64_consts_old, (uint32)sizeof(int64) * k);
+                   /* Free the old memory */
+                   wasm_runtime_free(i64_consts_old);
+                   loader_ctx->i64_consts = i64_consts_new;
+                   loader_ctx->i64_const_max_num = k;
+               }
+               loader_ctx->i64_const_num = k;
+           }
+       }
+
+       if (loader_ctx->i32_const_num > 0) {
+           int32 *i32_consts_old = loader_ctx->i32_consts;
+
+           /* Sort the i32 consts */
+           qsort(i32_consts_old, loader_ctx->i32_const_num, sizeof(int32),
+                 cmp_i32_const);
+
+           /* Remove the duplicated i32 consts */
+           uint32 k = 1;
+           for (i = 1; i < loader_ctx->i32_const_num; i++) {
+               if (i32_consts_old[i] != i32_consts_old[i - 1]) {
+                   i32_consts_old[k++] = i32_consts_old[i];
+               }
+           }
+
+           if (k < loader_ctx->i32_const_num) {
+               int32 *i32_consts_new;
+               /* Try to reallocate memory with a smaller size */
+               if ((i32_consts_new =
+                        wasm_runtime_malloc((uint32)sizeof(int32) * k))) {
+                   bh_memcpy_s(i32_consts_new, (uint32)sizeof(int32) * k,
+                               i32_consts_old, (uint32)sizeof(int32) * k);
+                   /* Free the old memory */
+                   wasm_runtime_free(i32_consts_old);
+                   loader_ctx->i32_consts = i32_consts_new;
+                   loader_ctx->i32_const_max_num = k;
+               }
+               loader_ctx->i32_const_num = k;
+           }
+       }
    }
#endif

@@ -6180,7 +6360,7 @@ re_scan:
                int32 type_index;
                /* Resolve the leb128 encoded type index as block type */
                p--;
-               read_leb_int32(p, p_end, type_index);
+               pb_read_leb_int32(p, p_end, type_index);
                bh_assert((uint32)type_index < module->type_count);
                block_type.is_value_type = false;
                block_type.u.type = module->types[type_index];
@@ -6364,7 +6544,10 @@ re_scan:
#if WASM_ENABLE_FAST_INTERP != 0
                /* if the result of if branch is in local or const area, add a
                 * copy op */
-               RESERVE_BLOCK_RET();
+               if (!reserve_block_ret(loader_ctx, opcode, disable_emit,
+                                      error_buf, error_buf_size)) {
+                   goto fail;
+               }

                emit_empty_label_addr_and_frame_ip(PATCH_END);
                apply_label_patch(loader_ctx, 1, PATCH_ELSE);
@@ -6424,7 +6607,11 @@ re_scan:
#if WASM_ENABLE_FAST_INTERP != 0
                skip_label();
                /* copy the result to the block return address */
-               RESERVE_BLOCK_RET();
+               if (!reserve_block_ret(loader_ctx, opcode, disable_emit,
+                                      error_buf, error_buf_size)) {
+                   free_label_patch_list(loader_ctx->frame_csp);
+                   goto fail;
+               }

                apply_label_patch(loader_ctx, 0, PATCH_END);
                free_label_patch_list(loader_ctx->frame_csp);
@@ -6486,7 +6673,7 @@ re_scan:
                uint32 j;
#endif

-               read_leb_uint32(p, p_end, count);
+               pb_read_leb_uint32(p, p_end, count);
#if WASM_ENABLE_FAST_INTERP != 0
                emit_uint32(loader_ctx, count);
#endif
@@ -6495,7 +6682,7 @@ re_scan:
                /* Get each depth and check it */
                p_org = p;
                for (i = 0; i <= count; i++) {
-                   read_leb_uint32(p, p_end, depth);
+                   pb_read_leb_uint32(p, p_end, depth);
                    bh_assert(loader_ctx->csp_num > 0);
                    bh_assert(loader_ctx->csp_num - 1 >= depth);
                    (void)depth;
@@ -6593,7 +6780,7 @@ re_scan:
                uint32 func_idx;
                int32 idx;

-               read_leb_uint32(p, p_end, func_idx);
+               pb_read_leb_uint32(p, p_end, func_idx);
#if WASM_ENABLE_FAST_INTERP != 0
                /* we need to emit func_idx before arguments */
                emit_uint32(loader_ctx, func_idx);
@@ -6666,10 +6853,10 @@ re_scan:

                bh_assert(module->import_table_count + module->table_count > 0);

-               read_leb_uint32(p, p_end, type_idx);
+               pb_read_leb_uint32(p, p_end, type_idx);

#if WASM_ENABLE_REF_TYPES != 0
-               read_leb_uint32(p, p_end, table_idx);
+               pb_read_leb_uint32(p, p_end, table_idx);
#else
                CHECK_BUF(p, p_end, 1);
                table_idx = read_uint8(p);
@@ -6679,6 +6866,15 @@ re_scan:
                    goto fail;
                }

+               bh_assert(
+                   (table_idx < module->import_table_count
+                        ? module->import_tables[table_idx]
+                              .u.table.table_type.elem_type
+                        : module
+                              ->tables[table_idx - module->import_table_count]
+                              .table_type.elem_type)
+                   == VALUE_TYPE_FUNCREF);
+
#if WASM_ENABLE_FAST_INTERP != 0
                /* we need to emit before arguments */
                emit_uint32(loader_ctx, type_idx);
@@ -6900,7 +7096,7 @@ re_scan:
                uint8 *p_code_compiled_tmp = loader_ctx->p_code_compiled;
#endif

-               read_leb_uint32(p, p_end, vec_len);
+               pb_read_leb_uint32(p, p_end, vec_len);
                if (vec_len != 1) {
                    /* typed select must have exactly one result */
                    set_error_buf(error_buf, error_buf_size,
@@ -6975,7 +7171,7 @@ re_scan:
                uint8 decl_ref_type;
                uint32 table_idx;

-               read_leb_uint32(p, p_end, table_idx);
+               pb_read_leb_uint32(p, p_end, table_idx);
                if (!get_table_elem_type(module, table_idx, &decl_ref_type,
                                         error_buf, error_buf_size))
                    goto fail;
@@ -7069,7 +7265,7 @@ re_scan:
            case WASM_OP_REF_FUNC:
            {
                uint32 func_idx = 0;
-               read_leb_uint32(p, p_end, func_idx);
+               pb_read_leb_uint32(p, p_end, func_idx);

                if (!check_function_index(module, func_idx, error_buf,
                                          error_buf_size)) {
@@ -7286,7 +7482,7 @@ re_scan:
            case WASM_OP_GET_GLOBAL:
            {
                p_org = p - 1;
-               read_leb_uint32(p, p_end, global_idx);
+               pb_read_leb_uint32(p, p_end, global_idx);
                bh_assert(global_idx < global_count);

                global_type = global_idx < module->import_global_count
@@ -7320,7 +7516,7 @@ re_scan:
                bool is_mutable = false;

                p_org = p - 1;
-               read_leb_uint32(p, p_end, global_idx);
+               pb_read_leb_uint32(p, p_end, global_idx);
                bh_assert(global_idx < global_count);

                is_mutable = global_idx < module->import_global_count
@@ -7417,8 +7613,8 @@ re_scan:
                }
#endif
                CHECK_MEMORY();
-               read_leb_memarg(p, p_end, align);          /* align */
-               read_leb_mem_offset(p, p_end, mem_offset); /* offset */
+               pb_read_leb_memarg(p, p_end, align);          /* align */
+               pb_read_leb_mem_offset(p, p_end, mem_offset); /* offset */
#if WASM_ENABLE_FAST_INTERP != 0
                emit_uint32(loader_ctx, mem_offset);
#endif
@@ -7479,7 +7675,7 @@ re_scan:

            case WASM_OP_MEMORY_SIZE:
                CHECK_MEMORY();
-               read_leb_memidx(p, p_end, memidx);
+               pb_read_leb_memidx(p, p_end, memidx);
                check_memidx(module, memidx);
                PUSH_PAGE_COUNT();

@@ -7491,7 +7687,7 @@ re_scan:

            case WASM_OP_MEMORY_GROW:
                CHECK_MEMORY();
-               read_leb_memidx(p, p_end, memidx);
+               pb_read_leb_memidx(p, p_end, memidx);
                check_memidx(module, memidx);
                POP_AND_PUSH(mem_offset_type, mem_offset_type);

@@ -7506,7 +7702,7 @@ re_scan:
                break;

            case WASM_OP_I32_CONST:
-               read_leb_int32(p, p_end, i32_const);
+               pb_read_leb_int32(p, p_end, i32_const);
#if WASM_ENABLE_FAST_INTERP != 0
                skip_label();
                disable_emit = true;
@@ -7524,7 +7720,7 @@ re_scan:
                break;

            case WASM_OP_I64_CONST:
-               read_leb_int64(p, p_end, i64_const);
+               pb_read_leb_int64(p, p_end, i64_const);
#if WASM_ENABLE_FAST_INTERP != 0
                skip_label();
                disable_emit = true;
@@ -7806,7 +8002,7 @@ re_scan:
            {
                uint32 opcode1;

-               read_leb_uint32(p, p_end, opcode1);
+               pb_read_leb_uint32(p, p_end, opcode1);
#if WASM_ENABLE_FAST_INTERP != 0
                emit_byte(loader_ctx, ((uint8)opcode1));
#endif
@@ -7831,11 +8027,11 @@ re_scan:
                    case WASM_OP_MEMORY_INIT:
                    {
                        CHECK_MEMORY();
-                       read_leb_uint32(p, p_end, segment_index);
+                       pb_read_leb_uint32(p, p_end, segment_index);
#if WASM_ENABLE_FAST_INTERP != 0
                        emit_uint32(loader_ctx, segment_index);
#endif
-                       read_leb_memidx(p, p_end, memidx);
+                       pb_read_leb_memidx(p, p_end, memidx);
                        check_memidx(module, memidx);

                        bh_assert(segment_index < module->data_seg_count);
@@ -7851,7 +8047,7 @@ re_scan:
                    }
                    case WASM_OP_DATA_DROP:
                    {
-                       read_leb_uint32(p, p_end, segment_index);
+                       pb_read_leb_uint32(p, p_end, segment_index);
#if WASM_ENABLE_FAST_INTERP != 0
                        emit_uint32(loader_ctx, segment_index);
#endif
@@ -7867,9 +8063,9 @@ re_scan:
                        CHECK_MEMORY();
                        CHECK_BUF(p, p_end, sizeof(int16));
                        /* check both src and dst memory index */
-                       read_leb_memidx(p, p_end, memidx);
+                       pb_read_leb_memidx(p, p_end, memidx);
                        check_memidx(module, memidx);
-                       read_leb_memidx(p, p_end, memidx);
+                       pb_read_leb_memidx(p, p_end, memidx);
                        check_memidx(module, memidx);

                        POP_MEM_OFFSET();
@@ -7883,7 +8079,7 @@ re_scan:
                    case WASM_OP_MEMORY_FILL:
                    {
                        CHECK_MEMORY();
-                       read_leb_memidx(p, p_end, memidx);
+                       pb_read_leb_memidx(p, p_end, memidx);
                        check_memidx(module, memidx);

                        POP_MEM_OFFSET();
@@ -7901,8 +8097,8 @@ re_scan:
                        uint8 seg_ref_type, tbl_ref_type;
                        uint32 table_seg_idx, table_idx;

-                       read_leb_uint32(p, p_end, table_seg_idx);
-                       read_leb_uint32(p, p_end, table_idx);
+                       pb_read_leb_uint32(p, p_end, table_seg_idx);
+                       pb_read_leb_uint32(p, p_end, table_idx);

                        if (!get_table_elem_type(module, table_idx,
                                                 &tbl_ref_type, error_buf,
@@ -7937,7 +8133,7 @@ re_scan:
                    case WASM_OP_ELEM_DROP:
                    {
                        uint32 table_seg_idx;
-                       read_leb_uint32(p, p_end, table_seg_idx);
+                       pb_read_leb_uint32(p, p_end, table_seg_idx);
                        if (!get_table_seg_elem_type(module, table_seg_idx,
                                                     NULL, error_buf,
                                                     error_buf_size))
@@ -7953,13 +8149,13 @@ re_scan:
                        uint32 src_tbl_idx, dst_tbl_idx, src_tbl_idx_type,
                            dst_tbl_idx_type, min_tbl_idx_type;

-                       read_leb_uint32(p, p_end, src_tbl_idx);
+                       pb_read_leb_uint32(p, p_end, src_tbl_idx);
                        if (!get_table_elem_type(module, src_tbl_idx,
                                                 &src_ref_type, error_buf,
                                                 error_buf_size))
                            goto fail;

-                       read_leb_uint32(p, p_end, dst_tbl_idx);
+                       pb_read_leb_uint32(p, p_end, dst_tbl_idx);
                        if (!get_table_elem_type(module, dst_tbl_idx,
                                                 &dst_ref_type, error_buf,
                                                 error_buf_size))
@@ -8006,7 +8202,7 @@ re_scan:
                    {
                        uint32 table_idx;

-                       read_leb_uint32(p, p_end, table_idx);
+                       pb_read_leb_uint32(p, p_end, table_idx);
                        /* TODO: shall we create a new function to check
                           table idx instead of using below function? */
                        if (!get_table_elem_type(module, table_idx, NULL,
@@ -8031,7 +8227,7 @@ re_scan:
                        uint8 decl_ref_type;
                        uint32 table_idx;

-                       read_leb_uint32(p, p_end, table_idx);
+                       pb_read_leb_uint32(p, p_end, table_idx);
                        if (!get_table_elem_type(module, table_idx,
                                                 &decl_ref_type, error_buf,
                                                 error_buf_size))
@@ -8083,15 +8279,15 @@ re_scan:
            {
                uint32 opcode1;

-               read_leb_uint32(p, p_end, opcode1);
+               pb_read_leb_uint32(p, p_end, opcode1);

#if WASM_ENABLE_FAST_INTERP != 0
                emit_byte(loader_ctx, opcode1);
#endif
                if (opcode1 != WASM_OP_ATOMIC_FENCE) {
                    CHECK_MEMORY();
-                   read_leb_uint32(p, p_end, align);          /* align */
-                   read_leb_mem_offset(p, p_end, mem_offset); /* offset */
+                   pb_read_leb_uint32(p, p_end, align);          /* align */
+                   pb_read_leb_mem_offset(p, p_end, mem_offset); /* offset */
#if WASM_ENABLE_FAST_INTERP != 0
                    emit_uint32(loader_ctx, mem_offset);
#endif
@@ -8241,29 +8437,25 @@ re_scan:
    if (loader_ctx->p_code_compiled == NULL)
        goto re_scan;

-   func->const_cell_num = loader_ctx->const_cell_num;
+   func->const_cell_num =
+       loader_ctx->i64_const_num * 2 + loader_ctx->i32_const_num;
    if (func->const_cell_num > 0) {
-       int32 j;
-
-       if (!(func->consts = func_const = loader_malloc(
-                 func->const_cell_num * 4, error_buf, error_buf_size)))
+       if (!(func->consts =
+                 loader_malloc((uint64)sizeof(uint32) * func->const_cell_num,
+                               error_buf, error_buf_size)))
            goto fail;
-
-       func_const_end = func->consts + func->const_cell_num * 4;
-       /* reverse the const buf */
-       for (j = loader_ctx->num_const - 1; j >= 0; j--) {
-           Const *c = (Const *)(loader_ctx->const_buf + j * sizeof(Const));
-           if (c->value_type == VALUE_TYPE_F64
-               || c->value_type == VALUE_TYPE_I64) {
-               bh_memcpy_s(func_const, (uint32)(func_const_end - func_const),
-                           &(c->value.f64), (uint32)sizeof(int64));
-               func_const += sizeof(int64);
-           }
-           else {
-               bh_memcpy_s(func_const, (uint32)(func_const_end - func_const),
-                           &(c->value.f32), (uint32)sizeof(int32));
-               func_const += sizeof(int32);
-           }
+       if (loader_ctx->i64_const_num > 0) {
+           bh_memcpy_s(func->consts,
+                       (uint32)sizeof(int64) * loader_ctx->i64_const_num,
+                       loader_ctx->i64_consts,
+                       (uint32)sizeof(int64) * loader_ctx->i64_const_num);
+       }
+       if (loader_ctx->i32_const_num > 0) {
+           bh_memcpy_s(func->consts
+                           + sizeof(int64) * loader_ctx->i64_const_num,
+                       (uint32)sizeof(int32) * loader_ctx->i32_const_num,
+                       loader_ctx->i32_consts,
+                       (uint32)sizeof(int32) * loader_ctx->i32_const_num);
        }
    }
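
The copy above fixes the layout of func->consts: the i64/f64 pool comes
first, the i32/f32 pool follows, and const_cell_num counts 4-byte cells (two
per 64-bit constant). A small sketch of that layout and of how a negative
cell offset reaches back into it; treating offsets as relative to the end of
the const area is an assumption drawn from the offset arithmetic above, not
a quote of the interpreter:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    int64_t i64_pool[] = { 0x1122334455667788LL };
    int32_t i32_pool[] = { 7, 42 };
    uint32_t cells = 1 * 2 + 2; /* i64s use 2 cells, i32s use 1 */
    uint32_t consts[4];
    int64_t v;

    /* Same order as the bh_memcpy_s calls: i64 region, then i32 region */
    memcpy(consts, i64_pool, sizeof(i64_pool));
    memcpy(consts + 2, i32_pool, sizeof(i32_pool));

    int16_t offset = -(int16_t)cells + 0 * 2; /* first i64 const */
    memcpy(&v, consts + cells + offset, sizeof(v));
    printf("0x%llx\n", (unsigned long long)v);
    return 0;
}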