@@ -96,7 +96,8 @@ get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
 
 LLVMValueRef
 aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
-                          mem_offset_t offset, uint32 bytes, bool enable_segue)
+                          mem_offset_t offset, uint32 bytes, bool enable_segue,
+                          unsigned int *alignp)
 {
     LLVMValueRef offset_const =
         MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset));
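
The new out-parameter lets a caller learn the alignment that the bounds-check
logic can prove for the returned address; passing NULL opts out. A doc-comment
sketch of the extended contract (the wording is mine, not part of the patch):

    /*
     * Check that the access [offset, offset + bytes) fits in the linear
     * memory and return the native address to access, or NULL on failure.
     *
     * If alignp is non-NULL, *alignp receives a power-of-two lower bound
     * on the alignment of the returned address (1 when nothing can be
     * proven at compile time).
     */
    LLVMValueRef
    aot_check_memory_overflow(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, mem_offset_t offset,
                              uint32 bytes, bool enable_segue,
                              unsigned int *alignp);
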
@@ -180,6 +181,26 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
             comp_ctx->comp_data->memories[0].init_page_count;
         uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;
 
+        if (alignp != NULL) {
+            /*
+             * A note about max_align below:
+             * the assumption here is the base address of a linear memory
+             * has the natural alignment. for platforms using mmap, it can
+             * be even larger. for now, use a conservative value.
+             */
+            const int max_align = 8;
+            int shift = ffs((int)(unsigned int)mem_offset);
+            if (shift == 0) {
+                *alignp = max_align;
+            }
+            else {
+                unsigned int align = 1 << (shift - 1);
+                if (align > max_align) {
+                    align = max_align;
+                }
+                *alignp = align;
+            }
+        }
         if (mem_offset + bytes <= mem_data_size) {
             /* inside memory space */
             if (comp_ctx->pointer_size == sizeof(uint64))
@@ -205,6 +226,9 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                 return maddr;
             }
         }
+    else if (alignp != NULL) {
+        *alignp = 1;
+    }
 
     if (is_target_64bit) {
         if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
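
A note on the two branches above: when the constant offset lands inside the
statically known data size, the low bits of mem_offset determine how aligned
the access is relative to the linear memory base (assumed to be at least
8-byte aligned), and ffs() extracts exactly that; when the bounds check must
happen at run time, the patch conservatively reports an alignment of 1. A
standalone sketch of the derivation, with an illustrative helper name and
test values that are not part of the patch:

    #include <assert.h>
    #include <strings.h> /* ffs() */

    /* illustrative re-statement of the alignment derivation in the patch */
    static unsigned int
    align_hint_from_offset(unsigned int mem_offset)
    {
        /* conservative bound: assume the linear memory base is at least
           8-byte aligned (mmap-based platforms usually give more) */
        const unsigned int max_align = 8;
        int shift = ffs((int)mem_offset); /* 1-based index of lowest set bit */
        unsigned int align;

        if (shift == 0) /* mem_offset == 0: as aligned as the base itself */
            return max_align;
        align = 1u << (shift - 1); /* largest power of two dividing it */
        return align > max_align ? max_align : align;
    }

    int
    main(void)
    {
        assert(align_hint_from_offset(0) == 8);  /* no bit set */
        assert(align_hint_from_offset(6) == 2);  /* 0b110 -> lowest bit 2 */
        assert(align_hint_from_offset(8) == 8);  /* 0b1000 -> 8 */
        assert(align_hint_from_offset(48) == 8); /* 16 exceeds the cap */
        assert(align_hint_from_offset(5) == 1);  /* odd offset -> 1 */
        return 0;
    }
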
@@ -324,7 +348,7 @@ fail:
             aot_set_last_error("llvm build load failed."); \
             goto fail; \
         } \
-        LLVMSetAlignment(value, 1); \
+        LLVMSetAlignment(value, known_align); \
     } while (0)
 
 #define BUILD_TRUNC(value, data_type) \
@@ -343,7 +367,7 @@ fail:
             aot_set_last_error("llvm build store failed."); \
             goto fail; \
         } \
-        LLVMSetAlignment(res, 1); \
+        LLVMSetAlignment(res, known_align); \
     } while (0)
 
 #define BUILD_SIGN_EXT(dst_type) \
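
With these two macro changes, the loads and stores emitted for plain Wasm
memory accesses carry the derived alignment instead of the worst-case 1,
which lets LLVM pick naturally aligned machine instructions on
strict-alignment targets. A minimal sketch of what BUILD_LOAD now
effectively does via the LLVM-C API (the helper is illustrative; in the
real macro, builder, data_type, maddr and known_align come from the
surrounding function):

    #include <llvm-c/Core.h>

    static LLVMValueRef
    emit_aligned_load(LLVMBuilderRef builder, LLVMTypeRef data_type,
                      LLVMValueRef maddr, unsigned int known_align)
    {
        /* build the load, then attach the statically derived alignment */
        LLVMValueRef value = LLVMBuildLoad2(builder, data_type, maddr, "data");
        if (value != NULL)
            LLVMSetAlignment(value, known_align); /* was hard-coded to 1 */
        return value;
    }
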
@@ -445,8 +469,9 @@ aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
     LLVMTypeRef data_type;
     bool enable_segue = comp_ctx->enable_segue_i32_load;
 
+    unsigned int known_align;
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
-                                            enable_segue)))
+                                            enable_segue, &known_align)))
         return false;
 
     switch (bytes) {
@@ -515,8 +540,9 @@ aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
     LLVMTypeRef data_type;
     bool enable_segue = comp_ctx->enable_segue_i64_load;
 
+    unsigned int known_align;
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
-                                            enable_segue)))
+                                            enable_segue, &known_align)))
         return false;
 
     switch (bytes) {
@@ -591,8 +617,9 @@ aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
     LLVMValueRef maddr, value;
     bool enable_segue = comp_ctx->enable_segue_f32_load;
 
+    unsigned int known_align;
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
-                                            enable_segue)))
+                                            enable_segue, &known_align)))
         return false;
 
     if (!enable_segue)
@@ -614,8 +641,9 @@ aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
     LLVMValueRef maddr, value;
     bool enable_segue = comp_ctx->enable_segue_f64_load;
 
+    unsigned int known_align;
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
-                                            enable_segue)))
+                                            enable_segue, &known_align)))
         return false;
 
     if (!enable_segue)
@@ -640,8 +668,9 @@ aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
 
     POP_I32(value);
 
+    unsigned int known_align;
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
-                                            enable_segue)))
+                                            enable_segue, &known_align)))
         return false;
 
     switch (bytes) {
@@ -691,8 +720,9 @@ aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
 
     POP_I64(value);
 
+    unsigned int known_align;
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
-                                            enable_segue)))
+                                            enable_segue, &known_align)))
         return false;
 
     switch (bytes) {
@@ -748,8 +778,9 @@ aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
 
     POP_F32(value);
 
+    unsigned int known_align;
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
-                                            enable_segue)))
+                                            enable_segue, &known_align)))
         return false;
 
     if (!enable_segue)
@@ -771,8 +802,9 @@ aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
 
     POP_F64(value);
 
+    unsigned int known_align;
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
-                                            enable_segue)))
+                                            enable_segue, &known_align)))
         return false;
 
     if (!enable_segue)
@@ -1302,7 +1334,7 @@ aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
         POP_I64(value);
 
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
-                                            enable_segue)))
+                                            enable_segue, NULL)))
         return false;
 
     if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
@@ -1392,7 +1424,7 @@ aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
     }
 
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
-                                            enable_segue)))
+                                            enable_segue, NULL)))
         return false;
 
     if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
@@ -1505,7 +1537,7 @@ aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
     CHECK_LLVM_CONST(is_wait64);
 
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
-                                            false)))
+                                            false, NULL)))
         return false;
 
     if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
@@ -1579,7 +1611,7 @@ aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
     POP_I32(count);
 
     if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
-                                            false)))
+                                            false, NULL)))
         return false;
 
     if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
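
The atomic paths (rmw, cmpxchg, wait, notify) pass NULL for alignp on
purpose: they already validate the effective address at run time through
check_memory_alignment and trap on misalignment, so a compile-time
alignment hint would add nothing there.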