@@ -9,6 +9,7 @@
 #include "../jit_frontend.h"
 #include "../jit_codegen.h"
 #include "../../interpreter/wasm_runtime.h"
+#include "jit_emit_control.h"
 
 #ifndef OS_ENABLE_HW_BOUND_CHECK
 static JitReg
@@ -60,6 +61,14 @@ fail:
 }
 #endif
 
+#if WASM_ENABLE_SHARED_MEMORY != 0
+static void
+set_load_or_store_atomic(JitInsn *load_or_store_inst)
+{
+    load_or_store_inst->flags_u8 |= 0x1;
+}
+#endif
+
 #if UINTPTR_MAX == UINT64_MAX
 static JitReg
 check_and_seek_on_64bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
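The helper above is the entire marking mechanism: bit 0 of `flags_u8` tags a load or store IR instruction as atomic so that later passes and the codegen backend can treat it specially. A minimal sketch of the consuming side, for context only (the `insn_is_atomic` helper is hypothetical, not part of this patch):

/* Hypothetical counterpart to set_load_or_store_atomic(): tests the
 * atomic bit that the patch sets on a load/store instruction. */
static inline bool
insn_is_atomic(const JitInsn *insn)
{
    return (insn->flags_u8 & 0x1) != 0;
}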
@@ -177,23 +186,36 @@ fail:
     return 0;
 }
 
-#define CHECK_ALIGNMENT(maddr, memory_data, offset1)                      \
+#if UINTPTR_MAX == UINT64_MAX
+#define CHECK_ALIGNMENT(offset1)                                          \
     do {                                                                  \
-        GEN_INSN(ADD, maddr, memory_data, offset1);                       \
         JitReg align_mask = NEW_CONST(I64, ((uint64)1 << align) - 1);     \
         JitReg AND_res = jit_cc_new_reg_I64(cc);                          \
-        GEN_INSN(AND, AND_res, maddr, align_mask);                        \
+        GEN_INSN(AND, AND_res, offset1, align_mask);                      \
         GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I64, 0));           \
         if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE,    \
                                 cc->cmp_reg, NULL))                       \
             goto fail;                                                    \
     } while (0)
+#else
+#define CHECK_ALIGNMENT(offset1)                                          \
+    do {                                                                  \
+        JitReg align_mask = NEW_CONST(I32, (1 << align) - 1);             \
+        JitReg AND_res = jit_cc_new_reg_I32(cc);                          \
+        GEN_INSN(AND, AND_res, offset1, align_mask);                      \
+        GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I32, 0));           \
+        if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE,    \
+                                cc->cmp_reg, NULL))                       \
+            goto fail;                                                    \
+    } while (0)
+#endif
 
 bool
 jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool sign, bool atomic)
 {
     JitReg addr, offset1, value, memory_data;
+    JitInsn *load_insn = NULL;
 
     POP_I32(addr);
 
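The reworked CHECK_ALIGNMENT no longer materializes a native address first; it masks the linear-memory offset directly with (1 << align) - 1 and raises EXCE_UNALIGNED_ATOMIC when any low bit is set, with an I32 variant of the mask on 32-bit platforms. A standalone illustration of the mask arithmetic, separate from the patch and the JIT IR (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* For a 4-byte atomic access the wasm align immediate is 2, so the mask
 * is (1 << 2) - 1 = 0x3; any offset with a low bit set is unaligned. */
static bool
is_naturally_aligned(uint64_t offset, uint32_t align)
{
    uint64_t align_mask = ((uint64_t)1 << align) - 1;
    return (offset & align_mask) == 0;
}

/* is_naturally_aligned(0x1000, 2) -> true
 * is_naturally_aligned(0x1002, 2) -> false, i.e. EXCE_UNALIGNED_ATOMIC */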
@@ -201,6 +223,11 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
     if (!offset1) {
         goto fail;
     }
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (atomic) {
+        CHECK_ALIGNMENT(offset1);
+    }
+#endif
 
     memory_data = get_memory_data_reg(cc->jit_frame, 0);
 
@@ -209,30 +236,30 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
         case 1:
         {
             if (sign) {
-                GEN_INSN(LDI8, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDI8, value, memory_data, offset1);
             }
             else {
-                GEN_INSN(LDU8, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDU8, value, memory_data, offset1);
             }
             break;
         }
         case 2:
         {
             if (sign) {
-                GEN_INSN(LDI16, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDI16, value, memory_data, offset1);
             }
             else {
-                GEN_INSN(LDU16, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDU16, value, memory_data, offset1);
             }
             break;
         }
         case 4:
         {
             if (sign) {
-                GEN_INSN(LDI32, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDI32, value, memory_data, offset1);
             }
             else {
-                GEN_INSN(LDU32, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDU32, value, memory_data, offset1);
             }
             break;
         }
@@ -243,6 +270,13 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
         }
     }
 
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (atomic && load_insn)
+        set_load_or_store_atomic(load_insn);
+#else
+    (void)load_insn;
+#endif
+
     PUSH_I32(value);
     return true;
 fail:
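Once marked, an atomic i32.load must behave as a single indivisible, sequentially consistent read. For reference, the contract expressed with C11 atomics (illustrative only, not the code the backend emits; the helper name is an assumption):

#include <stdatomic.h>
#include <stdint.h>

/* i32.atomic.load: one indivisible read with seq_cst ordering */
static uint32_t
atomic_load_u32(_Atomic uint32_t *maddr)
{
    return atomic_load_explicit(maddr, memory_order_seq_cst);
}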
@@ -254,6 +288,7 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool sign, bool atomic)
 {
     JitReg addr, offset1, value, memory_data;
+    JitInsn *load_insn = NULL;
 
     POP_I32(addr);
 
@@ -261,6 +296,11 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
     if (!offset1) {
         goto fail;
     }
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (atomic) {
+        CHECK_ALIGNMENT(offset1);
+    }
+#endif
 
     memory_data = get_memory_data_reg(cc->jit_frame, 0);
 
@@ -269,40 +309,40 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
         case 1:
         {
             if (sign) {
-                GEN_INSN(LDI8, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDI8, value, memory_data, offset1);
             }
             else {
-                GEN_INSN(LDU8, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDU8, value, memory_data, offset1);
             }
             break;
         }
         case 2:
         {
             if (sign) {
-                GEN_INSN(LDI16, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDI16, value, memory_data, offset1);
             }
             else {
-                GEN_INSN(LDU16, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDU16, value, memory_data, offset1);
             }
             break;
         }
         case 4:
         {
             if (sign) {
-                GEN_INSN(LDI32, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDI32, value, memory_data, offset1);
             }
             else {
-                GEN_INSN(LDU32, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDU32, value, memory_data, offset1);
             }
             break;
         }
         case 8:
         {
             if (sign) {
-                GEN_INSN(LDI64, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDI64, value, memory_data, offset1);
             }
             else {
-                GEN_INSN(LDU64, value, memory_data, offset1);
+                load_insn = GEN_INSN(LDU64, value, memory_data, offset1);
             }
             break;
         }
@@ -313,6 +353,13 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
         }
     }
 
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (atomic && load_insn)
+        set_load_or_store_atomic(load_insn);
+#else
+    (void)load_insn;
+#endif
+
     PUSH_I64(value);
     return true;
 fail:
@@ -370,6 +417,7 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
                          uint32 bytes, bool atomic)
 {
     JitReg value, addr, offset1, memory_data;
+    JitInsn *store_insn = NULL;
 
     POP_I32(value);
     POP_I32(addr);
@@ -378,23 +426,28 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
     if (!offset1) {
         goto fail;
     }
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (atomic) {
+        CHECK_ALIGNMENT(offset1);
+    }
+#endif
 
     memory_data = get_memory_data_reg(cc->jit_frame, 0);
 
     switch (bytes) {
         case 1:
         {
-            GEN_INSN(STI8, value, memory_data, offset1);
+            store_insn = GEN_INSN(STI8, value, memory_data, offset1);
             break;
         }
         case 2:
        {
-            GEN_INSN(STI16, value, memory_data, offset1);
+            store_insn = GEN_INSN(STI16, value, memory_data, offset1);
             break;
         }
         case 4:
         {
-            GEN_INSN(STI32, value, memory_data, offset1);
+            store_insn = GEN_INSN(STI32, value, memory_data, offset1);
             break;
         }
         default:
@@ -403,6 +456,12 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
             goto fail;
         }
     }
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (atomic && store_insn)
+        set_load_or_store_atomic(store_insn);
+#else
+    (void)store_insn;
+#endif
 
     return true;
 fail:
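The store path mirrors the load path: an atomic i32.store must publish its value with sequentially consistent ordering, which on x86-64 is typically an xchg or a mov followed by a fence. The C11 contract, again illustrative only (helper name assumed):

#include <stdatomic.h>
#include <stdint.h>

/* i32.atomic.store: one indivisible write with seq_cst ordering */
static void
atomic_store_u32(_Atomic uint32_t *maddr, uint32_t value)
{
    atomic_store_explicit(maddr, value, memory_order_seq_cst);
}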
@@ -414,6 +473,7 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
                          uint32 bytes, bool atomic)
 {
     JitReg value, addr, offset1, memory_data;
+    JitInsn *store_insn = NULL;
 
     POP_I64(value);
     POP_I32(addr);
@@ -422,6 +482,11 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
     if (!offset1) {
         goto fail;
     }
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (atomic) {
+        CHECK_ALIGNMENT(offset1);
+    }
+#endif
 
     if (jit_reg_is_const(value) && bytes < 8) {
         value = NEW_CONST(I32, (int32)jit_cc_get_const_I64(cc, value));
@@ -432,22 +497,22 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
     switch (bytes) {
         case 1:
         {
-            GEN_INSN(STI8, value, memory_data, offset1);
+            store_insn = GEN_INSN(STI8, value, memory_data, offset1);
             break;
         }
         case 2:
         {
-            GEN_INSN(STI16, value, memory_data, offset1);
+            store_insn = GEN_INSN(STI16, value, memory_data, offset1);
             break;
         }
         case 4:
         {
-            GEN_INSN(STI32, value, memory_data, offset1);
+            store_insn = GEN_INSN(STI32, value, memory_data, offset1);
             break;
         }
         case 8:
         {
-            GEN_INSN(STI64, value, memory_data, offset1);
+            store_insn = GEN_INSN(STI64, value, memory_data, offset1);
             break;
         }
         default:
@@ -456,6 +521,12 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
             goto fail;
         }
     }
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (atomic && store_insn)
+        set_load_or_store_atomic(store_insn);
+#else
+    (void)store_insn;
+#endif
 
     return true;
 fail:
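One subtlety carried over from the non-atomic path, visible in the context lines above: when i64.store8/16/32 stores a constant, only the low `bytes` bytes ever reach memory, so the operand is narrowed to an I32 constant and the backend can pick a shorter encoding. The effect of that truncation in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t v = 0x1122334455667788ULL;
    /* what NEW_CONST(I32, (int32)jit_cc_get_const_I64(cc, value)) keeps */
    uint32_t low = (uint32_t)v;
    printf("i64.store32 writes 0x%08x\n", low); /* prints 0x55667788 */
    return 0;
}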
@@ -774,10 +845,153 @@ fail:
 #endif
 
 #if WASM_ENABLE_SHARED_MEMORY != 0
+#define GEN_AT_RMW_INSN(op, op_type, bytes, result, value, memory_data,   \
+                        offset1)                                          \
+    do {                                                                  \
+        switch (bytes) {                                                  \
+            case 1:                                                       \
+            {                                                             \
+                insn = GEN_INSN(AT_##op##U8, result, value, memory_data,  \
+                                offset1);                                 \
+                break;                                                    \
+            }                                                             \
+            case 2:                                                       \
+            {                                                             \
+                insn = GEN_INSN(AT_##op##U16, result, value, memory_data, \
+                                offset1);                                 \
+                break;                                                    \
+            }                                                             \
+            case 4:                                                       \
+            {                                                             \
+                if (op_type == VALUE_TYPE_I32)                            \
+                    insn = GEN_INSN(AT_##op##I32, result, value,          \
+                                    memory_data, offset1);                \
+                else                                                      \
+                    insn = GEN_INSN(AT_##op##U32, result, value,          \
+                                    memory_data, offset1);                \
+                break;                                                    \
+            }                                                             \
+            case 8:                                                       \
+            {                                                             \
+                insn = GEN_INSN(AT_##op##I64, result, value, memory_data, \
+                                offset1);                                 \
+                break;                                                    \
+            }                                                             \
+            default:                                                      \
+            {                                                             \
+                bh_assert(0);                                             \
+                goto fail;                                                \
+            }                                                             \
+        }                                                                 \
+    } while (0)
+
 bool
 jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
                           uint32 align, uint32 offset, uint32 bytes)
 {
+    JitReg addr, offset1, memory_data, value, result, eax_hreg, rax_hreg,
+        ebx_hreg, rbx_hreg;
+    JitInsn *insn = NULL;
+    bool is_i32 = op_type == VALUE_TYPE_I32;
+    bool is_logical_op = atomic_op == AtomicRMWBinOpAnd
+                         || atomic_op == AtomicRMWBinOpOr
+                         || atomic_op == AtomicRMWBinOpXor;
+
+    /* currently we only implement atomic rmw on the x86-64 target */
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
+
+    /* For atomic logical binary ops, the generated cmpxchg loop implicitly
+     * uses rax for the expected value and rbx to hold the temporary value */
+    eax_hreg = jit_codegen_get_hreg_by_name("eax");
+    rax_hreg = jit_codegen_get_hreg_by_name("rax");
+    ebx_hreg = jit_codegen_get_hreg_by_name("ebx");
+    rbx_hreg = jit_codegen_get_hreg_by_name("rbx");
+
+    bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
+    if (op_type == VALUE_TYPE_I32) {
+        POP_I32(value);
+    }
+    else {
+        POP_I64(value);
+    }
+    POP_I32(addr);
+
+    offset1 = check_and_seek(cc, addr, offset, bytes);
+    if (!offset1) {
+        goto fail;
+    }
+    CHECK_ALIGNMENT(offset1);
+
+    memory_data = get_memory_data_reg(cc->jit_frame, 0);
+
+    if (op_type == VALUE_TYPE_I32)
+        result = jit_cc_new_reg_I32(cc);
+    else
+        result = jit_cc_new_reg_I64(cc);
+
+    switch (atomic_op) {
+        case AtomicRMWBinOpAdd:
+        {
+            GEN_AT_RMW_INSN(ADD, op_type, bytes, result, value, memory_data,
+                            offset1);
+            break;
+        }
+        case AtomicRMWBinOpSub:
+        {
+            GEN_AT_RMW_INSN(SUB, op_type, bytes, result, value, memory_data,
+                            offset1);
+            break;
+        }
+        case AtomicRMWBinOpAnd:
+        {
+            GEN_AT_RMW_INSN(AND, op_type, bytes, result, value, memory_data,
+                            offset1);
+            break;
+        }
+        case AtomicRMWBinOpOr:
+        {
+            GEN_AT_RMW_INSN(OR, op_type, bytes, result, value, memory_data,
+                            offset1);
+            break;
+        }
+        case AtomicRMWBinOpXor:
+        {
+            GEN_AT_RMW_INSN(XOR, op_type, bytes, result, value, memory_data,
+                            offset1);
+            break;
+        }
+        case AtomicRMWBinOpXchg:
+        {
+            GEN_AT_RMW_INSN(XCHG, op_type, bytes, result, value, memory_data,
+                            offset1);
+            break;
+        }
+        default:
+        {
+            bh_assert(0);
+            goto fail;
+        }
+    }
+
+    if (is_logical_op
+        && (!insn
+            || !jit_lock_reg_in_insn(cc, insn, is_i32 ? eax_hreg : rax_hreg)
+            || !jit_lock_reg_in_insn(cc, insn, is_i32 ? ebx_hreg : rbx_hreg))) {
+        jit_set_last_error(
+            cc, "generate atomic logical insn or lock rax/rbx hreg failed");
+        goto fail;
+    }
+
+    if (op_type == VALUE_TYPE_I32)
+        PUSH_I32(result);
+    else
+        PUSH_I64(result);
+
+    return true;
+#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */
+
+fail:
     return false;
 }
 
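For reference, each AT_<op> IR instruction carries the semantics of a C11 atomic read-modify-write: the old memory value is returned and pushed as the wasm result. A sketch of that contract for the 32-bit add case (not the code the backend emits; the helper name is an assumption):

#include <stdatomic.h>
#include <stdint.h>

/* i32.atomic.rmw.add: atomically add value at maddr, return the old value */
static uint32_t
atomic_rmw_add_u32(_Atomic uint32_t *maddr, uint32_t value)
{
    return atomic_fetch_add_explicit(maddr, value, memory_order_seq_cst);
}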
@@ -785,6 +999,93 @@ bool
 jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
                               uint32 offset, uint32 bytes)
 {
+    JitReg addr, offset1, memory_data, value, expect, result;
+    bool is_i32 = op_type == VALUE_TYPE_I32;
+    /* currently we only implement atomic cmpxchg on the x86-64 target */
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
+    /* cmpxchg implicitly uses al/ax/eax/rax to hold the expected value,
+     * and the value read from memory is returned in the same register */
+    JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
+    JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
+    JitInsn *insn = NULL;
+
+    bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
+    if (is_i32) {
+        POP_I32(value);
+        POP_I32(expect);
+        result = jit_cc_new_reg_I32(cc);
+    }
+    else {
+        POP_I64(value);
+        POP_I64(expect);
+        result = jit_cc_new_reg_I64(cc);
+    }
+    POP_I32(addr);
+
+    offset1 = check_and_seek(cc, addr, offset, bytes);
+    if (!offset1) {
+        goto fail;
+    }
+    CHECK_ALIGNMENT(offset1);
+
+    memory_data = get_memory_data_reg(cc->jit_frame, 0);
+
+    GEN_INSN(MOV, is_i32 ? eax_hreg : rax_hreg, expect);
+    switch (bytes) {
+        case 1:
+        {
+            insn = GEN_INSN(AT_CMPXCHGU8, value, is_i32 ? eax_hreg : rax_hreg,
+                            memory_data, offset1);
+            break;
+        }
+        case 2:
+        {
+            insn = GEN_INSN(AT_CMPXCHGU16, value, is_i32 ? eax_hreg : rax_hreg,
+                            memory_data, offset1);
+            break;
+        }
+        case 4:
+        {
+            if (op_type == VALUE_TYPE_I32)
+                insn =
+                    GEN_INSN(AT_CMPXCHGI32, value, is_i32 ? eax_hreg : rax_hreg,
+                             memory_data, offset1);
+            else
+                insn =
+                    GEN_INSN(AT_CMPXCHGU32, value, is_i32 ? eax_hreg : rax_hreg,
+                             memory_data, offset1);
+            break;
+        }
+        case 8:
+        {
+            insn = GEN_INSN(AT_CMPXCHGI64, value, is_i32 ? eax_hreg : rax_hreg,
+                            memory_data, offset1);
+            break;
+        }
+        default:
+        {
+            bh_assert(0);
+            goto fail;
+        }
+    }
+
+    if (!insn
+        || !jit_lock_reg_in_insn(cc, insn, is_i32 ? eax_hreg : rax_hreg)) {
+        jit_set_last_error(cc, "generate cmpxchg insn or lock rax hreg failed");
+        goto fail;
+    }
+
+    GEN_INSN(MOV, result, is_i32 ? eax_hreg : rax_hreg);
+
+    if (is_i32)
+        PUSH_I32(result);
+    else
+        PUSH_I64(result);
+
+    return true;
+#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */
+
+fail:
     return false;
 }
 
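Likewise the AT_CMPXCHG* contract in C11 terms: the value read from memory lands in rax and is pushed as the result, and it equals `expect` exactly when the exchange succeeded. A sketch for the 64-bit case (helper name assumed):

#include <stdatomic.h>
#include <stdint.h>

/* i64.atomic.rmw.cmpxchg: returns the old memory value; the store of
 * value happens only if the old value equaled expect. */
static uint64_t
atomic_cmpxchg_u64(_Atomic uint64_t *maddr, uint64_t expect, uint64_t value)
{
    atomic_compare_exchange_strong_explicit(maddr, &expect, value,
                                            memory_order_seq_cst,
                                            memory_order_seq_cst);
    return expect; /* overwritten with the old memory value on failure */
}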
@@ -812,8 +1113,10 @@ jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
     JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
     if (!offset1)
         goto fail;
-    JitReg maddr = jit_cc_new_reg_I64(cc);
-    CHECK_ALIGNMENT(maddr, memory_data, offset1);
+    CHECK_ALIGNMENT(offset1);
+
+    JitReg maddr = jit_cc_new_reg_ptr(cc);
+    GEN_INSN(ADD, maddr, memory_data, offset1);
 
     // Prepare `wasm_runtime_atomic_wait` arguments
     JitReg res = jit_cc_new_reg_I32(cc);
@@ -835,6 +1138,12 @@ jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
         goto fail;
 
     PUSH_I32(res);
+
+#if WASM_ENABLE_THREAD_MGR != 0
+    /* Insert a suspend checkpoint */
+    if (!jit_check_suspend_flags(cc))
+        goto fail;
+#endif
     return true;
 fail:
     return false;
@@ -854,8 +1163,10 @@ jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
     JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
     if (!offset1)
         goto fail;
-    JitReg maddr = jit_cc_new_reg_I64(cc);
-    CHECK_ALIGNMENT(maddr, memory_data, offset1);
+    CHECK_ALIGNMENT(offset1);
+
+    JitReg maddr = jit_cc_new_reg_ptr(cc);
+    GEN_INSN(ADD, maddr, memory_data, offset1);
 
     // Prepare `wasm_runtime_atomic_notify` arguments
     JitReg res = jit_cc_new_reg_I32(cc);
@@ -879,4 +1190,11 @@ jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
 fail:
     return false;
 }
+
+bool
+jit_compiler_op_atomic_fence(JitCompContext *cc)
+{
+    GEN_INSN(FENCE);
+    return true;
+}
 #endif
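Finally, atomic.fence lowers to a single FENCE IR instruction; the behavior it must provide is a full sequentially consistent barrier (e.g. mfence on x86-64). In C11 terms, as an illustration only:

#include <stdatomic.h>

/* the ordering guarantee wasm's atomic.fence requires */
static void
wasm_atomic_fence_equivalent(void)
{
    atomic_thread_fence(memory_order_seq_cst);
}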