Reland "[wasm] add 64 bit atomic ops to interpreter"

This is a reland of 5301cdc39f

Original change's description:
> [wasm] add 64 bit atomic ops to interpreter
>
> R=gdeepti@chromium.org
> BUG=v8:6532
>
> Change-Id: I532bf67f1631c692e12f9b054b29601a57b76f05
> Reviewed-on: https://chromium-review.googlesource.com/1130635
> Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
> Commit-Queue: Aseem Garg <aseemgarg@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#54365}

Bug: v8:6532
Change-Id: I22af58646b898ee2f54ccb64467d9fb978a645c5
Reviewed-on: https://chromium-review.googlesource.com/1132155
Commit-Queue: Aseem Garg <aseemgarg@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54369}
Author: Aseem Garg <aseemgarg@chromium.org>
Date: 2018-07-10 14:54:03 -07:00
Committed-by: Commit Bot
Parent: 5d76222686
Commit: 2114c6eea6

3 changed files with 147 additions and 92 deletions


@@ -1454,14 +1454,14 @@ class ThreadImpl {
     return true;
   }
 
-  template <typename type>
+  template <typename type, typename op_type>
   bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
                              Address& address, pc_t pc, int& len,
                              type* val = nullptr, type* val2 = nullptr) {
     MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
                                                     sizeof(type));
-    if (val2) *val2 = Pop().to<uint32_t>();
-    if (val) *val = Pop().to<uint32_t>();
+    if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
+    if (val) *val = static_cast<type>(Pop().to<op_type>());
     uint32_t index = Pop().to<uint32_t>();
     address = BoundsCheckMem<type>(imm.offset, index);
     if (!address) {
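
The signature change above is the core of the reland: operands are now
popped at the operation's value type (op_type, uint64_t for the I64 ops)
and truncated to the memory access width (type), instead of always going
through uint32_t, and results are widened back when pushed. A standalone
sketch of that truncate/widen round trip (plain C++, not V8 code; the
values are made up):

  #include <atomic>
  #include <cstdint>
  #include <iostream>

  int main() {
    // Memory cell of an i64.atomic.rmw16.add_u: 16 bits wide in memory.
    std::atomic<uint16_t> cell{0xFFF0};
    // The wasm operand arrives as a 64-bit value (op_type)...
    uint64_t operand = 0x123400000025;
    // ...and only its low 16 bits (type) reach the memory operation.
    uint16_t val = static_cast<uint16_t>(operand);
    // The old value is read at 16 bits and widened back to 64 for the stack.
    uint64_t old = static_cast<uint64_t>(std::atomic_fetch_add(&cell, val));
    std::cout << std::hex << old << " " << cell.load() << "\n";  // fff0 15
  }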
@@ -1511,84 +1511,133 @@ class ThreadImpl {
                      InterpreterCode* code, pc_t pc, int& len) {
     WasmValue result;
     switch (opcode) {
-#define ATOMIC_BINOP_CASE(name, type, operation)                             \
+// Disabling on Mips as 32 bit atomics are not correctly laid out for load/store
+// on big endian and 64 bit atomics fail to compile.
+#if !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
+#define ATOMIC_BINOP_CASE(name, type, op_type, operation)                    \
   case kExpr##name: {                                                        \
     type val;                                                                \
    Address addr;                                                             \
-    if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) {  \
+    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len,  \
+                                              &val)) {                       \
       return false;                                                          \
     }                                                                        \
     static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
                   "Size mismatch for types std::atomic<" #type               \
                   ">, and " #type);                                          \
-    result = WasmValue(                                                      \
-        std::operation(reinterpret_cast<std::atomic<type>*>(addr), val));    \
+    result = WasmValue(static_cast<op_type>(                                 \
+        std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)));   \
     Push(result);                                                            \
     break;                                                                   \
   }
-      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
-      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
-      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
+      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange);
+      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t,
+                        atomic_exchange);
+      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
+                        atomic_exchange);
+      ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange);
+      ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t,
+                        atomic_exchange);
+      ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
+                        atomic_exchange);
+      ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
+                        atomic_exchange);
 #undef ATOMIC_BINOP_CASE
-#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type)                             \
-  case kExpr##name: {                                                        \
-    type val;                                                                \
-    type val2;                                                               \
-    Address addr;                                                            \
-    if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val,     \
-                                     &val2)) {                               \
-      return false;                                                          \
-    }                                                                        \
-    static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
-                  "Size mismatch for types std::atomic<" #type               \
-                  ">, and " #type);                                          \
-    std::atomic_compare_exchange_strong(                                     \
-        reinterpret_cast<std::atomic<type>*>(addr), &val, val2);             \
-    Push(WasmValue(val));                                                    \
-    break;                                                                   \
+#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type)                    \
+  case kExpr##name: {                                                        \
+    type val;                                                                \
+    type val2;                                                               \
+    Address addr;                                                            \
+    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len,  \
+                                              &val, &val2)) {                \
+      return false;                                                          \
+    }                                                                        \
+    static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
+                  "Size mismatch for types std::atomic<" #type               \
+                  ">, and " #type);                                          \
+    std::atomic_compare_exchange_strong(                                     \
+        reinterpret_cast<std::atomic<type>*>(addr), &val, val2);             \
+    Push(WasmValue(static_cast<op_type>(val)));                              \
+    break;                                                                   \
   }
-      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t);
-      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t);
-      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t);
+      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
+                                   uint32_t);
+      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
+                                   uint32_t);
+      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
+                                   uint32_t);
+      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
+                                   uint64_t);
+      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
+                                   uint64_t);
+      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
+                                   uint64_t);
+      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
+                                   uint64_t);
 #undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, operation)                              \
+#define ATOMIC_LOAD_CASE(name, type, op_type, operation)                     \
   case kExpr##name: {                                                        \
     Address addr;                                                            \
-    if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len)) {        \
+    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \
       return false;                                                          \
     }                                                                        \
     static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
                   "Size mismatch for types std::atomic<" #type               \
                   ">, and " #type);                                          \
-    result =                                                                 \
-        WasmValue(std::operation(reinterpret_cast<std::atomic<type>*>(addr))); \
+    result = WasmValue(static_cast<op_type>(                                 \
+        std::operation(reinterpret_cast<std::atomic<type>*>(addr))));        \
     Push(result);                                                            \
     break;                                                                   \
   }
-      ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, atomic_load);
-      ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, atomic_load);
-      ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, atomic_load);
+      ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
+      ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
+      ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
+      ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
+      ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
+      ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
+      ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
 #undef ATOMIC_LOAD_CASE
-#define ATOMIC_STORE_CASE(name, type, operation)                             \
+#define ATOMIC_STORE_CASE(name, type, op_type, operation)                    \
   case kExpr##name: {                                                        \
     type val;                                                                \
     Address addr;                                                            \
-    if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) {  \
+    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len,  \
+                                              &val)) {                       \
       return false;                                                          \
     }                                                                        \
     static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
@@ -1597,10 +1646,15 @@ class ThreadImpl {
     std::operation(reinterpret_cast<std::atomic<type>*>(addr), val);         \
     break;                                                                   \
   }
-      ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, atomic_store);
-      ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, atomic_store);
-      ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, atomic_store);
+      ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
+      ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
+      ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
+      ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
+      ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
+      ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
+      ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
 #undef ATOMIC_STORE_CASE
+#endif  // !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
     default:
       UNREACHABLE();
       return false;
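
Note on the compare-exchange case above: std::atomic_compare_exchange_strong
writes the value actually found in memory back into val when the comparison
fails, so pushing val (widened to op_type) yields the loaded value whether or
not the exchange happened. A standalone illustration (plain C++, not V8 code):

  #include <atomic>
  #include <cstdint>
  #include <iostream>

  int main() {
    std::atomic<uint32_t> cell{7};
    uint32_t expected = 5;  // popped comparison operand, already truncated
    uint32_t desired = 9;   // popped replacement operand, already truncated
    // Fails since 7 != 5; expected is overwritten with the current value 7.
    std::atomic_compare_exchange_strong(&cell, &expected, desired);
    // An i64.* opcode would widen this back to 64 bits before pushing.
    std::cout << static_cast<uint64_t>(expected) << " " << cell.load()
              << "\n";  // prints: 7 7
  }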


@@ -243,6 +243,7 @@
   # platforms, since this would require bit swapping as a part of atomic
   # operations.
   'test-run-wasm-atomics/*': [SKIP],
+  'test-run-wasm-atomics64/*': [SKIP],
 }],  # 'byteorder == big'
 
 ##############################################################################
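
This skip mirrors the Mips big-endian guard in the interpreter change: on a
big-endian target, a narrow access at an address does not cover the low-order
bytes of the wider word stored there, so narrow atomics would need byte
swapping to agree with full-width loads and stores. A quick illustration of
the layout difference (plain C++; the printed value depends on host byte
order):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    uint64_t wide = 0x0000000000001234;
    uint16_t narrow;
    // Little-endian hosts print 0x1234 (the low half sits at offset 0);
    // big-endian hosts print 0 (the low half sits at the other end).
    std::memcpy(&narrow, &wide, sizeof(narrow));
    std::printf("%#x\n", narrow);
  }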


@@ -32,22 +32,22 @@ void RunU64BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicAdd) {
+WASM_EXEC_TEST(I64AtomicAdd) {
   RunU64BinOp(execution_mode, kExprI64AtomicAdd, Add);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicSub) {
+WASM_EXEC_TEST(I64AtomicSub) {
   RunU64BinOp(execution_mode, kExprI64AtomicSub, Sub);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicAnd) {
+WASM_EXEC_TEST(I64AtomicAnd) {
   RunU64BinOp(execution_mode, kExprI64AtomicAnd, And);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicOr) {
+WASM_EXEC_TEST(I64AtomicOr) {
   RunU64BinOp(execution_mode, kExprI64AtomicOr, Or);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicXor) {
+WASM_EXEC_TEST(I64AtomicXor) {
   RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicExchange) {
+WASM_EXEC_TEST(I64AtomicExchange) {
   RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange);
 }
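
The only change in this file is mechanical: WASM_COMPILED_EXEC_TEST becomes
WASM_EXEC_TEST, so every test body also runs under the interpreter that this
commit teaches about 64-bit atomics. A rough sketch of the pattern such
macros implement (not V8's actual definitions; all names here are
illustrative):

  #include <cstdio>

  enum WasmExecutionMode { kExecuteCompiled, kExecuteInterpreted };

  // Stamp out one entry point per execution mode, sharing a single body.
  #define MY_EXEC_TEST(name)                                          \
    static void RunWasm_##name(WasmExecutionMode execution_mode);     \
    static void Test_##name##_Compiled() {                            \
      RunWasm_##name(kExecuteCompiled);                               \
    }                                                                 \
    static void Test_##name##_Interpreted() {                         \
      RunWasm_##name(kExecuteInterpreted);                            \
    }                                                                 \
    static void RunWasm_##name(WasmExecutionMode execution_mode)

  MY_EXEC_TEST(Demo) { std::printf("mode %d\n", execution_mode); }

  int main() {
    Test_Demo_Compiled();     // mode 0
    Test_Demo_Interpreted();  // mode 1
  }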
@@ -73,22 +73,22 @@ void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicAdd32U) {
+WASM_EXEC_TEST(I64AtomicAdd32U) {
   RunU32BinOp(execution_mode, kExprI64AtomicAdd32U, Add);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicSub32U) {
+WASM_EXEC_TEST(I64AtomicSub32U) {
   RunU32BinOp(execution_mode, kExprI64AtomicSub32U, Sub);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicAnd32U) {
+WASM_EXEC_TEST(I64AtomicAnd32U) {
   RunU32BinOp(execution_mode, kExprI64AtomicAnd32U, And);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicOr32U) {
+WASM_EXEC_TEST(I64AtomicOr32U) {
   RunU32BinOp(execution_mode, kExprI64AtomicOr32U, Or);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicXor32U) {
+WASM_EXEC_TEST(I64AtomicXor32U) {
   RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicExchange32U) {
+WASM_EXEC_TEST(I64AtomicExchange32U) {
   RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange);
 }
@@ -114,22 +114,22 @@ void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicAdd16U) {
+WASM_EXEC_TEST(I64AtomicAdd16U) {
   RunU16BinOp(execution_mode, kExprI64AtomicAdd16U, Add);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicSub16U) {
+WASM_EXEC_TEST(I64AtomicSub16U) {
   RunU16BinOp(execution_mode, kExprI64AtomicSub16U, Sub);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicAnd16U) {
+WASM_EXEC_TEST(I64AtomicAnd16U) {
   RunU16BinOp(execution_mode, kExprI64AtomicAnd16U, And);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicOr16U) {
+WASM_EXEC_TEST(I64AtomicOr16U) {
   RunU16BinOp(execution_mode, kExprI64AtomicOr16U, Or);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicXor16U) {
+WASM_EXEC_TEST(I64AtomicXor16U) {
   RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicExchange16U) {
+WASM_EXEC_TEST(I64AtomicExchange16U) {
   RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange);
 }
@@ -154,26 +154,26 @@ void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicAdd8U) {
+WASM_EXEC_TEST(I64AtomicAdd8U) {
   RunU8BinOp(execution_mode, kExprI64AtomicAdd8U, Add);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicSub8U) {
+WASM_EXEC_TEST(I64AtomicSub8U) {
   RunU8BinOp(execution_mode, kExprI64AtomicSub8U, Sub);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicAnd8U) {
+WASM_EXEC_TEST(I64AtomicAnd8U) {
   RunU8BinOp(execution_mode, kExprI64AtomicAnd8U, And);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicOr8U) {
+WASM_EXEC_TEST(I64AtomicOr8U) {
   RunU8BinOp(execution_mode, kExprI64AtomicOr8U, Or);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicXor8U) {
+WASM_EXEC_TEST(I64AtomicXor8U) {
   RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicExchange8U) {
+WASM_EXEC_TEST(I64AtomicExchange8U) {
   RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange);
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange) {
+WASM_EXEC_TEST(I64AtomicCompareExchange) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -194,7 +194,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange32U) {
+WASM_EXEC_TEST(I64AtomicCompareExchange32U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -216,7 +216,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange32U) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange16U) {
+WASM_EXEC_TEST(I64AtomicCompareExchange16U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -238,7 +238,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange16U) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicCompareExchange8U) {
+WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -258,7 +258,7 @@ WASM_COMPILED_EXEC_TEST(I32AtomicCompareExchange8U) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicLoad) {
+WASM_EXEC_TEST(I64AtomicLoad) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -274,7 +274,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicLoad) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicLoad32U) {
+WASM_EXEC_TEST(I64AtomicLoad32U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -290,7 +290,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicLoad32U) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicLoad16U) {
+WASM_EXEC_TEST(I64AtomicLoad16U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -306,7 +306,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicLoad16U) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicLoad8U) {
+WASM_EXEC_TEST(I64AtomicLoad8U) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -321,7 +321,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicLoad8U) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad) {
+WASM_EXEC_TEST(I64AtomicStoreLoad) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t, uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -341,7 +341,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad32U) {
+WASM_EXEC_TEST(I64AtomicStoreLoad32U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t, uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -362,7 +362,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad32U) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad16U) {
+WASM_EXEC_TEST(I64AtomicStoreLoad16U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t, uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
@@ -383,7 +383,7 @@ WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad16U) {
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad8U) {
+WASM_EXEC_TEST(I64AtomicStoreLoad8U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint64_t, uint64_t> r(execution_mode);
   r.builder().SetHasSharedMemory();