[wasm] Implement "atomic.fence" operator.
This adds decoding and compilation of the "atomic.fence" operator, which
is intended to preserve the synchronization guarantees of higher-level
languages. Unlike other atomic operators, it does not target a particular
linear memory. It may occur in modules which declare no memory, or a
non-shared memory, without causing a validation error.

See proposal: https://github.com/WebAssembly/threads/pull/141
See discussion: https://github.com/WebAssembly/threads/issues/140

R=clemensh@chromium.org
TEST=cctest/test-run-wasm-atomics/RunWasmXXX_AtomicFence
BUG=v8:9452

Change-Id: Ibf7e46227f7edfe5c81c097cfc15924c59614067
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1701856
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62821}
parent 7b303af841
commit 4ca8b4dd67
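For orientation (not part of the commit): the new operator is encoded as the 0xFE atomics prefix, the opcode byte 0x03 (matching kExprAtomicFence = 0xfe03 in the opcode table below), and one reserved immediate byte that the decoder requires to be zero (the VALIDATE(zero == 0) / "return 2 + 1" paths in the hunks below). A minimal standalone sketch of that check, with a hypothetical helper name:

#include <cstddef>
#include <cstdint>

// Wire format of atomic.fence (three bytes total):
//   0xFE  atomics prefix
//   0x03  atomic.fence opcode
//   0x00  reserved immediate; any non-zero value is a validation error
//
// Returns the number of bytes consumed, or 0 on a validation failure.
size_t DecodeAtomicFence(const uint8_t* pc, size_t remaining) {
  if (remaining < 3) return 0;                    // prefix + opcode + immediate
  if (pc[0] != 0xFE || pc[1] != 0x03) return 0;   // not atomic.fence
  if (pc[2] != 0) return 0;                       // "invalid atomic operand"
  return 3;                                       // i.e. 2 + 1 in the decoder
}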
@@ -756,6 +756,13 @@ void Assembler::cmpxchg8b(Operand dst) {
   emit_operand(ecx, dst);
 }
 
+void Assembler::mfence() {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xAE);
+  EMIT(0xF0);
+}
+
 void Assembler::lfence() {
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
@@ -542,6 +542,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void cmpxchg8b(Operand dst);
 
   // Memory Fence
+  void mfence();
   void lfence();
 
   void pause();
@@ -1258,6 +1258,13 @@ void Assembler::emit_cmpxchg(Operand dst, Register src, int size) {
   emit_operand(src, dst);
 }
 
+void Assembler::mfence() {
+  EnsureSpace ensure_space(this);
+  emit(0x0F);
+  emit(0xAE);
+  emit(0xF0);
+}
+
 void Assembler::lfence() {
   EnsureSpace ensure_space(this);
   emit(0x0F);
@@ -1746,6 +1746,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void rorxl(Register dst, Register src, byte imm8);
   void rorxl(Register dst, Operand src, byte imm8);
 
+  void mfence();
   void lfence();
   void pause();
 
@@ -1752,6 +1752,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     }
+    case kArmDmbIsh: {
+      __ dmb(ISH);
+      break;
+    }
     case kArmDsbIsb: {
      __ dsb(SY);
      __ isb(SY);
@@ -126,6 +126,7 @@ namespace compiler {
   V(ArmPush) \
   V(ArmPoke) \
   V(ArmPeek) \
+  V(ArmDmbIsh) \
   V(ArmDsbIsb) \
   V(ArmF32x4Splat) \
   V(ArmF32x4ExtractLane) \
@@ -275,6 +275,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArmStr:
     case kArmPush:
     case kArmPoke:
+    case kArmDmbIsh:
     case kArmDsbIsb:
     case kArmWord32AtomicPairStore:
     case kArmWord32AtomicPairAdd:
@@ -2020,6 +2020,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
        g.UseRegister(right));
 }
 
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmDmbIsh, g.NoOutput());
+}
+
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   ArmOperandGenerator g(this);
@@ -1625,6 +1625,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArm64StrCompressTagged:
       __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
+    case kArm64DmbIsh:
+      __ Dmb(InnerShareable, BarrierAll);
+      break;
     case kArm64DsbIsb:
       __ Dsb(FullSystem, BarrierAll);
       __ Isb();
@@ -171,6 +171,7 @@ namespace compiler {
   V(Arm64CompressSigned) \
   V(Arm64CompressPointer) \
   V(Arm64CompressAny) \
+  V(Arm64DmbIsh) \
   V(Arm64DsbIsb) \
   V(Arm64F32x4Splat) \
   V(Arm64F32x4ExtractLane) \
@@ -319,6 +319,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64StrW:
     case kArm64Str:
     case kArm64StrCompressTagged:
+    case kArm64DmbIsh:
     case kArm64DsbIsb:
       return kHasSideEffect;
 
@@ -2801,6 +2801,11 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
   return VisitRRR(this, kArm64Float64Mul, node);
 }
 
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64DmbIsh, g.NoOutput());
+}
+
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   ArchOpcode opcode = kArchNop;
@@ -1217,7 +1217,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchWordPoisonOnSpeculation:
       // TODO(860429): Remove remaining poisoning infrastructure on ia32.
       UNREACHABLE();
-    case kLFence:
+    case kIA32MFence:
+      __ mfence();
+      break;
+    case kIA32LFence:
       __ lfence();
       break;
     case kSSEFloat32Cmp:
@@ -44,7 +44,8 @@ namespace compiler {
   V(IA32Tzcnt) \
   V(IA32Popcnt) \
   V(IA32Bswap) \
-  V(LFence) \
+  V(IA32MFence) \
+  V(IA32LFence) \
   V(SSEFloat32Cmp) \
   V(SSEFloat32Add) \
   V(SSEFloat32Sub) \
@@ -365,7 +365,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kIA32PushFloat64:
     case kIA32PushSimd128:
     case kIA32Poke:
-    case kLFence:
+    case kIA32MFence:
+    case kIA32LFence:
       return kHasSideEffect;
 
     case kIA32Word32AtomicPairLoad:
|
@ -1593,6 +1593,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
|
||||
g.UseRegister(node->InputAt(0)));
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitMemoryBarrier(Node* node) {
|
||||
IA32OperandGenerator g(this);
|
||||
Emit(kIA32MFence, g.NoOutput());
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
|
||||
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
|
||||
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
|
||||
|
@@ -1080,7 +1080,8 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
         node->opcode() == IrOpcode::kCall ||
         node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
         node->opcode() == IrOpcode::kProtectedLoad ||
-        node->opcode() == IrOpcode::kProtectedStore) {
+        node->opcode() == IrOpcode::kProtectedStore ||
+        node->opcode() == IrOpcode::kMemoryBarrier) {
       ++effect_level;
     }
   }
@@ -1740,6 +1741,8 @@ void InstructionSelector::VisitNode(Node* node) {
       MarkAsWord32(node);
       MarkPairProjectionsAsWord32(node);
       return VisitWord32PairSar(node);
+    case IrOpcode::kMemoryBarrier:
+      return VisitMemoryBarrier(node);
     case IrOpcode::kWord32AtomicLoad: {
       LoadRepresentation type = LoadRepresentationOf(node->op());
       MarkAsRepresentation(type.representation(), node);
@@ -1051,7 +1051,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
       __ andq(i.InputRegister(0), kSpeculationPoisonRegister);
       break;
-    case kLFence:
+    case kX64MFence:
+      __ mfence();
+      break;
+    case kX64LFence:
       __ lfence();
       break;
     case kArchStackSlot: {
@@ -58,7 +58,8 @@ namespace compiler {
   V(X64Popcnt32) \
   V(X64Bswap) \
   V(X64Bswap32) \
-  V(LFence) \
+  V(X64MFence) \
+  V(X64LFence) \
   V(SSEFloat32Cmp) \
   V(SSEFloat32Add) \
   V(SSEFloat32Sub) \
@@ -354,7 +354,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64Poke:
       return kHasSideEffect;
 
-    case kLFence:
+    case kX64MFence:
+    case kX64LFence:
       return kHasSideEffect;
 
     case kX64Word64AtomicLoadUint8:
@@ -2371,6 +2371,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
        g.UseRegister(node->InputAt(0)));
 }
 
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64MFence, g.NoOutput());
+}
+
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -778,6 +778,14 @@ struct MachineOperatorGlobalCache {
   };
   Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
 
+  struct MemoryBarrierOperator : public Operator {
+    MemoryBarrierOperator()
+        : Operator(IrOpcode::kMemoryBarrier,
+                   Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0,
+                   1, 1, 0, 1, 0) {}
+  };
+  MemoryBarrierOperator kMemoryBarrier;
+
   // The {BitcastWordToTagged} operator must not be marked as pure (especially
   // not idempotent), because otherwise the splitting logic in the Scheduler
   // might decide to split these operators, thus potentially creating live
@@ -1041,6 +1049,10 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
   return new (zone_) CommentOperator(msg);
 }
 
+const Operator* MachineOperatorBuilder::MemBarrier() {
+  return &cache_.kMemoryBarrier;
+}
+
 const Operator* MachineOperatorBuilder::Word32AtomicLoad(
     LoadRepresentation rep) {
 #define LOAD(Type) \
@@ -655,6 +655,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* LoadFramePointer();
   const Operator* LoadParentFramePointer();
 
+  // Memory barrier.
+  const Operator* MemBarrier();
+
   // atomic-load [base + index]
   const Operator* Word32AtomicLoad(LoadRepresentation rep);
   // atomic-load [base + index]
@@ -714,6 +714,7 @@
   V(Word32PairSar) \
   V(ProtectedLoad) \
   V(ProtectedStore) \
+  V(MemoryBarrier) \
   V(Word32AtomicLoad) \
   V(Word32AtomicStore) \
   V(Word32AtomicExchange) \
@@ -1842,6 +1842,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
     case IrOpcode::kLoadParentFramePointer:
     case IrOpcode::kUnalignedLoad:
     case IrOpcode::kUnalignedStore:
+    case IrOpcode::kMemoryBarrier:
     case IrOpcode::kWord32AtomicLoad:
     case IrOpcode::kWord32AtomicStore:
     case IrOpcode::kWord32AtomicExchange:
@@ -4695,6 +4695,11 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
   return SetEffect(node);
 }
 
+Node* WasmGraphBuilder::AtomicFence() {
+  return SetEffect(graph()->NewNode(mcgraph()->machine()->MemBarrier(),
+                                    Effect(), Control()));
+}
+
 #undef ATOMIC_BINOP_LIST
 #undef ATOMIC_CMP_EXCHG_LIST
 #undef ATOMIC_LOAD_LIST
@@ -387,6 +387,7 @@ class WasmGraphBuilder {
   Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
                  uint32_t alignment, uint32_t offset,
                  wasm::WasmCodePosition position);
+  Node* AtomicFence();
 
   // Returns a pointer to the dropped_data_segments array. Traps if the data
   // segment is active or has been dropped.
@@ -1870,6 +1870,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
       int mod, regop, rm;
       get_modrm(*data, &mod, &regop, &rm);
       data += PrintRightOperand(data);
+    } else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xF0) {
+      AppendToBuffer("mfence");
+      data += 3;
     } else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xE8) {
       AppendToBuffer("lfence");
       data += 3;
@@ -2290,7 +2290,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
       byte_size_operand_ = true;
     }
     current += PrintOperands(mnemonic, OPER_REG_OP_ORDER, current);
-  } else if (opcode == 0xAE && (*(data + 2) & 0xF8) == 0xE8) {
+  } else if (opcode == 0xAE && (data[2] & 0xF8) == 0xF0) {
+    AppendToBuffer("mfence");
+    current = data + 3;
+  } else if (opcode == 0xAE && (data[2] & 0xF8) == 0xE8) {
     AppendToBuffer("lfence");
     current = data + 3;
   } else {
@@ -2007,6 +2007,9 @@ class LiftoffCompiler {
                 const MemoryAccessImmediate<validate>& imm, Value* result) {
     unsupported(decoder, kAtomics, "atomicop");
   }
+  void AtomicFence(FullDecoder* decoder) {
+    unsupported(decoder, kAtomics, "atomic.fence");
+  }
   void MemoryInit(FullDecoder* decoder,
                   const MemoryInitImmediate<validate>& imm, const Value& dst,
                   const Value& src, const Value& size) {
@@ -760,6 +760,7 @@ struct ControlBase {
     Vector<Value> values) \
   F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
     const MemoryAccessImmediate<validate>& imm, Value* result) \
+  F(AtomicFence) \
   F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
     const Value& src, const Value& size) \
   F(DataDrop, const DataDropImmediate<validate>& imm) \
@@ -1417,6 +1418,12 @@ class WasmDecoder : public Decoder {
         MemoryAccessImmediate<validate> imm(decoder, pc + 1, UINT32_MAX);
         return 2 + imm.length;
       }
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+      FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+      {
+        return 2 + 1;
+      }
       default:
         decoder->error(pc, "invalid Atomics opcode");
         return 2;
@@ -2360,7 +2367,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
       }
       case kAtomicPrefix: {
         CHECK_PROTOTYPE_OPCODE(threads);
-        if (!CheckHasSharedMemory()) break;
         len++;
         byte atomic_index =
             this->template read_u8<validate>(this->pc_ + 1, "atomic index");
@@ -2766,16 +2772,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
     uint32_t len = 0;
     ValueType ret_type;
     FunctionSig* sig = WasmOpcodes::Signature(opcode);
-    if (sig != nullptr) {
-      MachineType memtype;
-      switch (opcode) {
+    if (!VALIDATE(sig != nullptr)) {
+      this->error("invalid atomic opcode");
+      return 0;
+    }
+    MachineType memtype;
+    switch (opcode) {
 #define CASE_ATOMIC_STORE_OP(Name, Type) \
   case kExpr##Name: { \
     memtype = MachineType::Type(); \
     ret_type = kWasmStmt; \
    break; \
  }
-        ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
+      ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
 #undef CASE_ATOMIC_OP
 #define CASE_ATOMIC_OP(Name, Type) \
   case kExpr##Name: { \
@@ -2783,22 +2792,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
     ret_type = GetReturnType(sig); \
     break; \
   }
-        ATOMIC_OP_LIST(CASE_ATOMIC_OP)
+      ATOMIC_OP_LIST(CASE_ATOMIC_OP)
 #undef CASE_ATOMIC_OP
-        default:
-          this->error("invalid atomic opcode");
+      case kExprAtomicFence: {
+        byte zero = this->template read_u8<validate>(this->pc_ + 2, "zero");
+        if (!VALIDATE(zero == 0)) {
+          this->error(this->pc_ + 2, "invalid atomic operand");
+          return 0;
+        }
+        CALL_INTERFACE_IF_REACHABLE(AtomicFence);
+        return 1;
+      }
-      MemoryAccessImmediate<validate> imm(
-          this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
-      len += imm.length;
-      auto args = PopArgs(sig);
-      auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
-      CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
-                                  result);
-    } else {
-      this->error("invalid atomic opcode");
+      default:
+        this->error("invalid atomic opcode");
+        return 0;
     }
+    if (!CheckHasSharedMemory()) return 0;
+    MemoryAccessImmediate<validate> imm(
+        this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
+    len += imm.length;
+    auto args = PopArgs(sig);
+    auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
+    CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
     return len;
   }
@@ -532,6 +532,8 @@ class WasmGraphBuildingInterface {
     if (result) result->node = node;
   }
 
+  void AtomicFence(FullDecoder* decoder) { BUILD(AtomicFence); }
+
   void MemoryInit(FullDecoder* decoder,
                   const MemoryInitImmediate<validate>& imm, const Value& dst,
                   const Value& src, const Value& size) {
@@ -2151,6 +2151,10 @@ class ThreadImpl {
       ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
       ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
 #undef ATOMIC_STORE_CASE
+      case kExprAtomicFence:
+        std::atomic_thread_fence(std::memory_order_seq_cst);
+        *len += 2;
+        break;
       default:
         UNREACHABLE();
         return false;
@@ -305,6 +305,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
     // Atomic operations.
     CASE_OP(AtomicNotify, "atomic.notify")
     CASE_INT_OP(AtomicWait, "atomic.wait")
+    CASE_OP(AtomicFence, "atomic.fence")
     CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
     CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
     CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
@@ -508,7 +509,8 @@ struct GetSimdOpcodeSigIndex {
 struct GetAtomicOpcodeSigIndex {
   constexpr WasmOpcodeSig operator()(byte opcode) const {
 #define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
-    return FOREACH_ATOMIC_OPCODE(CASE) kSigEnum_None;
+    return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
+        kSigEnum_None;
 #undef CASE
   }
 };
@@ -528,6 +528,10 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
   V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
 
+#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
+  /* AtomicFence does not target a particular linear memory. */ \
+  V(AtomicFence, 0xfe03, v_v)
+
 // All opcodes.
 #define FOREACH_OPCODE(V) \
   FOREACH_CONTROL_OPCODE(V) \
@@ -543,6 +547,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
   FOREACH_SIMD_MEM_OPCODE(V) \
   FOREACH_ATOMIC_OPCODE(V) \
+  FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
   FOREACH_NUMERIC_OPCODE(V)
 
 // All signatures.
@@ -361,6 +361,10 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
            << " align=" << (1ULL << imm.alignment);
         break;
       }
+      FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE_OPCODE) {
+        os << WasmOpcodes::OpcodeName(atomic_opcode);
+        break;
+      }
       default:
         UNREACHABLE();
         break;
@@ -897,6 +897,8 @@ TEST(DisasmIa320) {
     __ Nop(i);
   }
 
+  __ mfence();
+  __ lfence();
   __ pause();
   __ ret(0);
 
@@ -975,6 +975,8 @@ TEST(DisasmX64) {
     __ Nop(i);
   }
 
+  __ mfence();
+  __ lfence();
   __ pause();
   __ ret(0);
 
@@ -304,6 +304,19 @@ WASM_EXEC_TEST(I32AtomicStoreParameter) {
   CHECK_EQ(10, r.Call(10));
   CHECK_EQ(20, r.builder().ReadMemory(&memory[0]));
 }
+
+WASM_EXEC_TEST(AtomicFence) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t> r(execution_tier);
+  // Note that this test specifically doesn't use a shared memory, as the fence
+  // instruction does not target a particular linear memory. It may occur in
+  // modules which declare no memory, or a non-shared memory, without causing a
+  // validation error.
+
+  BUILD(r, WASM_ATOMICS_FENCE, WASM_ZERO);
+  CHECK_EQ(0, r.Call());
+}
+
 }  // namespace test_run_wasm_atomics
 }  // namespace wasm
 }  // namespace internal
@@ -697,6 +697,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
       static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
 #define WASM_ATOMICS_WAIT(op, index, value, timeout, offset) \
   index, value, timeout, WASM_ATOMICS_OP(op), ZERO_ALIGNMENT, offset
+#define WASM_ATOMICS_FENCE WASM_ATOMICS_OP(kExprAtomicFence), ZERO_OFFSET
 
 //------------------------------------------------------------------------------
 // Sign Externsion Operations.