[wasm-atomics] Use traps for atomic Load and Store OOB handling

Bug: v8:12946
Change-Id: I3d9037a6dd940fe25f737efca49835b098d55081
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3691129
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Ilya Rezvov <irezvov@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81336}

This commit is contained in:
parent 54c69fc584
commit 118dff9dcd
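In effect, an out-of-bounds wasm atomic load or store is now caught by the trap handler (as a protected instruction) rather than by an explicit bounds check, so on trap-handler platforms an out-of-bounds unaligned access can report either trap first. A minimal sketch of the observable behavior, using the assertTrapsOneOf helper this change adds and assuming an instance that exports an atomic_load(index) wrapper like the one in the updated tests:

// Sketch only: assumes instance.exports.atomic_load(index) performs an
// i32.atomic.load on a memory of kPageSize bytes, as in the tests below.
// The access is both out of bounds and unaligned, so either trap may be
// reported first depending on how the platform implements bounds checks.
assertTrapsOneOf(
    [kTrapMemOutOfBounds, kTrapUnalignedAccess],
    () => instance.exports.atomic_load(kPageSize - 3));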
@@ -488,12 +488,14 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, reg) \
  do { \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+   EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ asm_instr(i.Output##reg(), i.TempRegister(0)); \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, reg) \
  do { \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+   EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ asm_instr(i.Input##reg(2), i.TempRegister(0)); \
  } while (0)
@@ -41,7 +41,9 @@ namespace compiler {
  V(Arm64Strh) \
  V(Arm64StrQ) \
  V(Arm64StrS) \
- V(Arm64StrW)
+ V(Arm64StrW) \
+ V(Arm64Word64AtomicLoadUint64) \
+ V(Arm64Word64AtomicStoreWord64)

#define TARGET_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
@@ -345,8 +347,6 @@ namespace compiler {
  V(Arm64I32x4AllTrue) \
  V(Arm64I16x8AllTrue) \
  V(Arm64I8x16AllTrue) \
- V(Arm64Word64AtomicLoadUint64) \
- V(Arm64Word64AtomicStoreWord64) \
  V(Arm64Word64AtomicAddUint64) \
  V(Arm64Word64AtomicSubUint64) \
  V(Arm64Word64AtomicAndUint64) \
@@ -2680,6 +2680,11 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
    default:
      UNREACHABLE();
  }
+
+ if (atomic_load_params.kind() == MemoryAccessKind::kProtected) {
+   code |= AccessModeField::encode(kMemoryAccessProtected);
+ }
+
  code |=
      AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
@@ -2751,6 +2756,10 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
    code |= AtomicWidthField::encode(width);
  }

+ if (store_params.kind() == MemoryAccessKind::kProtected) {
+   code |= AccessModeField::encode(kMemoryAccessProtected);
+ }
+
  code |= AddressingModeField::encode(kMode_MRR);
  selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps),
                 temps);
@@ -66,6 +66,18 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
  UNREACHABLE();
}

+#define COMMON_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+  V(ArchStoreWithWriteBarrier) \
+  V(ArchAtomicStoreWithWriteBarrier) \
+  V(AtomicLoadInt8) \
+  V(AtomicLoadUint8) \
+  V(AtomicLoadInt16) \
+  V(AtomicLoadUint16) \
+  V(AtomicLoadWord32) \
+  V(AtomicStoreWord8) \
+  V(AtomicStoreWord16) \
+  V(AtomicStoreWord32)
+
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define COMMON_ARCH_OPCODE_LIST(V) \
@@ -101,19 +113,9 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
  V(ArchFramePointer) \
  V(ArchParentFramePointer) \
  V(ArchTruncateDoubleToI) \
- V(ArchStoreWithWriteBarrier) \
- V(ArchAtomicStoreWithWriteBarrier) \
  V(ArchStackSlot) \
  V(ArchStackPointerGreaterThan) \
  V(ArchStackCheckOffset) \
- V(AtomicLoadInt8) \
- V(AtomicLoadUint8) \
- V(AtomicLoadInt16) \
- V(AtomicLoadUint16) \
- V(AtomicLoadWord32) \
- V(AtomicStoreWord8) \
- V(AtomicStoreWord16) \
- V(AtomicStoreWord32) \
  V(AtomicExchangeInt8) \
  V(AtomicExchangeUint8) \
  V(AtomicExchangeInt16) \
@@ -169,7 +171,8 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
  V(Ieee754Float64Sin) \
  V(Ieee754Float64Sinh) \
  V(Ieee754Float64Tan) \
- V(Ieee754Float64Tanh)
+ V(Ieee754Float64Tanh) \
+ COMMON_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V)

#define ARCH_OPCODE_LIST(V) \
  COMMON_ARCH_OPCODE_LIST(V) \
@@ -320,6 +323,7 @@ inline bool HasMemoryAccessMode(ArchOpcode opcode) {
#define CASE(Name) \
  case k##Name: \
    return true;
+     COMMON_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
      TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
#undef CASE
    default:
@@ -321,9 +321,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
};

template <std::memory_order order>
-void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
+int EmitStore(TurboAssembler* tasm, Operand operand, Register value,
              MachineRepresentation rep) {
+ int store_instr_offset;
  if (order == std::memory_order_relaxed) {
+   store_instr_offset = tasm->pc_offset();
    switch (rep) {
      case MachineRepresentation::kWord8:
        tasm->movb(operand, value);
@@ -346,43 +348,50 @@ void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
      default:
        UNREACHABLE();
    }
-   return;
+   return store_instr_offset;
  }

  DCHECK_EQ(order, std::memory_order_seq_cst);
  switch (rep) {
    case MachineRepresentation::kWord8:
      tasm->movq(kScratchRegister, value);
+     store_instr_offset = tasm->pc_offset();
      tasm->xchgb(kScratchRegister, operand);
      break;
    case MachineRepresentation::kWord16:
      tasm->movq(kScratchRegister, value);
+     store_instr_offset = tasm->pc_offset();
      tasm->xchgw(kScratchRegister, operand);
      break;
    case MachineRepresentation::kWord32:
      tasm->movq(kScratchRegister, value);
+     store_instr_offset = tasm->pc_offset();
      tasm->xchgl(kScratchRegister, operand);
      break;
    case MachineRepresentation::kWord64:
      tasm->movq(kScratchRegister, value);
+     store_instr_offset = tasm->pc_offset();
      tasm->xchgq(kScratchRegister, operand);
      break;
    case MachineRepresentation::kTagged:
+     store_instr_offset = tasm->pc_offset();
      tasm->AtomicStoreTaggedField(operand, value);
      break;
    default:
      UNREACHABLE();
  }
+ return store_instr_offset;
}

template <std::memory_order order>
-void EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
+int EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
              MachineRepresentation rep);

template <>
-void EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
+int EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
                                          Immediate value,
                                          MachineRepresentation rep) {
+ int store_instr_offset = tasm->pc_offset();
  switch (rep) {
    case MachineRepresentation::kWord8:
      tasm->movb(operand, value);
@@ -402,8 +411,90 @@ void EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
    default:
      UNREACHABLE();
  }
+ return store_instr_offset;
}

+#if V8_ENABLE_WEBASSEMBLY
+class WasmOutOfLineTrap : public OutOfLineCode {
+ public:
+  WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+      : OutOfLineCode(gen), gen_(gen), instr_(instr) {}
+
+  void Generate() override {
+    X64OperandConverter i(gen_, instr_);
+    TrapId trap_id =
+        static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+    GenerateWithTrapId(trap_id);
+  }
+
+ protected:
+  CodeGenerator* gen_;
+
+  void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }
+
+ private:
+  void GenerateCallToTrap(TrapId trap_id) {
+    if (!gen_->wasm_runtime_exception_support()) {
+      // We cannot test calls to the runtime in cctest/test-run-wasm.
+      // Therefore we emit a call to C here instead of a call to the runtime.
+      __ PrepareCallCFunction(0);
+      __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(),
+                       0);
+      __ LeaveFrame(StackFrame::WASM);
+      auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+      size_t pop_size =
+          call_descriptor->ParameterSlotCount() * kSystemPointerSize;
+      // Use rcx as a scratch register, we return anyways immediately.
+      __ Ret(static_cast<int>(pop_size), rcx);
+    } else {
+      gen_->AssembleSourcePosition(instr_);
+      // A direct call to a wasm runtime stub defined in this module.
+      // Just encode the stub index. This will be patched when the code
+      // is added to the native module and copied into wasm code space.
+      __ near_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+      ReferenceMap* reference_map =
+          gen_->zone()->New<ReferenceMap>(gen_->zone());
+      gen_->RecordSafepoint(reference_map);
+      __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+    }
+  }
+
+  Instruction* instr_;
+};
+
+class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
+ public:
+  WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
+      : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
+
+  void Generate() final {
+    DCHECK(FLAG_wasm_bounds_checks && !FLAG_wasm_enforce_bounds_checks);
+    gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
+    GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
+  }
+
+ private:
+  int pc_;
+};
+
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+                         InstructionCode opcode, Instruction* instr,
+                         int pc) {
+  const MemoryAccessMode access_mode = instr->memory_access_mode();
+  if (access_mode == kMemoryAccessProtected) {
+    zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
+  }
+}
+
+#else
+
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+                         InstructionCode opcode, Instruction* instr, int pc) {
+  DCHECK_NE(kMemoryAccessProtected, instr->memory_access_mode());
+}
+
+#endif  // V8_ENABLE_WEBASSEMBLY
+
#ifdef V8_IS_TSAN
void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
                                           Register scratch, Operand operand,
@@ -542,13 +633,17 @@ template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
                        TurboAssembler* tasm, Operand operand, ValueT value,
                        X64OperandConverter& i, StubCallMode stub_call_mode,
-                       MachineRepresentation rep) {
+                       MachineRepresentation rep, Instruction* instr) {
  // The FOR_TESTING code doesn't initialize the root register. We can't call
  // the TSAN builtin since we need to load the external reference through the
  // root register.
  // TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
  // path. It is not crucial, but it would be nice to remove this restriction.
  if (codegen->code_kind() != CodeKind::FOR_TESTING) {
+   if (instr->HasMemoryAccessMode()) {
+     EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(),
+                         instr, tasm->pc_offset());
+   }
    int size = ElementSizeInBytes(rep);
    EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand,
                                          stub_call_mode, size);
@@ -556,7 +651,11 @@ void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
    EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode,
                     size, order);
  } else {
-   EmitStore<order>(tasm, operand, value, rep);
+   int store_instr_offset = EmitStore<order>(tasm, operand, value, rep);
+   if (instr->HasMemoryAccessMode()) {
+     EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(),
+                         instr, store_instr_offset);
+   }
  }
}
@@ -628,10 +727,13 @@ template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
                        TurboAssembler* tasm, Operand operand, ValueT value,
                        X64OperandConverter& i, StubCallMode stub_call_mode,
-                       MachineRepresentation rep) {
+                       MachineRepresentation rep, Instruction* instr) {
  DCHECK(order == std::memory_order_relaxed ||
         order == std::memory_order_seq_cst);
- EmitStore<order>(tasm, operand, value, rep);
+ int store_instr_off = EmitStore<order>(tasm, operand, value, rep);
+ if (instr->HasMemoryAccessMode()) {
+   EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, store_instr_off);
+ }
}

void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
@@ -640,87 +742,6 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
                                    int size) {}
#endif  // V8_IS_TSAN

-#if V8_ENABLE_WEBASSEMBLY
-class WasmOutOfLineTrap : public OutOfLineCode {
- public:
-  WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
-      : OutOfLineCode(gen), gen_(gen), instr_(instr) {}
-
-  void Generate() override {
-    X64OperandConverter i(gen_, instr_);
-    TrapId trap_id =
-        static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
-    GenerateWithTrapId(trap_id);
-  }
-
- protected:
-  CodeGenerator* gen_;
-
-  void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }
-
- private:
-  void GenerateCallToTrap(TrapId trap_id) {
-    if (!gen_->wasm_runtime_exception_support()) {
-      // We cannot test calls to the runtime in cctest/test-run-wasm.
-      // Therefore we emit a call to C here instead of a call to the runtime.
-      __ PrepareCallCFunction(0);
-      __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(),
-                       0);
-      __ LeaveFrame(StackFrame::WASM);
-      auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
-      size_t pop_size =
-          call_descriptor->ParameterSlotCount() * kSystemPointerSize;
-      // Use rcx as a scratch register, we return anyways immediately.
-      __ Ret(static_cast<int>(pop_size), rcx);
-    } else {
-      gen_->AssembleSourcePosition(instr_);
-      // A direct call to a wasm runtime stub defined in this module.
-      // Just encode the stub index. This will be patched when the code
-      // is added to the native module and copied into wasm code space.
-      __ near_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
-      ReferenceMap* reference_map =
-          gen_->zone()->New<ReferenceMap>(gen_->zone());
-      gen_->RecordSafepoint(reference_map);
-      __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
-    }
-  }
-
-  Instruction* instr_;
-};
-
-class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
- public:
-  WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
-      : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
-
-  void Generate() final {
-    DCHECK(FLAG_wasm_bounds_checks && !FLAG_wasm_enforce_bounds_checks);
-    gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
-    GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
-  }
-
- private:
-  int pc_;
-};
-
-void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
-                         InstructionCode opcode, Instruction* instr,
-                         int pc) {
-  const MemoryAccessMode access_mode = instr->memory_access_mode();
-  if (access_mode == kMemoryAccessProtected) {
-    zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
-  }
-}
-
-#else
-
-void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
-                         InstructionCode opcode, Instruction* instr, int pc) {
-  DCHECK_NE(kMemoryAccessProtected, instr->memory_access_mode());
-}
-
-#endif  // V8_ENABLE_WEBASSEMBLY
-
}  // namespace

#define ASSEMBLE_UNOP(asm_instr) \
@@ -1038,7 +1059,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
    Operand operand = i.MemoryOperand(1); \
    EmitTSANAwareStore<std::memory_order_seq_cst>( \
        zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \
-       rep); \
+       rep, instr); \
  } while (false)

void CodeGenerator::AssembleDeconstructFrame() {
@@ -1488,12 +1509,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      if (arch_opcode == kArchStoreWithWriteBarrier) {
        EmitTSANAwareStore<std::memory_order_relaxed>(
            zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-           MachineRepresentation::kTagged);
+           MachineRepresentation::kTagged, instr);
      } else {
        DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
        EmitTSANAwareStore<std::memory_order_seq_cst>(
            zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-           MachineRepresentation::kTagged);
+           MachineRepresentation::kTagged, instr);
      }
      if (mode > RecordWriteMode::kValueIsPointer) {
        __ JumpIfSmi(value, ool->exit());
@@ -2334,19 +2355,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movb: {
-     EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        Immediate value(Immediate(i.InputInt8(index)));
        EmitTSANAwareStore<std::memory_order_relaxed>(
            zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-           MachineRepresentation::kWord8);
+           MachineRepresentation::kWord8, instr);
      } else {
        Register value(i.InputRegister(index));
        EmitTSANAwareStore<std::memory_order_relaxed>(
            zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-           MachineRepresentation::kWord8);
+           MachineRepresentation::kWord8, instr);
      }
      break;
    }
@@ -2370,25 +2390,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
-     EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        Immediate value(Immediate(i.InputInt16(index)));
        EmitTSANAwareStore<std::memory_order_relaxed>(
            zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-           MachineRepresentation::kWord16);
+           MachineRepresentation::kWord16, instr);
      } else {
        Register value(i.InputRegister(index));
        EmitTSANAwareStore<std::memory_order_relaxed>(
            zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-           MachineRepresentation::kWord16);
+           MachineRepresentation::kWord16, instr);
      }
      break;
    }
    case kX64Movl:
-     EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
      if (instr->HasOutput()) {
+       EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
        if (HasAddressingMode(instr)) {
          Operand address(i.MemoryOperand());
          __ movl(i.OutputRegister(), address);
@@ -2409,12 +2428,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
          Immediate value(i.InputImmediate(index));
          EmitTSANAwareStore<std::memory_order_relaxed>(
              zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-             MachineRepresentation::kWord32);
+             MachineRepresentation::kWord32, instr);
        } else {
          Register value(i.InputRegister(index));
          EmitTSANAwareStore<std::memory_order_relaxed>(
              zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-             MachineRepresentation::kWord32);
+             MachineRepresentation::kWord32, instr);
        }
      }
      break;
@@ -2454,12 +2473,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        Immediate value(i.InputImmediate(index));
        EmitTSANAwareStore<std::memory_order_relaxed>(
            zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-           MachineRepresentation::kTagged);
+           MachineRepresentation::kTagged, instr);
      } else {
        Register value(i.InputRegister(index));
        EmitTSANAwareStore<std::memory_order_relaxed>(
            zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-           MachineRepresentation::kTagged);
+           MachineRepresentation::kTagged, instr);
      }
      break;
    }
@@ -2482,12 +2501,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      Register value(i.InputRegister(index));
      EmitTSANAwareStore<std::memory_order_relaxed>(
          zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-         MachineRepresentation::kSandboxedPointer);
+         MachineRepresentation::kSandboxedPointer, instr);
      break;
    }
    case kX64Movq:
-     EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
      if (instr->HasOutput()) {
+       EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
        Operand address(i.MemoryOperand());
        __ movq(i.OutputRegister(), address);
        EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
@@ -2499,12 +2518,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
          Immediate value(i.InputImmediate(index));
          EmitTSANAwareStore<std::memory_order_relaxed>(
              zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-             MachineRepresentation::kWord64);
+             MachineRepresentation::kWord64, instr);
        } else {
          Register value(i.InputRegister(index));
          EmitTSANAwareStore<std::memory_order_relaxed>(
              zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
-             MachineRepresentation::kWord64);
+             MachineRepresentation::kWord64, instr);
        }
      }
      break;
@@ -48,7 +48,8 @@ namespace compiler {
  V(X64S128Load8x8S) \
  V(X64S128Load8x8U) \
  V(X64S128Store32Lane) \
- V(X64S128Store64Lane)
+ V(X64S128Store64Lane) \
+ V(X64Word64AtomicStoreWord64)

#define TARGET_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
@@ -403,7 +404,6 @@ namespace compiler {
  V(X64Word64AtomicAndUint64) \
  V(X64Word64AtomicOrUint64) \
  V(X64Word64AtomicXorUint64) \
- V(X64Word64AtomicStoreWord64) \
  V(X64Word64AtomicExchangeUint64) \
  V(X64Word64AtomicCompareExchangeUint64)
@@ -502,7 +502,9 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count, reg_kind);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kProtectedLoad) {
+ if (node->opcode() == IrOpcode::kProtectedLoad ||
+     node->opcode() == IrOpcode::kWord32AtomicLoad ||
+     node->opcode() == IrOpcode::kWord64AtomicLoad) {
    code |= AccessModeField::encode(kMemoryAccessProtected);
  }
  Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
@@ -537,7 +539,8 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,

void VisitStoreCommon(InstructionSelector* selector, Node* node,
                      StoreRepresentation store_rep,
-                     base::Optional<AtomicMemoryOrder> atomic_order) {
+                     base::Optional<AtomicMemoryOrder> atomic_order,
+                     MemoryAccessKind acs_kind = MemoryAccessKind::kNormal) {
  X64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
@@ -553,6 +556,10 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
    write_barrier_kind = kFullWriteBarrier;
  }

+ const auto access_mode = acs_kind == MemoryAccessKind::kProtected
+                              ? MemoryAccessMode::kMemoryAccessProtected
+                              : MemoryAccessMode::kMemoryAccessDirect;
+
  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
    DCHECK(CanBeTaggedOrCompressedPointer(store_rep.representation()));
    AddressingMode addressing_mode;
@@ -567,6 +574,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
                   : kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
+   code |= AccessModeField::encode(access_mode);
    selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
                   arraysize(temps), temps);
  } else {
@@ -617,8 +625,9 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
      opcode = GetStoreOpcode(store_rep);
    }

-   InstructionCode code =
-       opcode | AddressingModeField::encode(addressing_mode);
+   InstructionCode code = opcode
+                          | AddressingModeField::encode(addressing_mode)
+                          | AccessModeField::encode(access_mode);
    selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
                   input_count, inputs, temp_count, temps);
  }
@@ -2901,14 +2910,16 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
  DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
                 kTaggedSize == 4);
- VisitStoreCommon(this, node, params.store_representation(), params.order());
+ VisitStoreCommon(this, node, params.store_representation(), params.order(),
+                  params.kind());
}

void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
  DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
                 kTaggedSize == 8);
- VisitStoreCommon(this, node, params.store_representation(), params.order());
+ VisitStoreCommon(this, node, params.store_representation(), params.order(),
+                  params.kind());
}

void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -34,7 +34,7 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {

bool operator==(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
  return lhs.store_representation() == rhs.store_representation() &&
-        lhs.order() == rhs.order();
+        lhs.order() == rhs.order() && lhs.kind() == rhs.kind();
}

bool operator!=(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
@@ -43,7 +43,7 @@ bool operator!=(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {

size_t hash_value(AtomicStoreParameters params) {
  return base::hash_combine(hash_value(params.store_representation()),
-                           params.order());
+                           params.order(), params.kind());
}

std::ostream& operator<<(std::ostream& os, AtomicStoreParameters params) {
@@ -52,7 +52,7 @@ std::ostream& operator<<(std::ostream& os, AtomicStoreParameters params) {

bool operator==(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
  return lhs.representation() == rhs.representation() &&
-        lhs.order() == rhs.order();
+        lhs.order() == rhs.order() && lhs.kind() == rhs.kind();
}

bool operator!=(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
@@ -60,7 +60,8 @@ bool operator!=(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
}

size_t hash_value(AtomicLoadParameters params) {
- return base::hash_combine(params.representation(), params.order());
+ return base::hash_combine(params.representation(), params.order(),
+                           params.kind());
}

std::ostream& operator<<(std::ostream& os, AtomicLoadParameters params) {
@@ -1067,64 +1068,84 @@ struct MachineOperatorGlobalCache {
  MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE

-#define ATOMIC_LOAD(Type) \
-  struct Word32SeqCstLoad##Type##Operator \
+#define ATOMIC_LOAD_WITH_KIND(Type, Kind) \
+  struct Word32SeqCstLoad##Type##Kind##Operator \
      : public Operator1<AtomicLoadParameters> { \
-    Word32SeqCstLoad##Type##Operator() \
+    Word32SeqCstLoad##Type##Kind##Operator() \
        : Operator1<AtomicLoadParameters>( \
              IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
              "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, \
              AtomicLoadParameters(MachineType::Type(), \
-                                  AtomicMemoryOrder::kSeqCst)) {} \
+                                  AtomicMemoryOrder::kSeqCst, \
+                                  MemoryAccessKind::k##Kind)) {} \
  }; \
- Word32SeqCstLoad##Type##Operator kWord32SeqCstLoad##Type;
+ Word32SeqCstLoad##Type##Kind##Operator kWord32SeqCstLoad##Type##Kind;
+#define ATOMIC_LOAD(Type) \
+  ATOMIC_LOAD_WITH_KIND(Type, Normal) \
+  ATOMIC_LOAD_WITH_KIND(Type, Protected)
  ATOMIC_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD_WITH_KIND
#undef ATOMIC_LOAD

-#define ATOMIC_LOAD(Type) \
-  struct Word64SeqCstLoad##Type##Operator \
+#define ATOMIC_LOAD_WITH_KIND(Type, Kind) \
+  struct Word64SeqCstLoad##Type##Kind##Operator \
      : public Operator1<AtomicLoadParameters> { \
-    Word64SeqCstLoad##Type##Operator() \
+    Word64SeqCstLoad##Type##Kind##Operator() \
        : Operator1<AtomicLoadParameters>( \
              IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
              "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, \
              AtomicLoadParameters(MachineType::Type(), \
-                                  AtomicMemoryOrder::kSeqCst)) {} \
+                                  AtomicMemoryOrder::kSeqCst, \
+                                  MemoryAccessKind::k##Kind)) {} \
  }; \
- Word64SeqCstLoad##Type##Operator kWord64SeqCstLoad##Type;
+ Word64SeqCstLoad##Type##Kind##Operator kWord64SeqCstLoad##Type##Kind;
+#define ATOMIC_LOAD(Type) \
+  ATOMIC_LOAD_WITH_KIND(Type, Normal) \
+  ATOMIC_LOAD_WITH_KIND(Type, Protected)
  ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD_WITH_KIND
#undef ATOMIC_LOAD

-#define ATOMIC_STORE(Type) \
-  struct Word32SeqCstStore##Type##Operator \
+#define ATOMIC_STORE_WITH_KIND(Type, Kind) \
+  struct Word32SeqCstStore##Type##Kind##Operator \
      : public Operator1<AtomicStoreParameters> { \
-    Word32SeqCstStore##Type##Operator() \
+    Word32SeqCstStore##Type##Kind##Operator() \
        : Operator1<AtomicStoreParameters>( \
              IrOpcode::kWord32AtomicStore, \
              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
              "Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
              AtomicStoreParameters(MachineRepresentation::Type, \
                                    kNoWriteBarrier, \
-                                   AtomicMemoryOrder::kSeqCst)) {} \
+                                   AtomicMemoryOrder::kSeqCst, \
+                                   MemoryAccessKind::k##Kind)) {} \
  }; \
- Word32SeqCstStore##Type##Operator kWord32SeqCstStore##Type;
+ Word32SeqCstStore##Type##Kind##Operator kWord32SeqCstStore##Type##Kind;
+#define ATOMIC_STORE(Type) \
+  ATOMIC_STORE_WITH_KIND(Type, Normal) \
+  ATOMIC_STORE_WITH_KIND(Type, Protected)
  ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE_WITH_KIND
#undef ATOMIC_STORE

-#define ATOMIC_STORE(Type) \
-  struct Word64SeqCstStore##Type##Operator \
+#define ATOMIC_STORE_WITH_KIND(Type, Kind) \
+  struct Word64SeqCstStore##Type##Kind##Operator \
      : public Operator1<AtomicStoreParameters> { \
-    Word64SeqCstStore##Type##Operator() \
+    Word64SeqCstStore##Type##Kind##Operator() \
        : Operator1<AtomicStoreParameters>( \
              IrOpcode::kWord64AtomicStore, \
              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
              "Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
              AtomicStoreParameters(MachineRepresentation::Type, \
                                    kNoWriteBarrier, \
-                                   AtomicMemoryOrder::kSeqCst)) {} \
+                                   AtomicMemoryOrder::kSeqCst, \
+                                   MemoryAccessKind::k##Kind)) {} \
  }; \
- Word64SeqCstStore##Type##Operator kWord64SeqCstStore##Type;
+ Word64SeqCstStore##Type##Kind##Operator kWord64SeqCstStore##Type##Kind;
+#define ATOMIC_STORE(Type) \
+  ATOMIC_STORE_WITH_KIND(Type, Normal) \
+  ATOMIC_STORE_WITH_KIND(Type, Protected)
  ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE_WITH_KIND
#undef ATOMIC_STORE

#define ATOMIC_OP(op, type) \
@@ -1662,12 +1683,17 @@ const Operator* MachineOperatorBuilder::MemBarrier() {

const Operator* MachineOperatorBuilder::Word32AtomicLoad(
    AtomicLoadParameters params) {
-#define CACHED_LOAD(Type) \
+#define CACHED_LOAD_WITH_KIND(Type, Kind) \
  if (params.representation() == MachineType::Type() && \
-     params.order() == AtomicMemoryOrder::kSeqCst) { \
-   return &cache_.kWord32SeqCstLoad##Type; \
+     params.order() == AtomicMemoryOrder::kSeqCst && \
+     params.kind() == MemoryAccessKind::k##Kind) { \
+   return &cache_.kWord32SeqCstLoad##Type##Kind; \
  }
+#define CACHED_LOAD(Type) \
+  CACHED_LOAD_WITH_KIND(Type, Normal) \
+  CACHED_LOAD_WITH_KIND(Type, Protected)
  ATOMIC_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD_WITH_KIND
#undef CACHED_LOAD

#define LOAD(Type) \
|
||||
|
||||
const Operator* MachineOperatorBuilder::Word32AtomicStore(
|
||||
AtomicStoreParameters params) {
|
||||
#define CACHED_STORE(kRep) \
|
||||
#define CACHED_STORE_WITH_KIND(kRep, Kind) \
|
||||
if (params.representation() == MachineRepresentation::kRep && \
|
||||
params.order() == AtomicMemoryOrder::kSeqCst) { \
|
||||
return &cache_.kWord32SeqCstStore##kRep; \
|
||||
params.order() == AtomicMemoryOrder::kSeqCst && \
|
||||
params.kind() == MemoryAccessKind::k##Kind) { \
|
||||
return &cache_.kWord32SeqCstStore##kRep##Kind; \
|
||||
}
|
||||
#define CACHED_STORE(kRep) \
|
||||
CACHED_STORE_WITH_KIND(kRep, Normal) \
|
||||
CACHED_STORE_WITH_KIND(kRep, Protected)
|
||||
ATOMIC_REPRESENTATION_LIST(CACHED_STORE)
|
||||
#undef CACHED_STORE_WITH_KIND
|
||||
#undef CACHED_STORE
|
||||
|
||||
#define STORE(kRep) \
|
||||
@@ -1779,12 +1810,17 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {

const Operator* MachineOperatorBuilder::Word64AtomicLoad(
    AtomicLoadParameters params) {
-#define CACHED_LOAD(Type) \
+#define CACHED_LOAD_WITH_KIND(Type, Kind) \
  if (params.representation() == MachineType::Type() && \
-     params.order() == AtomicMemoryOrder::kSeqCst) { \
-   return &cache_.kWord64SeqCstLoad##Type; \
+     params.order() == AtomicMemoryOrder::kSeqCst && \
+     params.kind() == MemoryAccessKind::k##Kind) { \
+   return &cache_.kWord64SeqCstLoad##Type##Kind; \
  }
+#define CACHED_LOAD(Type) \
+  CACHED_LOAD_WITH_KIND(Type, Normal) \
+  CACHED_LOAD_WITH_KIND(Type, Protected)
  ATOMIC_U64_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD_WITH_KIND
#undef CACHED_LOAD

#define LOAD(Type) \
@@ -1802,12 +1838,17 @@ const Operator* MachineOperatorBuilder::Word64AtomicLoad(

const Operator* MachineOperatorBuilder::Word64AtomicStore(
    AtomicStoreParameters params) {
-#define CACHED_STORE(kRep) \
+#define CACHED_STORE_WITH_KIND(kRep, Kind) \
  if (params.representation() == MachineRepresentation::kRep && \
-     params.order() == AtomicMemoryOrder::kSeqCst) { \
-   return &cache_.kWord64SeqCstStore##kRep; \
+     params.order() == AtomicMemoryOrder::kSeqCst && \
+     params.kind() == MemoryAccessKind::k##Kind) { \
+   return &cache_.kWord64SeqCstStore##kRep##Kind; \
  }
+#define CACHED_STORE(kRep) \
+  CACHED_STORE_WITH_KIND(kRep, Normal) \
+  CACHED_STORE_WITH_KIND(kRep, Protected)
  ATOMIC64_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE_WITH_KIND
#undef CACHED_STORE

#define STORE(kRep) \
@@ -44,6 +44,15 @@ class OptionalOperator final {
  const Operator* const op_;
};

+enum class MemoryAccessKind {
+  kNormal,
+  kUnaligned,
+  kProtected,
+};
+
+size_t hash_value(MemoryAccessKind);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, MemoryAccessKind);

// A Load needs a MachineType.
using LoadRepresentation = MachineType;
@@ -56,15 +65,18 @@ V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
class AtomicLoadParameters final {
 public:
  AtomicLoadParameters(LoadRepresentation representation,
-                      AtomicMemoryOrder order)
-     : representation_(representation), order_(order) {}
+                      AtomicMemoryOrder order,
+                      MemoryAccessKind kind = MemoryAccessKind::kNormal)
+     : representation_(representation), order_(order), kind_(kind) {}

  LoadRepresentation representation() const { return representation_; }
  AtomicMemoryOrder order() const { return order_; }
+ MemoryAccessKind kind() const { return kind_; }

 private:
  LoadRepresentation representation_;
  AtomicMemoryOrder order_;
+ MemoryAccessKind kind_;
};

V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
@@ -77,16 +89,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
    V8_WARN_UNUSED_RESULT;

-enum class MemoryAccessKind {
-  kNormal,
-  kUnaligned,
-  kProtected,
-};
-
-size_t hash_value(MemoryAccessKind);
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, MemoryAccessKind);
-
enum class LoadTransformation {
  kS128Load8Splat,
  kS128Load16Splat,
@@ -167,9 +169,10 @@ class AtomicStoreParameters final {
 public:
  AtomicStoreParameters(MachineRepresentation representation,
                        WriteBarrierKind write_barrier_kind,
-                       AtomicMemoryOrder order)
+                       AtomicMemoryOrder order,
+                       MemoryAccessKind kind = MemoryAccessKind::kNormal)
      : store_representation_(representation, write_barrier_kind),
-       order_(order) {}
+       order_(order), kind_(kind) {}

  MachineRepresentation representation() const {
    return store_representation_.representation();
@@ -178,6 +181,7 @@ class AtomicStoreParameters final {
    return store_representation_.write_barrier_kind();
  }
  AtomicMemoryOrder order() const { return order_; }
+ MemoryAccessKind kind() const { return kind_; }

  StoreRepresentation store_representation() const {
    return store_representation_;
@@ -186,6 +190,7 @@ class AtomicStoreParameters final {
 private:
  StoreRepresentation store_representation_;
  AtomicMemoryOrder order_;
+ MemoryAccessKind kind_;
};

V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
@@ -3334,14 +3334,16 @@ void WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
                        gasm_->IntPtrConstant(table_index), index, val);
}

-Node* WasmGraphBuilder::CheckBoundsAndAlignment(
-    int8_t access_size, Node* index, uint64_t offset,
-    wasm::WasmCodePosition position) {
+std::pair<Node*, WasmGraphBuilder::BoundsCheckResult>
+WasmGraphBuilder::CheckBoundsAndAlignment(int8_t access_size, Node* index,
+                                          uint64_t offset,
+                                          wasm::WasmCodePosition position,
+                                          EnforceBoundsCheck enforce_check) {
  // Atomic operations need bounds checks until the backend can emit protected
  // loads.
- index =
-     BoundsCheckMem(access_size, index, offset, position, kNeedsBoundsCheck)
-         .first;
+ BoundsCheckResult bounds_check_result;
+ std::tie(index, bounds_check_result) =
+     BoundsCheckMem(access_size, index, offset, position, enforce_check);

  const uintptr_t align_mask = access_size - 1;
@@ -3356,7 +3358,7 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
    // statically known to be unaligned; trap.
    TrapIfEq32(wasm::kTrapUnalignedAccess, Int32Constant(0), 0, position);
  }
- return index;
+ return {index, bounds_check_result};
}

// Unlike regular memory accesses, atomic memory accesses should trap if
@@ -3368,7 +3370,7 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
      gasm_->WordAnd(effective_offset, gasm_->IntPtrConstant(align_mask));
  TrapIfFalse(wasm::kTrapUnalignedAccess,
              gasm_->Word32Equal(cond, Int32Constant(0)), position);
- return index;
+ return {index, bounds_check_result};
}

// Insert code to bounds check a memory access if necessary. Return the
@@ -4750,6 +4752,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
    const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
    const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
    const wasm::ValueType wasm_type;
+   const EnforceBoundsCheck enforce_bounds_check =
+       EnforceBoundsCheck::kNeedsBoundsCheck;

    constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
        : type(t), machine_type(m), operator_by_type(o) {}
@@ -4760,13 +4764,15 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
        : type(t),
          machine_type(m),
          operator_by_atomic_load_params(o),
-         wasm_type(v) {}
+         wasm_type(v),
+         enforce_bounds_check(EnforceBoundsCheck::kCanOmitBoundsCheck) {}
    constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o,
                           wasm::ValueType v)
        : type(t),
          machine_type(m),
          operator_by_atomic_store_rep(o),
-         wasm_type(v) {}
+         wasm_type(v),
+         enforce_bounds_check(EnforceBoundsCheck::kCanOmitBoundsCheck) {}

    // Constexpr, hence just a table lookup in most compilers.
    static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@@ -4888,8 +4894,16 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,

  AtomicOpInfo info = AtomicOpInfo::Get(opcode);

- Node* index = CheckBoundsAndAlignment(info.machine_type.MemSize(), inputs[0],
-                                       offset, position);
+ Node* index;
+ BoundsCheckResult bounds_check_result;
+ std::tie(index, bounds_check_result) =
+     CheckBoundsAndAlignment(info.machine_type.MemSize(), inputs[0], offset,
+                             position, info.enforce_bounds_check);
+ // MemoryAccessKind::kUnaligned is impossible due to the explicit alignment
+ // check.
+ MemoryAccessKind access_kind =
+     bounds_check_result == WasmGraphBuilder::kTrapHandler
+         ? MemoryAccessKind::kProtected
+         : MemoryAccessKind::kNormal;

  // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
  uintptr_t capped_offset = static_cast<uintptr_t>(offset);
@@ -4902,12 +4916,14 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
                             info.machine_type.representation());
  } else if (info.operator_by_atomic_load_params) {
    op = (mcgraph()->machine()->*info.operator_by_atomic_load_params)(
-       AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst));
+       AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst,
+                            access_kind));
  } else {
    op = (mcgraph()->machine()->*info.operator_by_atomic_store_rep)(
        AtomicStoreParameters(info.machine_type.representation(),
                              WriteBarrierKind::kNoWriteBarrier,
-                             AtomicMemoryOrder::kSeqCst));
+                             AtomicMemoryOrder::kSeqCst,
+                             access_kind));
  }

  Node* input_nodes[6] = {MemBuffer(capped_offset), index};
@@ -4928,6 +4944,10 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
  Node* result = gasm_->AddNode(
      graph()->NewNode(op, num_actual_inputs + 4, input_nodes));

+ if (access_kind == MemoryAccessKind::kProtected) {
+   SetSourcePosition(result, position);
+ }
+
#ifdef V8_TARGET_BIG_ENDIAN
  // Reverse the value bytes after load.
  if (info.operator_by_atomic_load_params) {
@@ -616,8 +616,9 @@ class WasmGraphBuilder {
                 wasm::WasmCodePosition,
                 EnforceBoundsCheck);

- Node* CheckBoundsAndAlignment(int8_t access_size, Node* index,
-                               uint64_t offset, wasm::WasmCodePosition);
+ std::pair<Node*, BoundsCheckResult> CheckBoundsAndAlignment(
+     int8_t access_size, Node* index, uint64_t offset, wasm::WasmCodePosition,
+     EnforceBoundsCheck);

  const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
  const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
@@ -2401,6 +2401,8 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
  unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
  uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
  DCHECK_EQ(address % access_size, 0);
+ // First, check whether the memory is accessible (for wasm trap handling).
+ if (!ProbeMemory(address, access_size)) return;
  base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
  if (is_load != 0) {
    if (is_exclusive) {
@@ -368,6 +368,50 @@ WASM_EXEC_TEST(AtomicCompareExchangeNoConsideredEffectful) {
  CHECK_EQ(1, r.Call());
}

+WASM_EXEC_TEST(I32AtomicLoad_trap) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  r.builder().AddMemory(kWasmPageSize);
+  BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_I32V_3(kWasmPageSize),
+                                MachineRepresentation::kWord32));
+  CHECK_TRAP(r.Call());
+}
+
+WASM_EXEC_TEST(I64AtomicLoad_trap) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint64_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  r.builder().AddMemory(kWasmPageSize);
+  BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_I32V_3(kWasmPageSize),
+                                MachineRepresentation::kWord64));
+  CHECK_TRAP64(r.Call());
+}
+
+WASM_EXEC_TEST(I32AtomicStore_trap) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  r.builder().AddMemory(kWasmPageSize);
+  BUILD(r,
+        WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_I32V_3(kWasmPageSize),
+                              WASM_ZERO, MachineRepresentation::kWord32),
+        WASM_ZERO);
+  CHECK_TRAP(r.Call());
+}
+
+WASM_EXEC_TEST(I64AtomicStore_trap) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  r.builder().AddMemory(kWasmPageSize);
+  BUILD(r,
+        WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_I32V_3(kWasmPageSize),
+                              WASM_ZERO64, MachineRepresentation::kWord64),
+        WASM_ZERO);
+  CHECK_TRAP(r.Call());
+}
+
}  // namespace test_run_wasm_atomics
}  // namespace wasm
}  // namespace internal
@@ -97,7 +97,13 @@ function VerifyBoundsCheck(func, memtype_size) {
  // Test out of bounds at boundary
  for (let i = memory.buffer.byteLength - memtype_size + 1;
       i < memory.buffer.byteLength + memtype_size + 4; i++) {
-   assertTraps(kTrapMemOutOfBounds, () => func(i, 5, 10));
+   assertTrapsOneOf(
+       // If an underlying platform uses traps for a bounds check,
+       // kTrapUnalignedAccess will be thrown before kTrapMemOutOfBounds.
+       // Otherwise, kTrapMemOutOfBounds will be first.
+       [kTrapMemOutOfBounds, kTrapUnalignedAccess],
+       () => func(i, 5, 10)
+   );
  }
  // Test out of bounds at maximum + 1
  assertTraps(kTrapMemOutOfBounds, () => func((maxSize + 1) * kPageSize, 5, 1));
@@ -343,7 +343,10 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
  assertEquals(0xACED, instance.exports.atomic_load(0));
  assertEquals(0xACED, instance.exports.atomic_load(5 * kPageSize - 4));
  // Verify bounds.
- assertTraps(kTrapMemOutOfBounds,
+ // If an underlying platform uses traps for a bounds check,
+ // kTrapUnalignedAccess will be thrown before kTrapMemOutOfBounds.
+ // Otherwise, kTrapMemOutOfBounds will be first.
+ assertTrapsOneOf([kTrapMemOutOfBounds, kTrapUnalignedAccess],
              () => instance.exports.atomic_load(5 * kPageSize - 3));
  let obj = {memory: memory, module: module};
  assertEquals(obj.memory.buffer.byteLength, 5 * kPageSize);
@@ -358,11 +361,11 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
  assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
  assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
  assertTrue(0xACED === instance.exports.atomic_load(17 * kPageSize - 4));
- assertTraps(kTrapMemOutOfBounds,
+ assertTrapsOneOf([kTrapMemOutOfBounds, kTrapUnalignedAccess],
              () => instance.exports.atomic_load(19 * kPageSize - 3));
  assertEquals(19, memory.grow(6));
  assertEquals(obj.memory.buffer.byteLength, 25 * kPageSize);
- assertTraps(kTrapMemOutOfBounds,
+ assertTrapsOneOf([kTrapMemOutOfBounds, kTrapUnalignedAccess],
              () => instance.exports.atomic_load(25 * kPageSize - 3));
})();
@@ -934,6 +934,13 @@ function assertTraps(trap, code) {
  assertThrows(code, WebAssembly.RuntimeError, kTrapMsgs[trap]);
}

+function assertTrapsOneOf(traps, code) {
+  const errorChecker = new RegExp(
+      '(' + traps.map(trap => kTrapMsgs[trap]).join('|') + ')'
+  );
+  assertThrows(code, WebAssembly.RuntimeError, errorChecker);
+}
+
class Binary {
  constructor() {
    this.length = 0;