[compiler] Support acq/rel accesses and atomic accesses on tagged
This CL adds an AtomicMemoryOrder parameter to the various atomic load and
store operators. Currently only acquire-release (kAcqRel) and sequentially
consistent (kSeqCst) orders are supported. Additionally, atomic loads and
stores are extended to work with tagged values.

This CL is a prerequisite for supporting atomic accesses in Torque, which is
in turn a prerequisite for prototyping shared strings.

Bug: v8:11995
Change-Id: Ic77d2640e2dc7e5581b1211a054c93210c219355
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3101765
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76393}
Commit: faf2208a0b (parent: 89933af67f)
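
At a glance, the CL turns the memory order from an implicit property of the
atomic operators into an explicit parameter. The following is a rough,
simplified sketch of the caller-side difference (not code from this CL; the
real operators live in src/compiler/machine-operator.h and differ in detail):

    // Before this CL: the order was implicitly sequentially consistent.
    // Node* load = machine->Word32AtomicLoad(rep);
    //
    // After this CL: callers state the order explicitly via a parameter
    // bundle such as AtomicLoadParameters(rep, AtomicMemoryOrder::kSeqCst).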
BUILD.gn

@@ -2449,6 +2449,7 @@ v8_header_set("v8_internal_headers") {
     "src/codegen/assembler-arch.h",
     "src/codegen/assembler-inl.h",
     "src/codegen/assembler.h",
+    "src/codegen/atomic-memory-order.h",
     "src/codegen/bailout-reason.h",
     "src/codegen/callable.h",
     "src/codegen/code-comments.h",
@@ -204,26 +204,28 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
          arraysize(case_labels));

   BIND(&i8);
-  Return(SmiFromInt32(AtomicLoad<Int8T>(backing_store, index_word)));
+  Return(SmiFromInt32(AtomicLoad<Int8T>(AtomicMemoryOrder::kSeqCst,
+                                        backing_store, index_word)));

   BIND(&u8);
-  Return(SmiFromInt32(AtomicLoad<Uint8T>(backing_store, index_word)));
+  Return(SmiFromInt32(AtomicLoad<Uint8T>(AtomicMemoryOrder::kSeqCst,
+                                         backing_store, index_word)));

   BIND(&i16);
-  Return(
-      SmiFromInt32(AtomicLoad<Int16T>(backing_store, WordShl(index_word, 1))));
+  Return(SmiFromInt32(AtomicLoad<Int16T>(
+      AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));

   BIND(&u16);
-  Return(
-      SmiFromInt32(AtomicLoad<Uint16T>(backing_store, WordShl(index_word, 1))));
+  Return(SmiFromInt32(AtomicLoad<Uint16T>(
+      AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));

   BIND(&i32);
-  Return(ChangeInt32ToTagged(
-      AtomicLoad<Int32T>(backing_store, WordShl(index_word, 2))));
+  Return(ChangeInt32ToTagged(AtomicLoad<Int32T>(
+      AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));

   BIND(&u32);
-  Return(ChangeUint32ToTagged(
-      AtomicLoad<Uint32T>(backing_store, WordShl(index_word, 2))));
+  Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
+      AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
 #if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
   BIND(&i64);
   Goto(&u64);
@@ -235,12 +237,12 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   }
 #else
   BIND(&i64);
-  Return(BigIntFromSigned64(
-      AtomicLoad64<AtomicInt64>(backing_store, WordShl(index_word, 3))));
+  Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>(
+      AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));

   BIND(&u64);
-  Return(BigIntFromUnsigned64(
-      AtomicLoad64<AtomicUint64>(backing_store, WordShl(index_word, 3))));
+  Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
+      AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
 #endif

   // This shouldn't happen, we've already validated the type.
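
JavaScript's Atomics.load is specified as sequentially consistent, which is
why the builtin pins kSeqCst at every element width. As a C++ analogy (not
V8 code) of the ordering the builtin requests:

    #include <atomic>
    #include <cstdint>

    // Same ordering as AtomicLoad<Int32T>(AtomicMemoryOrder::kSeqCst, ...).
    int32_t LoadElement(const std::atomic<int32_t>* backing_store,
                        size_t index) {
      return backing_store[index].load(std::memory_order_seq_cst);
    }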
@@ -307,18 +309,18 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
          arraysize(case_labels));

   BIND(&u8);
-  AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
-              value_word32);
+  AtomicStore(MachineRepresentation::kWord8, AtomicMemoryOrder::kSeqCst,
+              backing_store, index_word, value_word32);
   Return(value_integer);

   BIND(&u16);
-  AtomicStore(MachineRepresentation::kWord16, backing_store,
-              WordShl(index_word, 1), value_word32);
+  AtomicStore(MachineRepresentation::kWord16, AtomicMemoryOrder::kSeqCst,
+              backing_store, WordShl(index_word, 1), value_word32);
   Return(value_integer);

   BIND(&u32);
-  AtomicStore(MachineRepresentation::kWord32, backing_store,
-              WordShl(index_word, 2), value_word32);
+  AtomicStore(MachineRepresentation::kWord32, AtomicMemoryOrder::kSeqCst,
+              backing_store, WordShl(index_word, 2), value_word32);
   Return(value_integer);

   BIND(&u64);
@@ -340,7 +342,8 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   TVARIABLE(UintPtrT, var_high);
   BigIntToRawBytes(value_bigint, &var_low, &var_high);
   TNode<UintPtrT> high = Is64() ? TNode<UintPtrT>() : var_high.value();
-  AtomicStore64(backing_store, WordShl(index_word, 3), var_low.value(), high);
+  AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store,
+                WordShl(index_word, 3), var_low.value(), high);
   Return(value_bigint);
 #endif
@@ -2919,6 +2919,18 @@ void TurboAssembler::StoreTaggedField(const Register& value,
   }
 }

+void TurboAssembler::AtomicStoreTaggedField(const Register& value,
+                                            const Register& dst_base,
+                                            const Register& dst_index,
+                                            const Register& temp) {
+  Add(temp, dst_base, dst_index);
+  if (COMPRESS_POINTERS_BOOL) {
+    Stlr(value.W(), temp);
+  } else {
+    Stlr(value, temp);
+  }
+}
+
 void TurboAssembler::DecompressTaggedSigned(const Register& destination,
                                             const MemOperand& field_operand) {
   ASM_CODE_COMMENT(this);
@@ -2950,6 +2962,40 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
   Add(destination, kPtrComprCageBaseRegister, destination);
 }

+void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination,
+                                                  const Register& base,
+                                                  const Register& index,
+                                                  const Register& temp) {
+  ASM_CODE_COMMENT(this);
+  Add(temp, base, index);
+  Ldar(destination.W(), temp);
+  if (FLAG_debug_code) {
+    // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
+    Add(destination, destination,
+        ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
+  }
+}
+
+void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination,
+                                                   const Register& base,
+                                                   const Register& index,
+                                                   const Register& temp) {
+  ASM_CODE_COMMENT(this);
+  Add(temp, base, index);
+  Ldar(destination.W(), temp);
+  Add(destination, kPtrComprCageBaseRegister, destination);
+}
+
+void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination,
+                                               const Register& base,
+                                               const Register& index,
+                                               const Register& temp) {
+  ASM_CODE_COMMENT(this);
+  Add(temp, base, index);
+  Ldar(destination.W(), temp);
+  Add(destination, kPtrComprCageBaseRegister, destination);
+}
+
 void TurboAssembler::CheckPageFlag(const Register& object, int mask,
                                    Condition cc, Label* condition_met) {
   ASM_CODE_COMMENT(this);
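
The arm64 helper stores through STLR, a store-release. The conventional C++11
mapping on ARM64 uses STLR for both release and sequentially consistent
stores (paired with LDAR on the load side), which is why a single helper can
serve both supported orders. A C++ analogy, not V8 code:

    #include <atomic>
    #include <cstdint>

    void ReleaseStore(std::atomic<uint64_t>* slot, uint64_t value) {
      // On ARM64 this typically compiles to an STLR instruction.
      slot->store(value, std::memory_order_release);
    }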
@@ -1371,6 +1371,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void StoreTaggedField(const Register& value,
                         const MemOperand& dst_field_operand);

+  void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
+                              const Register& dst_index, const Register& temp);
+
   void DecompressTaggedSigned(const Register& destination,
                               const MemOperand& field_operand);
   void DecompressTaggedPointer(const Register& destination,
@@ -1380,6 +1383,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void DecompressAnyTagged(const Register& destination,
                            const MemOperand& field_operand);

+  void AtomicDecompressTaggedSigned(const Register& destination,
+                                    const Register& base, const Register& index,
+                                    const Register& temp);
+  void AtomicDecompressTaggedPointer(const Register& destination,
+                                     const Register& base,
+                                     const Register& index,
+                                     const Register& temp);
+  void AtomicDecompressAnyTagged(const Register& destination,
+                                 const Register& base, const Register& index,
+                                 const Register& temp);
+
   // Restore FP and LR from the values stored in the current frame. This will
   // authenticate the LR when pointer authentication is enabled.
   void RestoreFPAndLR();
src/codegen/atomic-memory-order.h (new file)

@@ -0,0 +1,35 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
+#define V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
+
+#include <ostream>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// Atomic memory orders supported by the compiler.
+enum class AtomicMemoryOrder : uint8_t { kAcqRel, kSeqCst };
+
+inline size_t hash_value(AtomicMemoryOrder order) {
+  return static_cast<uint8_t>(order);
+}
+
+inline std::ostream& operator<<(std::ostream& os, AtomicMemoryOrder order) {
+  switch (order) {
+    case AtomicMemoryOrder::kAcqRel:
+      return os << "kAcqRel";
+    case AtomicMemoryOrder::kSeqCst:
+      return os << "kSeqCst";
+  }
+  UNREACHABLE();
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
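
A minimal usage sketch of the new header's helpers (the Demo function is
hypothetical, not part of the CL):

    #include <iostream>

    #include "src/codegen/atomic-memory-order.h"

    void Demo() {
      v8::internal::AtomicMemoryOrder order =
          v8::internal::AtomicMemoryOrder::kAcqRel;
      std::cout << order << std::endl;  // operator<< prints "kAcqRel".
      size_t hash = hash_value(order);  // Found via ADL in v8::internal.
      (void)hash;
    }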
@@ -688,6 +688,14 @@ void Assembler::movq(XMMRegister dst, Operand src) {
   emit_operand(dst, src);
 }

+void Assembler::movq(Operand dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xD6);
+  emit_operand(src, dst);
+}
+
 void Assembler::cmov(Condition cc, Register dst, Operand src) {
   EnsureSpace ensure_space(this);
   // Opcode: 0f 40 + cc /r.
@@ -535,6 +535,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void movzx_w(Register dst, Operand src);

   void movq(XMMRegister dst, Operand src);
+  void movq(Operand dst, XMMRegister src);

   // Conditional moves
   void cmov(Condition cc, Register dst, Register src) {
@@ -294,6 +294,17 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
   }
 }

+void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
+                                            Register value) {
+  if (COMPRESS_POINTERS_BOOL) {
+    movl(kScratchRegister, value);
+    xchgl(kScratchRegister, dst_field_operand);
+  } else {
+    movq(kScratchRegister, value);
+    xchgq(kScratchRegister, dst_field_operand);
+  }
+}
+
 void TurboAssembler::DecompressTaggedSigned(Register destination,
                                             Operand field_operand) {
   ASM_CODE_COMMENT(this);
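
On x64 the tagged atomic store is emitted as an XCHG rather than a plain MOV:
XCHG with a memory operand is implicitly locked on x86, giving the store the
full-fence behavior a sequentially consistent store needs. A C++ analogy, not
V8 code:

    #include <atomic>
    #include <cstdint>

    void SeqCstStore(std::atomic<uint64_t>* slot, uint64_t value) {
      // On x86-64 this typically compiles to XCHG; a plain MOV would only
      // provide release ordering.
      slot->store(value, std::memory_order_seq_cst);
    }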
@@ -667,6 +667,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
   void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
   void StoreTaggedField(Operand dst_field_operand, Register value);
   void StoreTaggedSignedField(Operand dst_field_operand, Smi value);
+  void AtomicStoreTaggedField(Operand dst_field_operand, Register value);

   // The following macros work even when pointer compression is not enabled.
   void DecompressTaggedSigned(Register destination, Operand field_operand);
@@ -329,12 +329,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
     __ dmb(ISH);                                                      \
   } while (0)

-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)                      \
-  do {                                                                \
-    __ dmb(ISH);                                                      \
-    __ asm_instr(i.InputRegister(2),                                  \
-                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
-    __ dmb(ISH);                                                      \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, order)               \
+  do {                                                                \
+    __ dmb(ISH);                                                      \
+    __ asm_instr(i.InputRegister(0), i.InputOffset(1));               \
+    if (order == AtomicMemoryOrder::kSeqCst) __ dmb(ISH);             \
   } while (0)

 #define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
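
On ARMv7 the macro now implements the standard mapping cited later in this CL
(https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html): release stores emit
DMB ISH; STR, while sequentially consistent stores emit DMB ISH; STR; DMB ISH,
so only the trailing barrier depends on the order. A C++ analogy, not V8 code:

    #include <atomic>
    #include <cstdint>

    void ReleaseStore(std::atomic<uint32_t>* slot, uint32_t value) {
      // On ARMv7 this typically compiles to DMB ISH; STR (no trailing DMB).
      slot->store(value, std::memory_order_release);
    }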
@@ -927,15 +926,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                                    i.InputDoubleRegister(0), DetermineStubCallMode());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    case kArchStoreWithWriteBarrier: {
-      RecordWriteMode mode =
-          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+    case kArchStoreWithWriteBarrier:  // Fall through.
+    case kArchAtomicStoreWithWriteBarrier: {
+      RecordWriteMode mode;
+      if (arch_opcode == kArchStoreWithWriteBarrier) {
+        mode = static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+      } else {
+        mode = AtomicStoreRecordWriteModeField::decode(instr->opcode());
+      }
       Register object = i.InputRegister(0);
       Register value = i.InputRegister(2);

       AddressingMode addressing_mode =
           AddressingModeField::decode(instr->opcode());
       Operand offset(0);
+
+      if (arch_opcode == kArchAtomicStoreWithWriteBarrier) {
+        __ dmb(ISH);
+      }
       if (addressing_mode == kMode_Offset_RI) {
         int32_t immediate = i.InputInt32(1);
         offset = Operand(immediate);
@@ -946,6 +954,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         offset = Operand(reg);
         __ str(value, MemOperand(object, reg));
       }
+      if (arch_opcode == kArchAtomicStoreWithWriteBarrier &&
+          AtomicMemoryOrderField::decode(instr->opcode()) ==
+              AtomicMemoryOrder::kSeqCst) {
+        __ dmb(ISH);
+      }
+
       auto ool = zone()->New<OutOfLineRecordWrite>(
           this, object, offset, value, mode, DetermineStubCallMode(),
           &unwinding_info_writer_);
@@ -3314,13 +3328,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
       break;
     case kAtomicStoreWord8:
-      ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+      ASSEMBLE_ATOMIC_STORE_INTEGER(strb,
+                                    AtomicMemoryOrderField::decode(opcode));
       break;
     case kAtomicStoreWord16:
-      ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+      ASSEMBLE_ATOMIC_STORE_INTEGER(strh,
+                                    AtomicMemoryOrderField::decode(opcode));
       break;
     case kAtomicStoreWord32:
-      ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+      ASSEMBLE_ATOMIC_STORE_INTEGER(str,
+                                    AtomicMemoryOrderField::decode(opcode));
       break;
     case kAtomicExchangeInt8:
       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
@@ -430,17 +430,18 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
 void EmitStore(InstructionSelector* selector, InstructionCode opcode,
                size_t input_count, InstructionOperand* inputs, Node* index) {
   ArmOperandGenerator g(selector);
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);

   if (g.CanBeImmediate(index, opcode)) {
     inputs[input_count++] = g.UseImmediate(index);
     opcode |= AddressingModeField::encode(kMode_Offset_RI);
-  } else if ((opcode == kArmStr) &&
+  } else if ((arch_opcode == kArmStr || arch_opcode == kAtomicStoreWord32) &&
             TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
                                  &inputs[3])) {
     input_count = 4;
   } else {
     inputs[input_count++] = g.UseRegister(index);
-    if (opcode == kArmVst1S128) {
+    if (arch_opcode == kArmVst1S128) {
       // Inputs are value, base, index, only care about base and index.
       EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
     } else {
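
The switch to ArchOpcodeField::decode is needed because an InstructionCode
carries more than the architecture opcode: addressing-mode (and now
memory-order) bits are OR-ed in, so comparing the full code against a bare
ArchOpcode value would silently stop matching. Illustrative shape only; the
real field layout lives in V8's instruction-codes headers:

    #include <cstdint>

    // Hypothetical layout: low bits hold the arch opcode, upper bits hold
    // encoded fields such as the addressing mode and memory order.
    constexpr uint32_t kHypotheticalArchOpcodeMask = 0x1FF;

    inline uint32_t DecodeArchOpcode(uint32_t instruction_code) {
      return instruction_code & kHypotheticalArchOpcodeMask;
    }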
@@ -640,13 +641,60 @@ void InstructionSelector::VisitProtectedLoad(Node* node) {
   UNIMPLEMENTED();
 }

-void InstructionSelector::VisitStore(Node* node) {
-  ArmOperandGenerator g(this);
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+  switch (rep) {
+    case MachineRepresentation::kFloat32:
+      return kArmVstrF32;
+    case MachineRepresentation::kFloat64:
+      return kArmVstrF64;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      return kArmStrb;
+    case MachineRepresentation::kWord16:
+      return kArmStrh;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord32:
+      return kArmStr;
+    case MachineRepresentation::kSimd128:
+      return kArmVst1S128;
+      break;
+    case MachineRepresentation::kCompressedPointer:  // Fall through.
+    case MachineRepresentation::kCompressed:         // Fall through.
+    case MachineRepresentation::kWord64:             // Fall through.
+    case MachineRepresentation::kMapWord:            // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+  }
+}
+
+ArchOpcode GetAtomicStoreOpcode(MachineRepresentation rep) {
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      return kAtomicStoreWord8;
+    case MachineRepresentation::kWord16:
+      return kAtomicStoreWord16;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord32:
+      return kAtomicStoreWord32;
+    default:
+      UNREACHABLE();
+  }
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+                      StoreRepresentation store_rep,
+                      base::Optional<AtomicMemoryOrder> atomic_order) {
+  ArmOperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);

-  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
   MachineRepresentation rep = store_rep.representation();

@@ -672,58 +720,44 @@ void InstructionSelector::VisitStore(Node* node) {
     inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode =
         WriteBarrierKindToRecordWriteMode(write_barrier_kind);
-    InstructionCode code = kArchStoreWithWriteBarrier;
+    InstructionCode code;
+    if (!atomic_order) {
+      code = kArchStoreWithWriteBarrier;
+      code |= MiscField::encode(static_cast<int>(record_write_mode));
+    } else {
+      code = kArchAtomicStoreWithWriteBarrier;
+      code |= AtomicMemoryOrderField::encode(*atomic_order);
+      code |= AtomicStoreRecordWriteModeField::encode(record_write_mode);
+    }
     code |= AddressingModeField::encode(addressing_mode);
-    code |= MiscField::encode(static_cast<int>(record_write_mode));
-    Emit(code, 0, nullptr, input_count, inputs);
+    selector->Emit(code, 0, nullptr, input_count, inputs);
   } else {
     InstructionCode opcode = kArchNop;
-    switch (rep) {
-      case MachineRepresentation::kFloat32:
-        opcode = kArmVstrF32;
-        break;
-      case MachineRepresentation::kFloat64:
-        opcode = kArmVstrF64;
-        break;
-      case MachineRepresentation::kBit:  // Fall through.
-      case MachineRepresentation::kWord8:
-        opcode = kArmStrb;
-        break;
-      case MachineRepresentation::kWord16:
-        opcode = kArmStrh;
-        break;
-      case MachineRepresentation::kTaggedSigned:   // Fall through.
-      case MachineRepresentation::kTaggedPointer:  // Fall through.
-      case MachineRepresentation::kTagged:         // Fall through.
-      case MachineRepresentation::kWord32:
-        opcode = kArmStr;
-        break;
-      case MachineRepresentation::kSimd128:
-        opcode = kArmVst1S128;
-        break;
-      case MachineRepresentation::kCompressedPointer:  // Fall through.
-      case MachineRepresentation::kCompressed:         // Fall through.
-      case MachineRepresentation::kWord64:             // Fall through.
-      case MachineRepresentation::kMapWord:            // Fall through.
-      case MachineRepresentation::kNone:
-        UNREACHABLE();
+    if (!atomic_order) {
+      opcode = GetStoreOpcode(rep);
+    } else {
+      // Release stores emit DMB ISH; STR while sequentially consistent stores
+      // emit DMB ISH; STR; DMB ISH.
+      // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+      opcode = GetAtomicStoreOpcode(rep);
+      opcode |= AtomicMemoryOrderField::encode(*atomic_order);
     }

     ExternalReferenceMatcher m(base);
     if (m.HasResolvedValue() &&
-        CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+        selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
       Int32Matcher int_matcher(index);
       if (int_matcher.HasResolvedValue()) {
         ptrdiff_t const delta =
             int_matcher.ResolvedValue() +
             TurboAssemblerBase::RootRegisterOffsetForExternalReference(
-                isolate(), m.ResolvedValue());
+                selector->isolate(), m.ResolvedValue());
         int input_count = 2;
         InstructionOperand inputs[2];
         inputs[0] = g.UseRegister(value);
         inputs[1] = g.UseImmediate(static_cast<int32_t>(delta));
         opcode |= AddressingModeField::encode(kMode_Root);
-        Emit(opcode, 0, nullptr, input_count, inputs);
+        selector->Emit(opcode, 0, nullptr, input_count, inputs);
         return;
       }
     }
@@ -732,10 +766,17 @@ void InstructionSelector::VisitStore(Node* node) {
     size_t input_count = 0;
     inputs[input_count++] = g.UseRegister(value);
     inputs[input_count++] = g.UseRegister(base);
-    EmitStore(this, opcode, input_count, inputs, index);
+    EmitStore(selector, opcode, input_count, inputs, index);
   }
 }

+}  // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+  VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+                   base::nullopt);
+}
+
 void InstructionSelector::VisitProtectedStore(Node* node) {
   // TODO(eholk)
   UNIMPLEMENTED();
@@ -2230,7 +2271,11 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
 }

 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  // The memory order is ignored as both acquire and sequentially consistent
+  // loads can emit LDR; DMB ISH.
+  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+  LoadRepresentation load_rep = atomic_load_params.representation();
   ArmOperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
@@ -2242,6 +2287,9 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
     case MachineRepresentation::kWord16:
       opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
       break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord32:
       opcode = kAtomicLoadWord32;
       break;
@@ -2253,34 +2301,9 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
 }

 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
-  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  ArmOperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
-  ArchOpcode opcode;
-  switch (rep) {
-    case MachineRepresentation::kWord8:
-      opcode = kAtomicStoreWord8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = kAtomicStoreWord16;
-      break;
-    case MachineRepresentation::kWord32:
-      opcode = kAtomicStoreWord32;
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  AddressingMode addressing_mode = kMode_Offset_RR;
-  InstructionOperand inputs[4];
-  size_t input_count = 0;
-  inputs[input_count++] = g.UseUniqueRegister(base);
-  inputs[input_count++] = g.UseUniqueRegister(index);
-  inputs[input_count++] = g.UseUniqueRegister(value);
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  Emit(code, 0, nullptr, input_count, inputs);
+  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+  VisitStoreCommon(this, node, store_params.store_representation(),
+                   store_params.order());
 }

 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
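
VisitStoreCommon folds plain and atomic stores into one path:
base::Optional<AtomicMemoryOrder> is empty for ordinary stores and carries the
order for atomic ones, with the order packed into the InstructionCode next to
the record-write mode. A sketch of that encoding pattern using V8's
base::BitField; the field position and width here are made up for
illustration and differ from the real AtomicMemoryOrderField:

    #include <cstdint>

    #include "src/base/bit-field.h"
    #include "src/codegen/atomic-memory-order.h"

    using InstructionCode = uint32_t;
    // Hypothetical position/width, for illustration only.
    using HypotheticalOrderField =
        v8::base::BitField<v8::internal::AtomicMemoryOrder, 24, 1>;

    InstructionCode EncodeOrder(InstructionCode code,
                                v8::internal::AtomicMemoryOrder order) {
      return code | HypotheticalOrderField::encode(order);
    }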
@@ -969,6 +969,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ Bind(ool->exit());
       break;
     }
+    case kArchAtomicStoreWithWriteBarrier: {
+      DCHECK_EQ(AddressingModeField::decode(instr->opcode()), kMode_MRR);
+      RecordWriteMode mode =
+          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+      Register object = i.InputRegister(0);
+      Register offset = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      auto ool = zone()->New<OutOfLineRecordWrite>(
+          this, object, offset, value, mode, DetermineStubCallMode(),
+          &unwinding_info_writer_);
+      __ AtomicStoreTaggedField(value, object, offset, i.TempRegister(0));
+      if (mode > RecordWriteMode::kValueIsPointer) {
+        __ JumpIfSmi(value, ool->exit());
+      }
+      __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+                       eq, ool->entry());
+      __ Bind(ool->exit());
+      break;
+    }
     case kArchStackSlot: {
       FrameOffset offset =
           frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -1811,6 +1830,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArm64LdrDecompressAnyTagged:
       __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kArm64LdarDecompressTaggedSigned:
+      __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
+                                      i.InputRegister(1), i.TempRegister(0));
+      break;
+    case kArm64LdarDecompressTaggedPointer:
+      __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
+                                       i.InputRegister(1), i.TempRegister(0));
+      break;
+    case kArm64LdarDecompressAnyTagged:
+      __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
+                                   i.InputRegister(1), i.TempRegister(0));
+      break;
     case kArm64Str:
       EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
       __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
@@ -1818,6 +1849,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArm64StrCompressTagged:
       __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
+    case kArm64StlrCompressTagged:
+      // To be consistent with other STLR instructions, the value is stored at
+      // the 3rd input register instead of the 1st.
+      __ AtomicStoreTaggedField(i.InputRegister(2), i.InputRegister(0),
+                                i.InputRegister(1), i.TempRegister(0));
+      break;
     case kArm64LdrS:
       EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
       __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
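
For compressed pointers, the arm64 AtomicDecompress* helpers do the acquire
load (LDAR) on the 32-bit compressed slot and add the cage base afterwards;
the ordering guarantee comes from the load itself, so decompressing after it
is safe. A C++ analogy, not V8 code:

    #include <atomic>
    #include <cstdint>

    uint64_t AcquireLoadTagged(const std::atomic<uint32_t>* slot,
                               uint64_t cage_base) {
      uint32_t compressed = slot->load(std::memory_order_acquire);  // LDAR Wn
      return cage_base + compressed;  // Decompress after the acquire load.
    }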
@@ -11,400 +11,404 @@ namespace compiler {

 // ARM64-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
 #define TARGET_ARCH_OPCODE_LIST(V)          \
   V(Arm64Add)                               \
   V(Arm64Add32)                             \
   [... unchanged arithmetic, logic, conversion, and load entries,
    Arm64And through Arm64Ldr, elided ...]
   V(Arm64LdrDecompressTaggedSigned)         \
   V(Arm64LdrDecompressTaggedPointer)        \
   V(Arm64LdrDecompressAnyTagged)            \
+  V(Arm64LdarDecompressTaggedSigned)        \
+  V(Arm64LdarDecompressTaggedPointer)       \
+  V(Arm64LdarDecompressAnyTagged)           \
   V(Arm64Str)                               \
   V(Arm64StrCompressTagged)                 \
+  V(Arm64StlrCompressTagged)                \
   V(Arm64DmbIsh)                            \
   V(Arm64DsbIsb)                            \
   [... unchanged SIMD and atomic entries, Arm64Sxtl through
    Arm64Word64AtomicExchangeUint64, elided ...]
   V(Arm64Word64AtomicCompareExchangeUint64)

 // Addressing modes represent the "shape" of inputs to an instruction.
@@ -377,6 +377,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64LdrDecompressTaggedSigned:
     case kArm64LdrDecompressTaggedPointer:
     case kArm64LdrDecompressAnyTagged:
+    case kArm64LdarDecompressTaggedSigned:
+    case kArm64LdarDecompressTaggedPointer:
+    case kArm64LdarDecompressAnyTagged:
     case kArm64Peek:
     case kArm64LoadSplat:
     case kArm64LoadLane:
@@ -399,6 +402,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64StrW:
     case kArm64Str:
     case kArm64StrCompressTagged:
+    case kArm64StlrCompressTagged:
     case kArm64DmbIsh:
     case kArm64DsbIsb:
     case kArm64StoreLane:
@@ -144,21 +144,6 @@ class Arm64OperandGenerator final : public OperandGenerator {
 
 namespace {
 
-ArchOpcode GetAtomicStoreOpcode(MachineRepresentation rep) {
-  switch (rep) {
-    case MachineRepresentation::kWord8:
-      return kAtomicStoreWord8;
-    case MachineRepresentation::kWord16:
-      return kAtomicStoreWord16;
-    case MachineRepresentation::kWord32:
-      return kAtomicStoreWord32;
-    case MachineRepresentation::kWord64:
-      return kArm64Word64AtomicStoreWord64;
-    default:
-      UNREACHABLE();
-  }
-}
-
 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   Arm64OperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
@@ -2618,30 +2603,135 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
 }
 
-void VisitAtomicLoad(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode, AtomicWidth width) {
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+                     AtomicWidth width) {
   Arm64OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
   InstructionOperand outputs[] = {g.DefineAsRegister(node)};
   InstructionOperand temps[] = {g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
-                         AtomicWidthField::encode(width);
+
+  // The memory order is ignored as both acquire and sequentially consistent
+  // loads can emit LDAR.
+  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+  LoadRepresentation load_rep = atomic_load_params.representation();
+  InstructionCode code;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+      code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+      code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      code = kAtomicLoadWord32;
+      break;
+    case MachineRepresentation::kWord64:
+      code = kArm64Word64AtomicLoadUint64;
+      break;
+#ifdef V8_COMPRESS_POINTERS
+    case MachineRepresentation::kTaggedSigned:
+      code = kArm64LdarDecompressTaggedSigned;
+      break;
+    case MachineRepresentation::kTaggedPointer:
+      code = kArm64LdarDecompressTaggedPointer;
+      break;
+    case MachineRepresentation::kTagged:
+      code = kArm64LdarDecompressAnyTagged;
+      break;
+#else
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:
+      if (kTaggedSize == 8) {
+        code = kArm64Word64AtomicLoadUint64;
+      } else {
+        code = kAtomicLoadWord32;
+      }
+      break;
+#endif
+    case MachineRepresentation::kCompressedPointer:  // Fall through.
+    case MachineRepresentation::kCompressed:
+      DCHECK(COMPRESS_POINTERS_BOOL);
+      code = kAtomicLoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  code |=
+      AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
   selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                  arraysize(temps), temps);
 }
 void VisitAtomicStore(InstructionSelector* selector, Node* node,
-                      ArchOpcode opcode, AtomicWidth width) {
+                      AtomicWidth width) {
   Arm64OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
 
+  // The memory order is ignored as both release and sequentially consistent
+  // stores can emit STLR.
+  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+  WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+  MachineRepresentation rep = store_params.representation();
+
+  if (FLAG_enable_unconditional_write_barriers &&
+      CanBeTaggedOrCompressedPointer(rep)) {
+    write_barrier_kind = kFullWriteBarrier;
+  }
+
   InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                  g.UseUniqueRegister(value)};
   InstructionOperand temps[] = {g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
-                         AtomicWidthField::encode(width);
+  InstructionCode code;
+
+  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+    DCHECK(CanBeTaggedOrCompressedPointer(rep));
+    DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+    RecordWriteMode record_write_mode =
+        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+    code = kArchAtomicStoreWithWriteBarrier;
+    code |= MiscField::encode(static_cast<int>(record_write_mode));
+  } else {
+    switch (rep) {
+      case MachineRepresentation::kWord8:
+        code = kAtomicStoreWord8;
+        break;
+      case MachineRepresentation::kWord16:
+        code = kAtomicStoreWord16;
+        break;
+      case MachineRepresentation::kWord32:
+        code = kAtomicStoreWord32;
+        break;
+      case MachineRepresentation::kWord64:
+        DCHECK_EQ(width, AtomicWidth::kWord64);
+        code = kArm64Word64AtomicStoreWord64;
+        break;
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
+      case MachineRepresentation::kTagged:
+        DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+        V8_FALLTHROUGH;
+      case MachineRepresentation::kCompressedPointer:  // Fall through.
+      case MachineRepresentation::kCompressed:
+        CHECK(COMPRESS_POINTERS_BOOL);
+        DCHECK_EQ(width, AtomicWidth::kWord32);
+        code = kArm64StlrCompressTagged;
+        break;
+      default:
+        UNREACHABLE();
+    }
+    code |= AtomicWidthField::encode(width);
+  }
+
+  code |= AddressingModeField::encode(kMode_MRR);
   selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps),
                  temps);
 }
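The two comments above rely on the standard C++11-to-ARMv8 mapping: an acquire load and a sequentially consistent load both lower to LDAR, and a release store and a sequentially consistent store both lower to STLR, so the selector can pick the opcode purely by representation. A minimal standalone illustration of that mapping (plain C++, not part of this CL):

#include <atomic>

int load_acquire(const std::atomic<int>& a) {
  return a.load(std::memory_order_acquire);  // ARM64: ldar
}
int load_seq_cst(const std::atomic<int>& a) {
  return a.load(std::memory_order_seq_cst);  // ARM64: ldar as well
}
void store_release(std::atomic<int>& a, int v) {
  a.store(v, std::memory_order_release);  // ARM64: stlr
}
void store_seq_cst(std::atomic<int>& a, int v) {
  a.store(v, std::memory_order_seq_cst);  // ARM64: stlr as well
}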
@@ -3202,55 +3292,19 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
 }
 
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  ArchOpcode opcode;
-  switch (load_rep.representation()) {
-    case MachineRepresentation::kWord8:
-      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
-      break;
-    case MachineRepresentation::kWord32:
-      opcode = kAtomicLoadWord32;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
+  VisitAtomicLoad(this, node, AtomicWidth::kWord32);
 }
 
 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  ArchOpcode opcode;
-  switch (load_rep.representation()) {
-    case MachineRepresentation::kWord8:
-      opcode = kAtomicLoadUint8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = kAtomicLoadUint16;
-      break;
-    case MachineRepresentation::kWord32:
-      opcode = kAtomicLoadWord32;
-      break;
-    case MachineRepresentation::kWord64:
-      opcode = kArm64Word64AtomicLoadUint64;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
+  VisitAtomicLoad(this, node, AtomicWidth::kWord64);
 }
 
 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
-  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  DCHECK_NE(rep, MachineRepresentation::kWord64);
-  VisitAtomicStore(this, node, GetAtomicStoreOpcode(rep), AtomicWidth::kWord32);
+  VisitAtomicStore(this, node, AtomicWidth::kWord32);
 }
 
 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
-  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  VisitAtomicStore(this, node, GetAtomicStoreOpcode(rep), AtomicWidth::kWord64);
+  VisitAtomicStore(this, node, AtomicWidth::kWord64);
 }
 
 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -957,7 +957,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ bind(ool->exit());
       break;
     }
-    case kArchStoreWithWriteBarrier: {
+    case kArchStoreWithWriteBarrier:  // Fall through.
+    case kArchAtomicStoreWithWriteBarrier: {
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
       Register object = i.InputRegister(0);
@@ -969,7 +970,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
                                                    scratch0, scratch1, mode,
                                                    DetermineStubCallMode());
-      __ mov(operand, value);
+      if (arch_opcode == kArchStoreWithWriteBarrier) {
+        __ mov(operand, value);
+      } else {
+        __ mov(scratch0, value);
+        __ xchg(scratch0, operand);
+      }
       if (mode > RecordWriteMode::kValueIsPointer) {
         __ JumpIfSmi(value, ool->exit());
       }
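The atomic variant stores with XCHG rather than MOV because a sequentially consistent store on x86 needs a full barrier, and XCHG with a memory operand carries an implicit LOCK prefix. A standalone sketch of the same mapping (plain C++, not V8 code):

#include <atomic>

void store_seq_cst(std::atomic<int>& a, int v) {
  a.store(v, std::memory_order_seq_cst);  // x86: xchg (implicitly locked)
}
void store_release(std::atomic<int>& a, int v) {
  a.store(v, std::memory_order_release);  // x86: plain mov suffices
}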
@@ -3835,17 +3841,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kIA32Word32AtomicPairLoad: {
-      XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
-      __ movq(tmp, i.MemoryOperand());
-      __ Pextrd(i.OutputRegister(0), tmp, 0);
-      __ Pextrd(i.OutputRegister(1), tmp, 1);
+      __ movq(kScratchDoubleReg, i.MemoryOperand());
+      __ Pextrd(i.OutputRegister(0), kScratchDoubleReg, 0);
+      __ Pextrd(i.OutputRegister(1), kScratchDoubleReg, 1);
       break;
     }
-    case kIA32Word32AtomicPairStore: {
+    case kIA32Word32ReleasePairStore: {
+      __ push(ebx);
+      i.MoveInstructionOperandToRegister(ebx, instr->InputAt(1));
+      __ push(ebx);
+      i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
+      __ push(ebx);
+      frame_access_state()->IncreaseSPDelta(3);
+      __ movq(kScratchDoubleReg, MemOperand(esp, 0));
+      __ pop(ebx);
+      __ pop(ebx);
+      __ pop(ebx);
+      frame_access_state()->IncreaseSPDelta(-3);
+      __ movq(i.MemoryOperand(2), kScratchDoubleReg);
+      break;
+    }
+    case kIA32Word32SeqCstPairStore: {
       Label store;
       __ bind(&store);
-      __ mov(i.TempRegister(0), i.MemoryOperand(2));
-      __ mov(i.TempRegister(1), i.NextMemoryOperand(2));
+      __ mov(eax, i.MemoryOperand(2));
+      __ mov(edx, i.NextMemoryOperand(2));
       __ push(ebx);
       frame_access_state()->IncreaseSPDelta(1);
       i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
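The release pair store above assembles the two 32-bit halves into one 8-byte stack slot (low word at the lower address), pulls them into an XMM register with MOVQ, and writes them back as a single 8-byte store; an aligned 8-byte SSE store is one atomic access on x86, which is all a release store requires. A rough standalone equivalent, assuming SSE2 and an aligned slot (illustrative only, not the V8 code path):

#include <cstdint>
#include <emmintrin.h>

void release_pair_store(uint64_t* slot, uint32_t low, uint32_t high) {
  uint64_t value = (static_cast<uint64_t>(high) << 32) | low;
  // movq xmm, m64 followed by movq m64, xmm: one 8-byte store, no tearing.
  __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(&value));
  _mm_storel_epi64(reinterpret_cast<__m128i*>(slot), v);
}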
@@ -402,7 +402,8 @@ namespace compiler {
   V(IA32I16x8AllTrue)           \
   V(IA32I8x16AllTrue)           \
   V(IA32Word32AtomicPairLoad)   \
-  V(IA32Word32AtomicPairStore)  \
+  V(IA32Word32ReleasePairStore) \
+  V(IA32Word32SeqCstPairStore)  \
  V(IA32Word32AtomicPairAdd)    \
   V(IA32Word32AtomicPairSub)    \
   V(IA32Word32AtomicPairAnd)    \
@@ -423,7 +423,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kIA32Word32AtomicPairLoad:
       return kIsLoadOperation;
 
-    case kIA32Word32AtomicPairStore:
+    case kIA32Word32ReleasePairStore:
+    case kIA32Word32SeqCstPairStore:
     case kIA32Word32AtomicPairAdd:
     case kIA32Word32AtomicPairSub:
     case kIA32Word32AtomicPairAnd:
@@ -246,6 +246,41 @@ class IA32OperandGenerator final : public OperandGenerator {
 
 namespace {
 
+ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
+  ArchOpcode opcode;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      opcode = kIA32Movss;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kIA32Movsd;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
+      break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord32:
+      opcode = kIA32Movl;
+      break;
+    case MachineRepresentation::kSimd128:
+      opcode = kIA32Movdqu;
+      break;
+    case MachineRepresentation::kCompressedPointer:  // Fall through.
+    case MachineRepresentation::kCompressed:         // Fall through.
+    case MachineRepresentation::kWord64:             // Fall through.
+    case MachineRepresentation::kMapWord:            // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+  }
+  return opcode;
+}
+
 void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   IA32OperandGenerator g(selector);
   Node* input = node->InputAt(0);
@@ -535,41 +570,8 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
   Emit(code, 1, outputs, input_count, inputs);
 }
 
-void InstructionSelector::VisitLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-
-  ArchOpcode opcode;
-  switch (load_rep.representation()) {
-    case MachineRepresentation::kFloat32:
-      opcode = kIA32Movss;
-      break;
-    case MachineRepresentation::kFloat64:
-      opcode = kIA32Movsd;
-      break;
-    case MachineRepresentation::kBit:  // Fall through.
-    case MachineRepresentation::kWord8:
-      opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
-      break;
-    case MachineRepresentation::kTaggedSigned:   // Fall through.
-    case MachineRepresentation::kTaggedPointer:  // Fall through.
-    case MachineRepresentation::kTagged:         // Fall through.
-    case MachineRepresentation::kWord32:
-      opcode = kIA32Movl;
-      break;
-    case MachineRepresentation::kSimd128:
-      opcode = kIA32Movdqu;
-      break;
-    case MachineRepresentation::kCompressedPointer:  // Fall through.
-    case MachineRepresentation::kCompressed:         // Fall through.
-    case MachineRepresentation::kWord64:             // Fall through.
-    case MachineRepresentation::kNone:
-    case MachineRepresentation::kMapWord:
-      UNREACHABLE();
-  }
-
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+                                    InstructionCode opcode) {
   IA32OperandGenerator g(this);
   InstructionOperand outputs[1];
   outputs[0] = g.DefineAsRegister(node);
@@ -581,20 +583,97 @@
   Emit(code, 1, outputs, input_count, inputs);
 }
 
+void InstructionSelector::VisitLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  DCHECK(!load_rep.IsMapWord());
+  VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
+
 void InstructionSelector::VisitProtectedLoad(Node* node) {
   // TODO(eholk)
   UNIMPLEMENTED();
 }
 
-void InstructionSelector::VisitStore(Node* node) {
-  IA32OperandGenerator g(this);
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+  switch (rep) {
+    case MachineRepresentation::kFloat32:
+      return kIA32Movss;
+    case MachineRepresentation::kFloat64:
+      return kIA32Movsd;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      return kIA32Movb;
+    case MachineRepresentation::kWord16:
+      return kIA32Movw;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord32:
+      return kIA32Movl;
+    case MachineRepresentation::kSimd128:
+      return kIA32Movdqu;
+    case MachineRepresentation::kCompressedPointer:  // Fall through.
+    case MachineRepresentation::kCompressed:         // Fall through.
+    case MachineRepresentation::kWord64:             // Fall through.
+    case MachineRepresentation::kMapWord:            // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+  }
+}
+
+ArchOpcode GetSeqCstStoreOpcode(MachineRepresentation rep) {
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      return kAtomicExchangeInt8;
+    case MachineRepresentation::kWord16:
+      return kAtomicExchangeInt16;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord32:
+      return kAtomicExchangeWord32;
+    default:
+      UNREACHABLE();
+  }
+}
+
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+                         ArchOpcode opcode, MachineRepresentation rep) {
+  IA32OperandGenerator g(selector);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  AddressingMode addressing_mode;
+  InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
+                                         ? g.UseFixed(value, edx)
+                                         : g.UseUniqueRegister(value);
+  InstructionOperand inputs[] = {
+      value_operand, g.UseUniqueRegister(base),
+      g.GetEffectiveIndexOperand(index, &addressing_mode)};
+  InstructionOperand outputs[] = {
+      (rep == MachineRepresentation::kWord8)
+          // Using DefineSameAsFirst requires the register to be unallocated.
+          ? g.DefineAsFixed(node, edx)
+          : g.DefineSameAsFirst(node)};
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+                      StoreRepresentation store_rep,
+                      base::Optional<AtomicMemoryOrder> atomic_order) {
+  IA32OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
 
-  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
   MachineRepresentation rep = store_rep.representation();
+  const bool is_seqcst =
+      atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
 
   if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
     write_barrier_kind = kFullWriteBarrier;
@@ -611,48 +690,23 @@ void InstructionSelector::VisitStore(Node* node) {
         WriteBarrierKindToRecordWriteMode(write_barrier_kind);
     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
     size_t const temp_count = arraysize(temps);
-    InstructionCode code = kArchStoreWithWriteBarrier;
+    InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+                                     : kArchStoreWithWriteBarrier;
     code |= AddressingModeField::encode(addressing_mode);
     code |= MiscField::encode(static_cast<int>(record_write_mode));
-    Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
+    selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count,
+                   temps);
+  } else if (is_seqcst) {
+    VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(rep), rep);
   } else {
-    ArchOpcode opcode;
-    switch (rep) {
-      case MachineRepresentation::kFloat32:
-        opcode = kIA32Movss;
-        break;
-      case MachineRepresentation::kFloat64:
-        opcode = kIA32Movsd;
-        break;
-      case MachineRepresentation::kBit:  // Fall through.
-      case MachineRepresentation::kWord8:
-        opcode = kIA32Movb;
-        break;
-      case MachineRepresentation::kWord16:
-        opcode = kIA32Movw;
-        break;
-      case MachineRepresentation::kTaggedSigned:   // Fall through.
-      case MachineRepresentation::kTaggedPointer:  // Fall through.
-      case MachineRepresentation::kTagged:         // Fall through.
-      case MachineRepresentation::kWord32:
-        opcode = kIA32Movl;
-        break;
-      case MachineRepresentation::kSimd128:
-        opcode = kIA32Movdqu;
-        break;
-      case MachineRepresentation::kCompressedPointer:  // Fall through.
-      case MachineRepresentation::kCompressed:         // Fall through.
-      case MachineRepresentation::kWord64:             // Fall through.
-      case MachineRepresentation::kMapWord:            // Fall through.
-      case MachineRepresentation::kNone:
-        UNREACHABLE();
-    }
-
+    // Release and non-atomic stores emit MOV.
+    // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
     InstructionOperand val;
     if (g.CanBeImmediate(value)) {
       val = g.UseImmediate(value);
-    } else if (rep == MachineRepresentation::kWord8 ||
-               rep == MachineRepresentation::kBit) {
+    } else if (!atomic_order && (rep == MachineRepresentation::kWord8 ||
+                                 rep == MachineRepresentation::kBit)) {
       val = g.UseByteRegister(value);
     } else {
       val = g.UseRegister(value);
@@ -663,13 +717,20 @@ void InstructionSelector::VisitStore(Node* node) {
     AddressingMode addressing_mode =
         g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
     InstructionCode code =
-        opcode | AddressingModeField::encode(addressing_mode);
+        GetStoreOpcode(rep) | AddressingModeField::encode(addressing_mode);
     inputs[input_count++] = val;
-    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
-         inputs);
+    selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+                   input_count, inputs);
   }
 }
 
+}  // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+  VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+                   base::nullopt);
+}
+
 void InstructionSelector::VisitProtectedStore(Node* node) {
   // TODO(eholk)
   UNIMPLEMENTED();
@@ -1625,29 +1686,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
   VisitWordCompare(selector, node, kIA32Cmp, cont);
 }
 
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
-                         ArchOpcode opcode, MachineRepresentation rep) {
-  IA32OperandGenerator g(selector);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
-
-  AddressingMode addressing_mode;
-  InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
-                                         ? g.UseFixed(value, edx)
-                                         : g.UseUniqueRegister(value);
-  InstructionOperand inputs[] = {
-      value_operand, g.UseUniqueRegister(base),
-      g.GetEffectiveIndexOperand(index, &addressing_mode)};
-  InstructionOperand outputs[] = {
-      (rep == MachineRepresentation::kWord8)
-          // Using DefineSameAsFirst requires the register to be unallocated.
-          ? g.DefineAsFixed(node, edx)
-          : g.DefineSameAsFirst(node)};
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
-}
-
 void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, MachineRepresentation rep) {
   AddressingMode addressing_mode;
@@ -1957,32 +1995,25 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
 }
 
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+  LoadRepresentation load_rep = atomic_load_params.representation();
   DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
          load_rep.representation() == MachineRepresentation::kWord16 ||
-         load_rep.representation() == MachineRepresentation::kWord32);
+         load_rep.representation() == MachineRepresentation::kWord32 ||
+         load_rep.representation() == MachineRepresentation::kTaggedSigned ||
+         load_rep.representation() == MachineRepresentation::kTaggedPointer ||
+         load_rep.representation() == MachineRepresentation::kTagged);
   USE(load_rep);
-  VisitLoad(node);
+  // The memory order is ignored as both acquire and sequentially consistent
+  // loads can emit MOV.
+  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+  VisitLoad(node, node, GetLoadOpcode(load_rep));
 }
 
 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
-  IA32OperandGenerator g(this);
-  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  ArchOpcode opcode;
-  switch (rep) {
-    case MachineRepresentation::kWord8:
-      opcode = kAtomicExchangeInt8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = kAtomicExchangeInt16;
-      break;
-    case MachineRepresentation::kWord32:
-      opcode = kAtomicExchangeWord32;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  VisitAtomicExchange(this, node, opcode, rep);
+  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+  VisitStoreCommon(this, node, store_params.store_representation(),
+                   store_params.order());
 }
 
 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -2075,6 +2106,8 @@ VISIT_ATOMIC_BINOP(Xor)
 #undef VISIT_ATOMIC_BINOP
 
 void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+  // Both acquire and sequentially consistent loads can emit MOV.
+  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
   IA32OperandGenerator g(this);
   AddressingMode mode;
   Node* base = node->InputAt(0);
@@ -2086,10 +2119,9 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
                                    g.GetEffectiveIndexOperand(index, &mode)};
     InstructionCode code =
         kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
-    InstructionOperand temps[] = {g.TempDoubleRegister()};
     InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
                                     g.DefineAsRegister(projection1)};
-    Emit(code, 2, outputs, 2, inputs, 1, temps);
+    Emit(code, 2, outputs, 2, inputs);
   } else if (projection0 || projection1) {
     // Only one word is needed, so it's enough to load just that.
     ArchOpcode opcode = kIA32Movl;
|
|||||||
}
|
}
|
||||||
|
|
||||||
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
|
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
|
||||||
|
// Release pair stores emit a MOVQ via a double register, and sequentially
|
||||||
|
// consistent stores emit CMPXCHG8B.
|
||||||
|
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
|
||||||
|
|
||||||
IA32OperandGenerator g(this);
|
IA32OperandGenerator g(this);
|
||||||
Node* base = node->InputAt(0);
|
Node* base = node->InputAt(0);
|
||||||
Node* index = node->InputAt(1);
|
Node* index = node->InputAt(1);
|
||||||
Node* value = node->InputAt(2);
|
Node* value = node->InputAt(2);
|
||||||
Node* value_high = node->InputAt(3);
|
Node* value_high = node->InputAt(3);
|
||||||
|
|
||||||
AddressingMode addressing_mode;
|
AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
|
||||||
InstructionOperand inputs[] = {
|
if (order == AtomicMemoryOrder::kAcqRel) {
|
||||||
g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
|
AddressingMode addressing_mode;
|
||||||
g.UseUniqueRegister(base),
|
InstructionOperand inputs[] = {
|
||||||
g.GetEffectiveIndexOperand(index, &addressing_mode)};
|
g.UseUniqueRegisterOrSlotOrConstant(value),
|
||||||
// Allocating temp registers here as stores are performed using an atomic
|
g.UseUniqueRegisterOrSlotOrConstant(value_high),
|
||||||
// exchange, the output of which is stored in edx:eax, which should be saved
|
g.UseUniqueRegister(base),
|
||||||
// and restored at the end of the instruction.
|
g.GetEffectiveIndexOperand(index, &addressing_mode),
|
||||||
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
|
};
|
||||||
const int num_temps = arraysize(temps);
|
InstructionCode code = kIA32Word32ReleasePairStore |
|
||||||
InstructionCode code =
|
AddressingModeField::encode(addressing_mode);
|
||||||
kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
|
Emit(code, 0, nullptr, arraysize(inputs), inputs);
|
||||||
Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
|
} else {
|
||||||
|
DCHECK_EQ(order, AtomicMemoryOrder::kSeqCst);
|
||||||
|
|
||||||
|
AddressingMode addressing_mode;
|
||||||
|
InstructionOperand inputs[] = {
|
||||||
|
g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
|
||||||
|
g.UseUniqueRegister(base),
|
||||||
|
g.GetEffectiveIndexOperand(index, &addressing_mode)};
|
||||||
|
// Allocating temp registers here as stores are performed using an atomic
|
||||||
|
// exchange, the output of which is stored in edx:eax, which should be saved
|
||||||
|
// and restored at the end of the instruction.
|
||||||
|
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
|
||||||
|
const int num_temps = arraysize(temps);
|
||||||
|
InstructionCode code = kIA32Word32SeqCstPairStore |
|
||||||
|
AddressingModeField::encode(addressing_mode);
|
||||||
|
Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
|
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
|
||||||
|
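For the sequentially consistent branch above, value_high is pinned to ecx and eax/edx are reserved because that is the register set LOCK CMPXCHG8B requires; the kIA32Word32SeqCstPairStore case in the code generator then loops until the exchange succeeds. Equivalent semantics in portable C++ (a sketch of the loop, not the emitted code):

#include <atomic>
#include <cstdint>

void seq_cst_pair_store(std::atomic<uint64_t>* slot, uint32_t low,
                        uint32_t high) {
  uint64_t desired = (static_cast<uint64_t>(high) << 32) | low;
  uint64_t expected = slot->load(std::memory_order_relaxed);
  // On IA-32 a 64-bit compare_exchange compiles to lock cmpxchg8b.
  while (!slot->compare_exchange_weak(expected, desired,
                                      std::memory_order_seq_cst)) {
    // expected was refreshed with the current value; retry.
  }
}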
@@ -32,6 +32,7 @@
 #define TARGET_ADDRESSING_MODE_LIST(V)
 #endif
 #include "src/base/bit-field.h"
+#include "src/codegen/atomic-memory-order.h"
 #include "src/compiler/write-barrier-kind.h"
 
 namespace v8 {
@@ -101,6 +102,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
   V(ArchParentFramePointer)          \
   V(ArchTruncateDoubleToI)           \
   V(ArchStoreWithWriteBarrier)       \
+  V(ArchAtomicStoreWithWriteBarrier) \
   V(ArchStackSlot)                   \
   V(ArchStackPointerGreaterThan)     \
   V(ArchStackCheckOffset)            \
@@ -265,6 +267,16 @@ enum MemoryAccessMode {
 
 enum class AtomicWidth { kWord32, kWord64 };
 
+inline size_t AtomicWidthSize(AtomicWidth width) {
+  switch (width) {
+    case AtomicWidth::kWord32:
+      return 4;
+    case AtomicWidth::kWord64:
+      return 8;
+  }
+  UNREACHABLE();
+}
+
 // The InstructionCode is an opaque, target-specific integer that encodes
 // what code to emit for an instruction in the code generator. It is not
 // interesting to the register allocator, as the inputs and flags on the
@@ -290,10 +302,16 @@ using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
 // size, an access mode, or both inside the overlapping MiscField.
 using LaneSizeField = base::BitField<int, 22, 8>;
 using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
-// AtomicOperandWidth overlaps with MiscField and is used for the various Atomic
+// AtomicWidthField overlaps with MiscField and is used for the various Atomic
 // opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
 // architectures are assumed to be 32bit wide.
 using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
+// AtomicMemoryOrderField overlaps with MiscField and is used for the various
+// Atomic opcodes. This field is not used on all architectures. It is used on
+// architectures where the codegen for kSeqCst and kAcqRel differs only by
+// emitting fences.
+using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
+using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
 using MiscField = base::BitField<int, 22, 10>;
 
 // This static assertion serves as an early warning if we are about to exhaust
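Laid against the 32-bit InstructionCode, the new fields occupy the low end of MiscField's bits [22, 32): AtomicWidthField takes [22, 24), AtomicMemoryOrderField [24, 26), and AtomicStoreRecordWriteModeField [26, 30), leaving the latter for ports that need to carry a record-write mode alongside the other two. A sketch of composing and decoding an opcode with these fields (illustrative use of the definitions above, not code from this CL):

InstructionCode code = kArchAtomicStoreWithWriteBarrier;
code |= AtomicWidthField::encode(AtomicWidth::kWord32) |
        AtomicMemoryOrderField::encode(AtomicMemoryOrder::kSeqCst);
AtomicMemoryOrder order = AtomicMemoryOrderField::decode(code);  // kSeqCst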
@@ -328,6 +328,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
       return kIsBarrier;
 
     case kArchStoreWithWriteBarrier:
+    case kArchAtomicStoreWithWriteBarrier:
      return kHasSideEffect;
 
     case kAtomicLoadInt8:
@@ -1869,12 +1869,14 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kMemoryBarrier:
       return VisitMemoryBarrier(node);
     case IrOpcode::kWord32AtomicLoad: {
-      LoadRepresentation type = LoadRepresentationOf(node->op());
+      AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+      LoadRepresentation type = params.representation();
       MarkAsRepresentation(type.representation(), node);
       return VisitWord32AtomicLoad(node);
     }
     case IrOpcode::kWord64AtomicLoad: {
-      LoadRepresentation type = LoadRepresentationOf(node->op());
+      AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+      LoadRepresentation type = params.representation();
       MarkAsRepresentation(type.representation(), node);
       return VisitWord64AtomicLoad(node);
     }
@@ -1292,7 +1292,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ movl(result, result);
       break;
     }
-    case kArchStoreWithWriteBarrier: {
+    case kArchStoreWithWriteBarrier:  // Fall through.
+    case kArchAtomicStoreWithWriteBarrier: {
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
       Register object = i.InputRegister(0);
@@ -1304,7 +1305,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
                                                    scratch0, scratch1, mode,
                                                    DetermineStubCallMode());
-      __ StoreTaggedField(operand, value);
+      if (arch_opcode == kArchStoreWithWriteBarrier) {
+        __ StoreTaggedField(operand, value);
+      } else {
+        DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+        __ AtomicStoreTaggedField(operand, value);
+      }
       if (mode > RecordWriteMode::kValueIsPointer) {
         __ JumpIfSmi(value, ool->exit());
       }
@@ -1312,6 +1318,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                        MemoryChunk::kPointersFromHereAreInterestingMask,
                        not_zero, ool->entry());
       __ bind(ool->exit());
+      // TODO(syg): Support non-relaxed memory orders in TSAN.
       EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
                                DetermineStubCallMode(), kTaggedSize);
       break;
@@ -341,8 +341,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
   UNREACHABLE();
 }
 
-ArchOpcode GetAtomicStoreOp(MachineRepresentation rep) {
-  switch (rep) {
+ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
+  switch (store_rep.representation()) {
     case MachineRepresentation::kWord8:
       return kAtomicExchangeUint8;
     case MachineRepresentation::kWord16:
@@ -351,6 +351,15 @@ ArchOpcode GetAtomicStoreOp(MachineRepresentation rep) {
       return kAtomicExchangeWord32;
     case MachineRepresentation::kWord64:
       return kX64Word64AtomicExchangeUint64;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:
+      if (COMPRESS_POINTERS_BOOL) return kAtomicExchangeWord32;
+      return kX64Word64AtomicExchangeUint64;
+    case MachineRepresentation::kCompressedPointer:  // Fall through.
+    case MachineRepresentation::kCompressed:
+      CHECK(COMPRESS_POINTERS_BOOL);
+      return kAtomicExchangeWord32;
     default:
       UNREACHABLE();
   }
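With pointer compression a tagged field is 32 bits wide, so a sequentially consistent tagged store can use the 32-bit exchange; without compression it needs the 64-bit one. A sketch of an assumed call site (names mirror the function above; illustrative only):

StoreRepresentation store_rep(MachineRepresentation::kTagged, kNoWriteBarrier);
ArchOpcode op = GetSeqCstStoreOpcode(store_rep);
// op == COMPRESS_POINTERS_BOOL ? kAtomicExchangeWord32
//                              : kX64Word64AtomicExchangeUint64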
@@ -499,15 +508,38 @@ void InstructionSelector::VisitLoad(Node* node) {
 
 void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
 
-void InstructionSelector::VisitStore(Node* node) {
-  X64OperandGenerator g(this);
+namespace {
+
+// Shared routine for Word32/Word64 Atomic Exchange
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+                         ArchOpcode opcode, AtomicWidth width) {
+  X64OperandGenerator g(selector);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  AddressingMode addressing_mode;
+  InstructionOperand inputs[] = {
+      g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+      g.GetEffectiveIndexOperand(index, &addressing_mode)};
+  InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+                         AtomicWidthField::encode(width);
+  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+                      StoreRepresentation store_rep,
+                      base::Optional<AtomicMemoryOrder> atomic_order,
+                      base::Optional<AtomicWidth> atomic_width) {
+  X64OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
-
-  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
   DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+  const bool is_seqcst =
+      atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
 
   if (FLAG_enable_unconditional_write_barriers &&
       CanBeTaggedOrCompressedPointer(store_rep.representation())) {
|
|||||||
RecordWriteMode record_write_mode =
|
RecordWriteMode record_write_mode =
|
||||||
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
|
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
|
||||||
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
|
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
|
||||||
InstructionCode code = kArchStoreWithWriteBarrier;
|
InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
|
||||||
|
: kArchStoreWithWriteBarrier;
|
||||||
code |= AddressingModeField::encode(addressing_mode);
|
code |= AddressingModeField::encode(addressing_mode);
|
||||||
code |= MiscField::encode(static_cast<int>(record_write_mode));
|
code |= MiscField::encode(static_cast<int>(record_write_mode));
|
||||||
Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
|
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
|
||||||
|
arraysize(temps), temps);
|
||||||
|
} else if (is_seqcst) {
|
||||||
|
VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(store_rep),
|
||||||
|
*atomic_width);
|
||||||
} else {
|
} else {
|
||||||
|
// Release and non-atomic stores emit MOV.
|
||||||
|
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
|
||||||
|
|
||||||
if ((ElementSizeLog2Of(store_rep.representation()) <
|
if ((ElementSizeLog2Of(store_rep.representation()) <
|
||||||
kSystemPointerSizeLog2) &&
|
kSystemPointerSizeLog2) &&
|
||||||
value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
|
value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
|
||||||
@ -558,11 +598,18 @@ void InstructionSelector::VisitStore(Node* node) {
|
|||||||
ArchOpcode opcode = GetStoreOpcode(store_rep);
|
ArchOpcode opcode = GetStoreOpcode(store_rep);
|
||||||
InstructionCode code =
|
InstructionCode code =
|
||||||
opcode | AddressingModeField::encode(addressing_mode);
|
opcode | AddressingModeField::encode(addressing_mode);
|
||||||
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
|
selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
|
||||||
inputs, temp_count, temps);
|
input_count, inputs, temp_count, temps);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
void InstructionSelector::VisitStore(Node* node) {
|
||||||
|
return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
|
||||||
|
base::nullopt, base::nullopt);
|
||||||
|
}
|
||||||
|
|
||||||
void InstructionSelector::VisitProtectedStore(Node* node) {
|
void InstructionSelector::VisitProtectedStore(Node* node) {
|
||||||
X64OperandGenerator g(this);
|
X64OperandGenerator g(this);
|
||||||
Node* value = node->InputAt(2);
|
Node* value = node->InputAt(2);
|
||||||
@ -2340,23 +2387,6 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
|
|||||||
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
|
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shared routine for Word32/Word64 Atomic Exchange
|
|
||||||
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
|
|
||||||
ArchOpcode opcode, AtomicWidth width) {
|
|
||||||
X64OperandGenerator g(selector);
|
|
||||||
Node* base = node->InputAt(0);
|
|
||||||
Node* index = node->InputAt(1);
|
|
||||||
Node* value = node->InputAt(2);
|
|
||||||
AddressingMode addressing_mode;
|
|
||||||
InstructionOperand inputs[] = {
|
|
||||||
g.UseUniqueRegister(value), g.UseUniqueRegister(base),
|
|
||||||
g.GetEffectiveIndexOperand(index, &addressing_mode)};
|
|
||||||
InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
|
|
||||||
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
|
|
||||||
AtomicWidthField::encode(width);
|
|
||||||
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
// Shared routine for word comparison against zero.
|
// Shared routine for word comparison against zero.
|
||||||
@@ -2724,29 +2754,44 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
 }
 
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
-         load_rep.representation() == MachineRepresentation::kWord16 ||
-         load_rep.representation() == MachineRepresentation::kWord32);
-  USE(load_rep);
-  VisitLoad(node);
+  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+  LoadRepresentation load_rep = atomic_load_params.representation();
+  DCHECK(IsIntegral(load_rep.representation()) ||
+         IsAnyTagged(load_rep.representation()) ||
+         (COMPRESS_POINTERS_BOOL &&
+          CanBeCompressedPointer(load_rep.representation())));
+  DCHECK_NE(load_rep.representation(), MachineRepresentation::kWord64);
+  DCHECK(!load_rep.IsMapWord());
+  // The memory order is ignored as both acquire and sequentially consistent
+  // loads can emit MOV.
+  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+  VisitLoad(node, node, GetLoadOpcode(load_rep));
 }
 
 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  USE(load_rep);
-  VisitLoad(node);
+  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+  DCHECK(!atomic_load_params.representation().IsMapWord());
+  // The memory order is ignored as both acquire and sequentially consistent
+  // loads can emit MOV.
+  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+  VisitLoad(node, node, GetLoadOpcode(atomic_load_params.representation()));
 }
 
 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
-  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  DCHECK_NE(rep, MachineRepresentation::kWord64);
-  VisitAtomicExchange(this, node, GetAtomicStoreOp(rep), AtomicWidth::kWord32);
+  AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+  DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+  DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+                 kTaggedSize == 4);
+  VisitStoreCommon(this, node, params.store_representation(), params.order(),
+                   AtomicWidth::kWord32);
 }
 
 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
-  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  VisitAtomicExchange(this, node, GetAtomicStoreOp(rep), AtomicWidth::kWord64);
+  AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+  DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+                 kTaggedSize == 8);
+  VisitStoreCommon(this, node, params.store_representation(), params.order(),
+                   AtomicWidth::kWord64);
 }
 
 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
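Note: the "memory order is ignored" comment above relies on the standard C++11-to-x86 mapping it links. On x86-64's TSO model every load already has acquire semantics, so acquire and seq_cst atomic loads both lower to a plain MOV; only seq_cst stores need a locked instruction. A minimal stand-alone illustration of that fact, separate from this CL:

    #include <atomic>

    // Both functions typically compile to a single "mov" on x86-64; the
    // ordering difference only matters on the store side.
    int load_acquire(const std::atomic<int>& a) {
      return a.load(std::memory_order_acquire);
    }

    int load_seq_cst(const std::atomic<int>& a) {
      return a.load(std::memory_order_seq_cst);
    }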
@@ -679,22 +679,25 @@ TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset) {
   return BitcastWordToTagged(Load<RawPtrT>(base, offset));
 }
 
-Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
-                                TNode<WordT> offset) {
+Node* CodeAssembler::AtomicLoad(MachineType type, AtomicMemoryOrder order,
+                                TNode<RawPtrT> base, TNode<WordT> offset) {
   DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
-  return raw_assembler()->AtomicLoad(type, base, offset);
+  return raw_assembler()->AtomicLoad(AtomicLoadParameters(type, order), base,
+                                     offset);
 }
 
 template <class Type>
-TNode<Type> CodeAssembler::AtomicLoad64(TNode<RawPtrT> base,
+TNode<Type> CodeAssembler::AtomicLoad64(AtomicMemoryOrder order,
+                                        TNode<RawPtrT> base,
                                         TNode<WordT> offset) {
-  return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(base, offset));
+  return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(
+      AtomicLoadParameters(MachineType::Uint64(), order), base, offset));
 }
 
 template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
-    TNode<RawPtrT> base, TNode<WordT> offset);
+    AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
 template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
-    TNode<RawPtrT> base, TNode<WordT> offset);
+    AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
 
 Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
                                     TNode<IntPtrT> offset) {
@@ -859,16 +862,22 @@ void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
                                BitcastTaggedToWord(tagged_value));
 }
 
-void CodeAssembler::AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
+void CodeAssembler::AtomicStore(MachineRepresentation rep,
+                                AtomicMemoryOrder order, TNode<RawPtrT> base,
                                 TNode<WordT> offset, TNode<Word32T> value) {
   DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
-  raw_assembler()->AtomicStore(rep, base, offset, value);
+  raw_assembler()->AtomicStore(
+      AtomicStoreParameters(rep, WriteBarrierKind::kNoWriteBarrier, order),
+      base, offset, value);
 }
 
-void CodeAssembler::AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
-                                  TNode<UintPtrT> value,
+void CodeAssembler::AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+                                  TNode<WordT> offset, TNode<UintPtrT> value,
                                   TNode<UintPtrT> value_high) {
-  raw_assembler()->AtomicStore64(base, offset, value, value_high);
+  raw_assembler()->AtomicStore64(
+      AtomicStoreParameters(MachineRepresentation::kWord64,
+                            WriteBarrierKind::kNoWriteBarrier, order),
+      base, offset, value, value_high);
 }
 
 #define ATOMIC_FUNCTION(name) \
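Note: the CodeAssembler entry points above now take the memory order first, then base and offset, and bundle it with the machine type into AtomicLoadParameters/AtomicStoreParameters before reaching the RawMachineAssembler. A self-contained C++ analogue of that shape (the enum and helper below are illustrative, not V8 API):

    #include <atomic>
    #include <cstddef>

    enum class AtomicMemoryOrder { kAcqRel, kSeqCst };  // mirrors the CL's enum

    // Illustrative helper: load a T at base + offset with the requested
    // order. For a pure load, kAcqRel degenerates to acquire.
    template <typename T>
    T AtomicLoadAt(AtomicMemoryOrder order, const void* base,
                   std::ptrdiff_t offset) {
      auto* slot = reinterpret_cast<const std::atomic<T>*>(
          static_cast<const char*>(base) + offset);
      return slot->load(order == AtomicMemoryOrder::kSeqCst
                            ? std::memory_order_seq_cst
                            : std::memory_order_acquire);
    }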
@@ -17,6 +17,7 @@
 #include "src/base/optional.h"
 #include "src/base/type-traits.h"
 #include "src/builtins/builtins.h"
+#include "src/codegen/atomic-memory-order.h"
 #include "src/codegen/code-factory.h"
 #include "src/codegen/machine-type.h"
 #include "src/codegen/source-position.h"
@@ -743,12 +744,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
     return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base, offset));
   }
   template <class Type>
-  TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
+  TNode<Type> AtomicLoad(AtomicMemoryOrder order, TNode<RawPtrT> base,
+                         TNode<WordT> offset) {
     return UncheckedCast<Type>(
-        AtomicLoad(MachineTypeOf<Type>::value, base, offset));
+        AtomicLoad(MachineTypeOf<Type>::value, order, base, offset));
   }
   template <class Type>
-  TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
+  TNode<Type> AtomicLoad64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+                           TNode<WordT> offset);
   // Load uncompressed tagged value from (most likely off JS heap) memory
   // location.
   TNode<Object> LoadFullTagged(Node* base);
@@ -809,12 +812,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
                           TNode<HeapObject> object,
                           int offset, Node* value);
   void OptimizedStoreMap(TNode<HeapObject> object, TNode<Map>);
-  void AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
-                   TNode<WordT> offset, TNode<Word32T> value);
+  void AtomicStore(MachineRepresentation rep, AtomicMemoryOrder order,
+                   TNode<RawPtrT> base, TNode<WordT> offset,
+                   TNode<Word32T> value);
   // {value_high} is used for 64-bit stores on 32-bit platforms, must be
   // nullptr in other cases.
-  void AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
-                     TNode<UintPtrT> value, TNode<UintPtrT> value_high);
+  void AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+                     TNode<WordT> offset, TNode<UintPtrT> value,
+                     TNode<UintPtrT> value_high);
 
   TNode<Word32T> AtomicAdd(MachineType type, TNode<RawPtrT> base,
                            TNode<UintPtrT> offset, TNode<Word32T> value);
@@ -1353,7 +1358,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
       const CallInterfaceDescriptor& descriptor, int input_count,
       Node* const* inputs);
 
-  Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
+  Node* AtomicLoad(MachineType type, AtomicMemoryOrder order,
+                   TNode<RawPtrT> base, TNode<WordT> offset);
 
   Node* UnalignedLoad(MachineType type, TNode<RawPtrT> base,
                       TNode<WordT> offset);
@@ -944,29 +944,31 @@ void Int64Lowering::LowerNode(Node* node) {
     }
     case IrOpcode::kWord64AtomicLoad: {
      DCHECK_EQ(4, node->InputCount());
-      MachineType type = AtomicOpType(node->op());
+      AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
      DefaultLowering(node, true);
-      if (type == MachineType::Uint64()) {
-        NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
+      if (params.representation() == MachineType::Uint64()) {
+        NodeProperties::ChangeOp(
+            node, machine()->Word32AtomicPairLoad(params.order()));
        ReplaceNodeWithProjections(node);
      } else {
-        NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(type));
+        NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(params));
        ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
      }
      break;
    }
    case IrOpcode::kWord64AtomicStore: {
      DCHECK_EQ(5, node->InputCount());
-      MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-      if (rep == MachineRepresentation::kWord64) {
+      AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+      if (params.representation() == MachineRepresentation::kWord64) {
        LowerMemoryBaseAndIndex(node);
        Node* value = node->InputAt(2);
        node->ReplaceInput(2, GetReplacementLow(value));
        node->InsertInput(zone(), 3, GetReplacementHigh(value));
-        NodeProperties::ChangeOp(node, machine()->Word32AtomicPairStore());
+        NodeProperties::ChangeOp(
+            node, machine()->Word32AtomicPairStore(params.order()));
      } else {
        DefaultLowering(node, true);
-        NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(rep));
+        NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(params));
      }
      break;
    }
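Note: on 32-bit targets the lowering above rewrites a 64-bit atomic access into a pair operation over the two 32-bit halves (GetReplacementLow/GetReplacementHigh), threading the memory order through unchanged. The split itself, in isolation:

    #include <cstdint>

    // The low/high decomposition that Word32AtomicPairStore consumes.
    struct Halves {
      uint32_t low;
      uint32_t high;
    };

    Halves Split(uint64_t value) {
      return {static_cast<uint32_t>(value),
              static_cast<uint32_t>(value >> 32)};
    }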
@@ -121,6 +121,11 @@ class MachineRepresentationInferrer {
           break;
         case IrOpcode::kWord32AtomicLoad:
         case IrOpcode::kWord64AtomicLoad:
+          representation_vector_[node->id()] =
+              PromoteRepresentation(AtomicLoadParametersOf(node->op())
+                                        .representation()
+                                        .representation());
+          break;
         case IrOpcode::kLoad:
         case IrOpcode::kLoadImmutable:
         case IrOpcode::kProtectedLoad:
@@ -153,8 +158,8 @@ class MachineRepresentationInferrer {
         }
         case IrOpcode::kWord32AtomicStore:
         case IrOpcode::kWord64AtomicStore:
-          representation_vector_[node->id()] =
-              PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+          representation_vector_[node->id()] = PromoteRepresentation(
+              AtomicStoreParametersOf(node->op()).representation());
           break;
         case IrOpcode::kWord32AtomicPairLoad:
         case IrOpcode::kWord32AtomicPairStore:
@@ -585,9 +590,12 @@ class MachineRepresentationChecker {
           case MachineRepresentation::kTaggedPointer:
           case MachineRepresentation::kTaggedSigned:
             if (COMPRESS_POINTERS_BOOL &&
-                node->opcode() == IrOpcode::kStore &&
-                IsAnyTagged(
-                    StoreRepresentationOf(node->op()).representation())) {
+                ((node->opcode() == IrOpcode::kStore &&
+                  IsAnyTagged(StoreRepresentationOf(node->op())
+                                  .representation())) ||
+                 (node->opcode() == IrOpcode::kWord32AtomicStore &&
+                  IsAnyTagged(AtomicStoreParametersOf(node->op())
+                                  .representation())))) {
               CheckValueInputIsCompressedOrTagged(node, 2);
             } else {
               CheckValueInputIsTagged(node, 2);
@@ -32,6 +32,41 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
   return os << rep.representation() << ", " << rep.write_barrier_kind();
 }
 
+bool operator==(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+  return lhs.store_representation() == rhs.store_representation() &&
+         lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicStoreParameters params) {
+  return base::hash_combine(hash_value(params.store_representation()),
+                            params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicStoreParameters params) {
+  return os << params.store_representation() << ", " << params.order();
+}
+
+bool operator==(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+  return lhs.representation() == rhs.representation() &&
+         lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicLoadParameters params) {
+  return base::hash_combine(params.representation(), params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicLoadParameters params) {
+  return os << params.representation() << ", " << params.order();
+}
+
 size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }
 
 std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
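Note: the ==/hash_value pairs above matter because parameterized operators are compared and hashed by value (e.g. during value numbering), so two parameter objects that compare equal must also hash equal. The contract in isolation, with a simplified stand-in struct:

    #include <cstddef>
    #include <functional>

    struct Params {
      int representation;
      int order;
      bool operator==(const Params& other) const {
        return representation == other.representation && order == other.order;
      }
    };

    // Hash over exactly the fields operator== inspects, so that
    // a == b implies hash_value(a) == hash_value(b).
    size_t hash_value(const Params& p) {
      return std::hash<int>{}(p.representation) * 31 +
             std::hash<int>{}(p.order);
    }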
@@ -121,20 +156,29 @@ bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
 LoadRepresentation LoadRepresentationOf(Operator const* op) {
   DCHECK(IrOpcode::kLoad == op->opcode() ||
          IrOpcode::kProtectedLoad == op->opcode() ||
-         IrOpcode::kWord32AtomicLoad == op->opcode() ||
-         IrOpcode::kWord64AtomicLoad == op->opcode() ||
-         IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
          IrOpcode::kUnalignedLoad == op->opcode() ||
          IrOpcode::kLoadImmutable == op->opcode());
   return OpParameter<LoadRepresentation>(op);
 }
 
+AtomicLoadParameters AtomicLoadParametersOf(Operator const* op) {
+  DCHECK(IrOpcode::kWord32AtomicLoad == op->opcode() ||
+         IrOpcode::kWord64AtomicLoad == op->opcode());
+  return OpParameter<AtomicLoadParameters>(op);
+}
+
 StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
   DCHECK(IrOpcode::kStore == op->opcode() ||
          IrOpcode::kProtectedStore == op->opcode());
   return OpParameter<StoreRepresentation>(op);
 }
 
+AtomicStoreParameters const& AtomicStoreParametersOf(Operator const* op) {
+  DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
+         IrOpcode::kWord64AtomicStore == op->opcode());
+  return OpParameter<AtomicStoreParameters>(op);
+}
+
 UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
     Operator const* op) {
   DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
@@ -181,12 +225,6 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
   return OpParameter<StackSlotRepresentation>(op);
 }
 
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
-  DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
-         IrOpcode::kWord64AtomicStore == op->opcode());
-  return OpParameter<MachineRepresentation>(op);
-}
-
 MachineType AtomicOpType(Operator const* op) {
   return OpParameter<MachineType>(op);
 }
@@ -649,6 +687,30 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
   V(S128Load32Zero) \
   V(S128Load64Zero)
 
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_TYPE_LIST(V)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V) \
+  V(TaggedSigned)                    \
+  V(TaggedPointer)                   \
+  V(AnyTagged)                       \
+  V(CompressedPointer)               \
+  V(AnyCompressed)
+
+#else
+
+#define ATOMIC_TAGGED_TYPE_LIST(V) \
+  V(TaggedSigned)                  \
+  V(TaggedPointer)                 \
+  V(AnyTagged)                     \
+  V(CompressedPointer)             \
+  V(AnyCompressed)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V)
+
+#endif  // TAGGED_SIZE_8_BYTES
+
 #define ATOMIC_U32_TYPE_LIST(V) \
   V(Uint8)                      \
   V(Uint16)                     \
@@ -664,6 +726,28 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
   ATOMIC_U32_TYPE_LIST(V) \
   V(Uint64)
 
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V) \
+  V(kTaggedSigned)                             \
+  V(kTaggedPointer)                            \
+  V(kTagged)
+
+#else
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V) \
+  V(kTaggedSigned)                           \
+  V(kTaggedPointer)                          \
+  V(kTagged)                                 \
+  V(kCompressedPointer)                      \
+  V(kCompressed)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V)
+
+#endif  // TAGGED_SIZE_8_BYTES
+
 #define ATOMIC_REPRESENTATION_LIST(V) \
   V(kWord8)                           \
   V(kWord16)                          \
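Note: the two preprocessor branches encode a single fact: with 8-byte tagged values (no pointer compression) a tagged atomic access is a 64-bit operation, so the tagged types belong on the Word64 lists; with 4-byte (compressed) tagged values they belong on the Word32 lists. A sketch of the invariant, assuming V8's kTaggedSize constant from src/common/globals.h:

    #include "src/common/globals.h"

    // Tagged atomics must match the width of a tagged slot.
    static_assert(kTaggedSize == 4 || kTaggedSize == 8,
                  "tagged slots are one (possibly compressed) word");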
@@ -967,55 +1051,63 @@ struct MachineOperatorGlobalCache {
   MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
 
 #define ATOMIC_LOAD(Type)                                                   \
-  struct Word32AtomicLoad##Type##Operator final                             \
-      : public Operator1<LoadRepresentation> {                              \
-    Word32AtomicLoad##Type##Operator()                                      \
-        : Operator1<LoadRepresentation>(                                    \
+  struct Word32SeqCstLoad##Type##Operator                                   \
+      : public Operator1<AtomicLoadParameters> {                            \
+    Word32SeqCstLoad##Type##Operator()                                      \
+        : Operator1<AtomicLoadParameters>(                                  \
               IrOpcode::kWord32AtomicLoad, Operator::kEliminatable,         \
-              "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
-  };                                                                        \
-  Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
+              "Word32AtomicLoad", 2, 1, 1, 1, 1, 0,                         \
+              AtomicLoadParameters(MachineType::Type(),                     \
+                                   AtomicMemoryOrder::kSeqCst)) {}          \
+  };                                                                        \
+  Word32SeqCstLoad##Type##Operator kWord32SeqCstLoad##Type;
   ATOMIC_TYPE_LIST(ATOMIC_LOAD)
 #undef ATOMIC_LOAD
 
 #define ATOMIC_LOAD(Type)                                                   \
-  struct Word64AtomicLoad##Type##Operator final                             \
-      : public Operator1<LoadRepresentation> {                              \
-    Word64AtomicLoad##Type##Operator()                                      \
-        : Operator1<LoadRepresentation>(                                    \
+  struct Word64SeqCstLoad##Type##Operator                                   \
+      : public Operator1<AtomicLoadParameters> {                            \
+    Word64SeqCstLoad##Type##Operator()                                      \
+        : Operator1<AtomicLoadParameters>(                                  \
              IrOpcode::kWord64AtomicLoad, Operator::kEliminatable,          \
-              "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
-  };                                                                        \
-  Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
+              "Word64AtomicLoad", 2, 1, 1, 1, 1, 0,                         \
+              AtomicLoadParameters(MachineType::Type(),                     \
+                                   AtomicMemoryOrder::kSeqCst)) {}          \
+  };                                                                        \
+  Word64SeqCstLoad##Type##Operator kWord64SeqCstLoad##Type;
   ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
 #undef ATOMIC_LOAD
 
 #define ATOMIC_STORE(Type)                                                 \
-  struct Word32AtomicStore##Type##Operator                                 \
-      : public Operator1<MachineRepresentation> {                          \
-    Word32AtomicStore##Type##Operator()                                    \
-        : Operator1<MachineRepresentation>(                                \
+  struct Word32SeqCstStore##Type##Operator                                 \
+      : public Operator1<AtomicStoreParameters> {                          \
+    Word32SeqCstStore##Type##Operator()                                    \
+        : Operator1<AtomicStoreParameters>(                                \
              IrOpcode::kWord32AtomicStore,                                 \
              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,  \
              "Word32AtomicStore", 3, 1, 1, 0, 1, 0,                        \
-              MachineRepresentation::Type) {}                              \
+              AtomicStoreParameters(MachineRepresentation::Type,           \
+                                    kNoWriteBarrier,                       \
+                                    AtomicMemoryOrder::kSeqCst)) {}        \
   };                                                                       \
-  Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
+  Word32SeqCstStore##Type##Operator kWord32SeqCstStore##Type;
   ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
 #undef ATOMIC_STORE
 
 #define ATOMIC_STORE(Type)                                                 \
-  struct Word64AtomicStore##Type##Operator                                 \
-      : public Operator1<MachineRepresentation> {                          \
-    Word64AtomicStore##Type##Operator()                                    \
-        : Operator1<MachineRepresentation>(                                \
+  struct Word64SeqCstStore##Type##Operator                                 \
+      : public Operator1<AtomicStoreParameters> {                          \
+    Word64SeqCstStore##Type##Operator()                                    \
+        : Operator1<AtomicStoreParameters>(                                \
              IrOpcode::kWord64AtomicStore,                                 \
              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,  \
              "Word64AtomicStore", 3, 1, 1, 0, 1, 0,                        \
-              MachineRepresentation::Type) {}                              \
+              AtomicStoreParameters(MachineRepresentation::Type,           \
+                                    kNoWriteBarrier,                       \
+                                    AtomicMemoryOrder::kSeqCst)) {}        \
   };                                                                       \
-  Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
+  Word64SeqCstStore##Type##Operator kWord64SeqCstStore##Type;
   ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
 #undef ATOMIC_STORE
 
@@ -1075,21 +1167,23 @@ struct MachineOperatorGlobalCache {
   ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
 #undef ATOMIC_COMPARE_EXCHANGE
 
-  struct Word32AtomicPairLoadOperator : public Operator {
-    Word32AtomicPairLoadOperator()
-        : Operator(IrOpcode::kWord32AtomicPairLoad,
-                   Operator::kNoDeopt | Operator::kNoThrow,
-                   "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+  struct Word32SeqCstPairLoadOperator : public Operator1<AtomicMemoryOrder> {
+    Word32SeqCstPairLoadOperator()
+        : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairLoad,
+                                       Operator::kNoDeopt | Operator::kNoThrow,
+                                       "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0,
+                                       AtomicMemoryOrder::kSeqCst) {}
   };
-  Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
+  Word32SeqCstPairLoadOperator kWord32SeqCstPairLoad;
 
-  struct Word32AtomicPairStoreOperator : public Operator {
-    Word32AtomicPairStoreOperator()
-        : Operator(IrOpcode::kWord32AtomicPairStore,
-                   Operator::kNoDeopt | Operator::kNoThrow,
-                   "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+  struct Word32SeqCstPairStoreOperator : public Operator1<AtomicMemoryOrder> {
+    Word32SeqCstPairStoreOperator()
+        : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairStore,
+                                       Operator::kNoDeopt | Operator::kNoThrow,
+                                       "Word32AtomicPairStore", 4, 1, 1, 0, 1,
+                                       0, AtomicMemoryOrder::kSeqCst) {}
   };
-  Word32AtomicPairStoreOperator kWord32AtomicPairStore;
+  Word32SeqCstPairStoreOperator kWord32SeqCstPairStore;
 
 #define ATOMIC_PAIR_OP(op) \
   struct Word32AtomicPair##op##Operator : public Operator { \
@@ -1549,23 +1643,47 @@ const Operator* MachineOperatorBuilder::MemBarrier() {
 }
 
 const Operator* MachineOperatorBuilder::Word32AtomicLoad(
-    LoadRepresentation rep) {
-#define LOAD(Type)                          \
-  if (rep == MachineType::Type()) {         \
-    return &cache_.kWord32AtomicLoad##Type; \
+    AtomicLoadParameters params) {
+#define CACHED_LOAD(Type)                                \
+  if (params.representation() == MachineType::Type() &&  \
+      params.order() == AtomicMemoryOrder::kSeqCst) {    \
+    return &cache_.kWord32SeqCstLoad##Type;              \
+  }
+  ATOMIC_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type)                                            \
+  if (params.representation() == MachineType::Type()) {       \
+    return zone_->New<Operator1<AtomicLoadParameters>>(       \
+        IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+        "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, params);        \
   }
   ATOMIC_TYPE_LIST(LOAD)
+  ATOMIC_TAGGED_TYPE_LIST(LOAD)
 #undef LOAD
 
   UNREACHABLE();
 }
 
 const Operator* MachineOperatorBuilder::Word32AtomicStore(
-    MachineRepresentation rep) {
-#define STORE(kRep)                          \
-  if (rep == MachineRepresentation::kRep) {  \
-    return &cache_.kWord32AtomicStore##kRep; \
+    AtomicStoreParameters params) {
+#define CACHED_STORE(kRep)                                      \
+  if (params.representation() == MachineRepresentation::kRep && \
+      params.order() == AtomicMemoryOrder::kSeqCst) {           \
+    return &cache_.kWord32SeqCstStore##kRep;                    \
+  }
+  ATOMIC_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep)                                                  \
+  if (params.representation() == MachineRepresentation::kRep) {      \
+    return zone_->New<Operator1<AtomicStoreParameters>>(             \
+        IrOpcode::kWord32AtomicStore,                                \
+        Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+        "Word32AtomicStore", 3, 1, 1, 0, 1, 0, params);              \
   }
   ATOMIC_REPRESENTATION_LIST(STORE)
+  ATOMIC_TAGGED_REPRESENTATION_LIST(STORE)
 #undef STORE
   UNREACHABLE();
 }
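Note on the design: seq_cst accesses keep hitting the statically allocated operators in MachineOperatorGlobalCache, while any other order falls through to a fresh zone allocation. The same get-or-allocate shape, reduced to stand-alone C++:

    #include <memory>
    #include <vector>

    enum class Order { kAcqRel, kSeqCst };
    struct Op { Order order; };

    class OpBuilder {
     public:
      const Op* AtomicLoadOp(Order order) {
        static const Op kSeqCstOp{Order::kSeqCst};  // shared singleton
        if (order == Order::kSeqCst) return &kSeqCstOp;
        // One-off operator; its lifetime is tied to the builder's arena,
        // mirroring what the zone_->New<...>() fallback does above.
        arena_.push_back(std::make_unique<Op>(Op{order}));
        return arena_.back().get();
      }

     private:
      std::vector<std::unique_ptr<Op>> arena_;
    };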
@@ -1642,24 +1760,49 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
 }
 
 const Operator* MachineOperatorBuilder::Word64AtomicLoad(
-    LoadRepresentation rep) {
-#define LOAD(Type)                          \
-  if (rep == MachineType::Type()) {         \
-    return &cache_.kWord64AtomicLoad##Type; \
+    AtomicLoadParameters params) {
+#define CACHED_LOAD(Type)                                \
+  if (params.representation() == MachineType::Type() &&  \
+      params.order() == AtomicMemoryOrder::kSeqCst) {    \
+    return &cache_.kWord64SeqCstLoad##Type;              \
+  }
+  ATOMIC_U64_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type)                                            \
+  if (params.representation() == MachineType::Type()) {       \
+    return zone_->New<Operator1<AtomicLoadParameters>>(       \
+        IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+        "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, params);        \
   }
   ATOMIC_U64_TYPE_LIST(LOAD)
+  ATOMIC64_TAGGED_TYPE_LIST(LOAD)
 #undef LOAD
 
   UNREACHABLE();
 }
 
 const Operator* MachineOperatorBuilder::Word64AtomicStore(
-    MachineRepresentation rep) {
-#define STORE(kRep)                          \
-  if (rep == MachineRepresentation::kRep) {  \
-    return &cache_.kWord64AtomicStore##kRep; \
+    AtomicStoreParameters params) {
+#define CACHED_STORE(kRep)                                      \
+  if (params.representation() == MachineRepresentation::kRep && \
+      params.order() == AtomicMemoryOrder::kSeqCst) {           \
+    return &cache_.kWord64SeqCstStore##kRep;                    \
+  }
+  ATOMIC64_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep)                                                  \
+  if (params.representation() == MachineRepresentation::kRep) {      \
+    return zone_->New<Operator1<AtomicStoreParameters>>(             \
+        IrOpcode::kWord64AtomicStore,                                \
+        Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+        "Word64AtomicStore", 3, 1, 1, 0, 1, 0, params);              \
   }
   ATOMIC64_REPRESENTATION_LIST(STORE)
+  ATOMIC64_TAGGED_REPRESENTATION_LIST(STORE)
 #undef STORE
 
   UNREACHABLE();
 }
@@ -1734,12 +1877,24 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
   UNREACHABLE();
 }
 
-const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
-  return &cache_.kWord32AtomicPairLoad;
+const Operator* MachineOperatorBuilder::Word32AtomicPairLoad(
+    AtomicMemoryOrder order) {
+  if (order == AtomicMemoryOrder::kSeqCst) {
+    return &cache_.kWord32SeqCstPairLoad;
+  }
+  return zone_->New<Operator1<AtomicMemoryOrder>>(
+      IrOpcode::kWord32AtomicPairLoad, Operator::kNoDeopt | Operator::kNoThrow,
+      "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0, order);
 }
 
-const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
-  return &cache_.kWord32AtomicPairStore;
+const Operator* MachineOperatorBuilder::Word32AtomicPairStore(
+    AtomicMemoryOrder order) {
+  if (order == AtomicMemoryOrder::kSeqCst) {
+    return &cache_.kWord32SeqCstPairStore;
+  }
+  return zone_->New<Operator1<AtomicMemoryOrder>>(
+      IrOpcode::kWord32AtomicPairStore, Operator::kNoDeopt | Operator::kNoThrow,
+      "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0, order);
 }
 
 const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
@@ -1863,8 +2018,12 @@ StackCheckKind StackCheckKindOf(Operator const* op) {
 #undef ATOMIC_TYPE_LIST
 #undef ATOMIC_U64_TYPE_LIST
 #undef ATOMIC_U32_TYPE_LIST
+#undef ATOMIC_TAGGED_TYPE_LIST
+#undef ATOMIC64_TAGGED_TYPE_LIST
 #undef ATOMIC_REPRESENTATION_LIST
+#undef ATOMIC_TAGGED_REPRESENTATION_LIST
 #undef ATOMIC64_REPRESENTATION_LIST
+#undef ATOMIC64_TAGGED_REPRESENTATION_LIST
 #undef SIMD_LANE_OP_LIST
 #undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
 #undef LOAD_TRANSFORM_LIST
@@ -8,6 +8,7 @@
 #include "src/base/compiler-specific.h"
 #include "src/base/enum-set.h"
 #include "src/base/flags.h"
+#include "src/codegen/atomic-memory-order.h"
 #include "src/codegen/machine-type.h"
 #include "src/compiler/globals.h"
 #include "src/compiler/write-barrier-kind.h"
@@ -50,6 +51,32 @@ using LoadRepresentation = MachineType;
 V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
     V8_WARN_UNUSED_RESULT;
 
+// A Word(32|64)AtomicLoad needs both a LoadRepresentation and a memory
+// order.
+class AtomicLoadParameters final {
+ public:
+  AtomicLoadParameters(LoadRepresentation representation,
+                       AtomicMemoryOrder order)
+      : representation_(representation), order_(order) {}
+
+  LoadRepresentation representation() const { return representation_; }
+  AtomicMemoryOrder order() const { return order_; }
+
+ private:
+  LoadRepresentation representation_;
+  AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
+bool operator!=(AtomicLoadParameters, AtomicLoadParameters);
+
+size_t hash_value(AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
+    V8_WARN_UNUSED_RESULT;
+
 enum class MemoryAccessKind {
   kNormal,
   kUnaligned,
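Note: constructing the new parameter objects is simply pairing a representation with an order; a fragment against the declarations above (and the matching AtomicStoreParameters class further down):

    // Fragment, not a full translation unit:
    AtomicLoadParameters tagged_acquire(MachineType::AnyTagged(),
                                        AtomicMemoryOrder::kAcqRel);
    AtomicStoreParameters word32_seq_cst(MachineRepresentation::kWord32,
                                         kNoWriteBarrier,
                                         AtomicMemoryOrder::kSeqCst);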
@@ -131,6 +158,43 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
 V8_EXPORT_PRIVATE StoreRepresentation const& StoreRepresentationOf(
     Operator const*) V8_WARN_UNUSED_RESULT;
 
+// A Word(32|64)AtomicStore needs both a StoreRepresentation and a memory order.
+class AtomicStoreParameters final {
+ public:
+  AtomicStoreParameters(MachineRepresentation representation,
+                        WriteBarrierKind write_barrier_kind,
+                        AtomicMemoryOrder order)
+      : store_representation_(representation, write_barrier_kind),
+        order_(order) {}
+
+  MachineRepresentation representation() const {
+    return store_representation_.representation();
+  }
+  WriteBarrierKind write_barrier_kind() const {
+    return store_representation_.write_barrier_kind();
+  }
+  AtomicMemoryOrder order() const { return order_; }
+
+  StoreRepresentation store_representation() const {
+    return store_representation_;
+  }
+
+ private:
+  StoreRepresentation store_representation_;
+  AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
+bool operator!=(AtomicStoreParameters, AtomicStoreParameters);
+
+size_t hash_value(AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+                                           AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE AtomicStoreParameters const& AtomicStoreParametersOf(
+    Operator const*) V8_WARN_UNUSED_RESULT;
+
 // An UnalignedStore needs a MachineType.
 using UnalignedStoreRepresentation = MachineRepresentation;
 
@@ -173,9 +237,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
 V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
     Operator const* op) V8_WARN_UNUSED_RESULT;
 
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
-    V8_WARN_UNUSED_RESULT;
-
 MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
 
 class S128ImmediateParameter {
@@ -895,13 +956,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* MemBarrier();
 
   // atomic-load [base + index]
-  const Operator* Word32AtomicLoad(LoadRepresentation rep);
+  const Operator* Word32AtomicLoad(AtomicLoadParameters params);
   // atomic-load [base + index]
-  const Operator* Word64AtomicLoad(LoadRepresentation rep);
+  const Operator* Word64AtomicLoad(AtomicLoadParameters params);
   // atomic-store [base + index], value
-  const Operator* Word32AtomicStore(MachineRepresentation rep);
+  const Operator* Word32AtomicStore(AtomicStoreParameters params);
   // atomic-store [base + index], value
-  const Operator* Word64AtomicStore(MachineRepresentation rep);
+  const Operator* Word64AtomicStore(AtomicStoreParameters params);
   // atomic-exchange [base + index], value
   const Operator* Word32AtomicExchange(MachineType type);
   // atomic-exchange [base + index], value
@@ -931,9 +992,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   // atomic-xor [base + index], value
   const Operator* Word64AtomicXor(MachineType type);
   // atomic-pair-load [base + index]
-  const Operator* Word32AtomicPairLoad();
+  const Operator* Word32AtomicPairLoad(AtomicMemoryOrder order);
   // atomic-pair-sub [base + index], value_high, value-low
-  const Operator* Word32AtomicPairStore();
+  const Operator* Word32AtomicPairStore(AtomicMemoryOrder order);
   // atomic-pair-add [base + index], value_high, value_low
   const Operator* Word32AtomicPairAdd();
   // atomic-pair-sub [base + index], value_high, value-low
@@ -239,20 +239,20 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   }
 
   // Atomic memory operations.
-  Node* AtomicLoad(MachineType type, Node* base, Node* index) {
-    DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
-    return AddNode(machine()->Word32AtomicLoad(type), base, index);
+  Node* AtomicLoad(AtomicLoadParameters rep, Node* base, Node* index) {
+    DCHECK_NE(rep.representation().representation(),
+              MachineRepresentation::kWord64);
+    return AddNode(machine()->Word32AtomicLoad(rep), base, index);
   }
 
-  Node* AtomicLoad64(Node* base, Node* index) {
+  Node* AtomicLoad64(AtomicLoadParameters rep, Node* base, Node* index) {
     if (machine()->Is64()) {
       // This uses Uint64() intentionally: AtomicLoad is not implemented for
       // Int64(), which is fine because the machine instruction only cares
       // about words.
-      return AddNode(machine()->Word64AtomicLoad(MachineType::Uint64()), base,
-                     index);
+      return AddNode(machine()->Word64AtomicLoad(rep), base, index);
     } else {
-      return AddNode(machine()->Word32AtomicPairLoad(), base, index);
+      return AddNode(machine()->Word32AtomicPairLoad(rep.order()), base, index);
     }
   }
 
@@ -262,22 +262,24 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
 #define VALUE_HALVES value, value_high
 #endif
 
-  Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+  Node* AtomicStore(AtomicStoreParameters params, Node* base, Node* index,
                     Node* value) {
     DCHECK(!IsMapOffsetConstantMinusTag(index));
-    DCHECK_NE(rep, MachineRepresentation::kWord64);
-    return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
+    DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+    return AddNode(machine()->Word32AtomicStore(params), base, index, value);
   }
 
-  Node* AtomicStore64(Node* base, Node* index, Node* value, Node* value_high) {
+  Node* AtomicStore64(AtomicStoreParameters params, Node* base, Node* index,
+                      Node* value, Node* value_high) {
     if (machine()->Is64()) {
       DCHECK_NULL(value_high);
-      return AddNode(
-          machine()->Word64AtomicStore(MachineRepresentation::kWord64), base,
-          index, value);
+      return AddNode(machine()->Word64AtomicStore(params), base, index, value);
     } else {
-      return AddNode(machine()->Word32AtomicPairStore(), base, index,
-                     VALUE_HALVES);
+      DCHECK(params.representation() != MachineRepresentation::kTaggedPointer &&
+             params.representation() != MachineRepresentation::kTaggedSigned &&
+             params.representation() != MachineRepresentation::kTagged);
+      return AddNode(machine()->Word32AtomicPairStore(params.order()), base,
+                     index, VALUE_HALVES);
     }
   }
 
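Note: a usage fragment matching these signatures; this is the call shape the new cctest below uses, and on 64-bit targets value_high must be nullptr per the comment in code-assembler.h:

    // Fragment, assuming a RawMachineAssembler m and Node* base/index/value:
    AtomicStoreParameters params(MachineRepresentation::kWord64,
                                 kNoWriteBarrier, AtomicMemoryOrder::kSeqCst);
    m.AtomicStore64(params, base, index, value, /*value_high=*/nullptr);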
@@ -5175,16 +5175,26 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
       const Operator* (MachineOperatorBuilder::*)(MachineType);
   using OperatorByRep =
       const Operator* (MachineOperatorBuilder::*)(MachineRepresentation);
+  using OperatorByAtomicLoadRep =
+      const Operator* (MachineOperatorBuilder::*)(AtomicLoadParameters);
+  using OperatorByAtomicStoreRep =
+      const Operator* (MachineOperatorBuilder::*)(AtomicStoreParameters);
 
   const Type type;
   const MachineType machine_type;
   const OperatorByType operator_by_type = nullptr;
   const OperatorByRep operator_by_rep = nullptr;
+  const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
+  const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
 
   constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
       : type(t), machine_type(m), operator_by_type(o) {}
   constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
       : type(t), machine_type(m), operator_by_rep(o) {}
+  constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicLoadRep o)
+      : type(t), machine_type(m), operator_by_atomic_load_params(o) {}
+  constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o)
+      : type(t), machine_type(m), operator_by_atomic_store_rep(o) {}
 
   // Constexpr, hence just a table lookup in most compilers.
   static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@@ -5293,11 +5303,21 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
   // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
   uintptr_t capped_offset = static_cast<uintptr_t>(offset);
   if (info.type != AtomicOpInfo::kSpecial) {
-    const Operator* op =
-        info.operator_by_type
-            ? (mcgraph()->machine()->*info.operator_by_type)(info.machine_type)
-            : (mcgraph()->machine()->*info.operator_by_rep)(
-                  info.machine_type.representation());
+    const Operator* op;
+    if (info.operator_by_type) {
+      op = (mcgraph()->machine()->*info.operator_by_type)(info.machine_type);
+    } else if (info.operator_by_rep) {
+      op = (mcgraph()->machine()->*info.operator_by_rep)(
+          info.machine_type.representation());
+    } else if (info.operator_by_atomic_load_params) {
+      op = (mcgraph()->machine()->*info.operator_by_atomic_load_params)(
+          AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst));
+    } else {
+      op = (mcgraph()->machine()->*info.operator_by_atomic_store_rep)(
+          AtomicStoreParameters(info.machine_type.representation(),
+                                WriteBarrierKind::kNoWriteBarrier,
+                                AtomicMemoryOrder::kSeqCst));
+    }
 
     Node* input_nodes[6] = {MemBuffer(capped_offset), index};
     int num_actual_inputs = info.type;
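Note: the rewrite above replaces a two-way conditional with a four-way dispatch over pointer-to-member functions; wasm atomics themselves stay sequentially consistent, which is why the new arms pin AtomicMemoryOrder::kSeqCst. For readers unfamiliar with the `->*` syntax, a reduced stand-alone example:

    struct Builder {
      int ByType(int t) { return t; }
      int ByRep(int r) { return r + 100; }
    };

    using MemberFn = int (Builder::*)(int);

    // (b->*fn)(arg) invokes the member function that fn points to on b.
    int Dispatch(Builder* b, MemberFn fn, int arg) { return (b->*fn)(arg); }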
|
@ -89,6 +89,7 @@ v8_source_set("cctest_sources") {
|
|||||||
"compiler/function-tester.cc",
|
"compiler/function-tester.cc",
|
||||||
"compiler/function-tester.h",
|
"compiler/function-tester.h",
|
||||||
"compiler/node-observer-tester.h",
|
"compiler/node-observer-tester.h",
|
||||||
|
"compiler/test-atomic-load-store-codegen.cc",
|
||||||
"compiler/test-basic-block-profiler.cc",
|
"compiler/test-basic-block-profiler.cc",
|
||||||
"compiler/test-branch-combine.cc",
|
"compiler/test-branch-combine.cc",
|
||||||
"compiler/test-calls-with-arraylike-or-spread.cc",
|
"compiler/test-calls-with-arraylike-or-spread.cc",
|
||||||
|
@ -623,6 +623,7 @@
|
|||||||
'codegen-tester/*': [SKIP],
|
'codegen-tester/*': [SKIP],
|
||||||
'test-accessor-assembler/*': [SKIP],
|
'test-accessor-assembler/*': [SKIP],
|
||||||
'test-assembler-*': [SKIP],
|
'test-assembler-*': [SKIP],
|
||||||
|
'test-atomic-load-store-codegen/*': [SKIP],
|
||||||
'test-basic-block-profiler/*': [SKIP],
|
'test-basic-block-profiler/*': [SKIP],
|
||||||
'test-branch-combine/*': [SKIP],
|
'test-branch-combine/*': [SKIP],
|
||||||
'test-calls-with-arraylike-or-spread/*': [SKIP],
|
'test-calls-with-arraylike-or-spread/*': [SKIP],
|
||||||
|
398
test/cctest/compiler/test-atomic-load-store-codegen.cc
Normal file
398
test/cctest/compiler/test-atomic-load-store-codegen.cc
Normal file
@ -0,0 +1,398 @@
|
|||||||
|
// Copyright 2021 the V8 project authors. All rights reserved. Use of this
|
||||||
|
// source code is governed by a BSD-style license that can be found in the
|
||||||
|
// LICENSE file.
|
||||||
|
|
||||||
|
#include "src/base/bits.h"
|
||||||
|
#include "src/objects/objects-inl.h"
|
||||||
|
#include "test/cctest/cctest.h"
|
||||||
|
#include "test/cctest/compiler/codegen-tester.h"
|
||||||
|
#include "test/cctest/compiler/value-helper.h"
|
||||||
|
|
||||||
|
namespace v8 {
|
||||||
|
namespace internal {
|
||||||
|
namespace compiler {
|
||||||
|
|
||||||
|
#if V8_TARGET_LITTLE_ENDIAN
|
||||||
|
#define LSB(addr, bytes) addr
|
||||||
|
#elif V8_TARGET_BIG_ENDIAN
|
||||||
|
#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
|
||||||
|
#else
|
||||||
|
#error "Unknown Architecture"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define TEST_ATOMIC_LOAD_INTEGER(ctype, itype, mach_type, order) \
|
||||||
|
do { \
|
||||||
|
ctype buffer[1]; \
|
||||||
|
\
|
||||||
|
RawMachineAssemblerTester<ctype> m; \
|
||||||
|
Node* base = m.PointerConstant(&buffer[0]); \
|
||||||
|
Node* index = m.Int32Constant(0); \
|
||||||
|
AtomicLoadParameters params(mach_type, order); \
|
||||||
|
if (mach_type.MemSize() == 8) { \
|
||||||
|
m.Return(m.AtomicLoad64(params, base, index)); \
|
||||||
|
} else { \
|
||||||
|
m.Return(m.AtomicLoad(params, base, index)); \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
FOR_INPUTS(ctype, itype, i) { \
|
||||||
|
buffer[0] = i; \
|
||||||
|
CHECK_EQ(i, m.Call()); \
|
||||||
|
} \
|
||||||
|
} while (false)
|
||||||
|
|
||||||
|
TEST(AcquireLoadInteger) {
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(int8_t, int8, MachineType::Int8(),
|
||||||
|
AtomicMemoryOrder::kAcqRel);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(uint8_t, uint8, MachineType::Uint8(),
|
||||||
|
AtomicMemoryOrder::kAcqRel);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(int16_t, int16, MachineType::Int16(),
|
||||||
|
AtomicMemoryOrder::kAcqRel);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(uint16_t, uint16, MachineType::Uint16(),
|
||||||
|
AtomicMemoryOrder::kAcqRel);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(int32_t, int32, MachineType::Int32(),
|
||||||
|
AtomicMemoryOrder::kAcqRel);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(uint32_t, uint32, MachineType::Uint32(),
|
||||||
|
AtomicMemoryOrder::kAcqRel);
|
||||||
|
#if V8_TARGET_ARCH_64_BIT
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(uint64_t, uint64, MachineType::Uint64(),
|
||||||
|
AtomicMemoryOrder::kAcqRel);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(SeqCstLoadInteger) {
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(int8_t, int8, MachineType::Int8(),
|
||||||
|
AtomicMemoryOrder::kSeqCst);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(uint8_t, uint8, MachineType::Uint8(),
|
||||||
|
AtomicMemoryOrder::kSeqCst);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(int16_t, int16, MachineType::Int16(),
|
||||||
|
AtomicMemoryOrder::kSeqCst);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(uint16_t, uint16, MachineType::Uint16(),
|
||||||
|
AtomicMemoryOrder::kSeqCst);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(int32_t, int32, MachineType::Int32(),
|
||||||
|
AtomicMemoryOrder::kSeqCst);
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(uint32_t, uint32, MachineType::Uint32(),
|
||||||
|
AtomicMemoryOrder::kSeqCst);
|
||||||
|
#if V8_TARGET_ARCH_64_BIT
|
||||||
|
TEST_ATOMIC_LOAD_INTEGER(uint64_t, uint64, MachineType::Uint64(),
|
||||||
|
AtomicMemoryOrder::kSeqCst);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
// Mostly same as CHECK_EQ() but customized for compressed tagged values.
|
||||||
|
template <typename CType>
|
||||||
|
void CheckEq(CType in_value, CType out_value) {
|
||||||
|
CHECK_EQ(in_value, out_value);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef V8_COMPRESS_POINTERS
|
||||||
|
// Specializations for checking the result of compressing store.
|
||||||
|
template <>
|
||||||
|
void CheckEq<Object>(Object in_value, Object out_value) {
|
||||||
|
// Compare only lower 32-bits of the value because tagged load/stores are
|
||||||
|
// 32-bit operations anyway.
|
||||||
|
CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
|
||||||
|
static_cast<Tagged_t>(out_value.ptr()));
|
||||||
|
}
|
||||||
|
|
||||||
|
template <>
|
||||||
|
void CheckEq<HeapObject>(HeapObject in_value, HeapObject out_value) {
|
||||||
|
return CheckEq<Object>(in_value, out_value);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <>
|
||||||
|
void CheckEq<Smi>(Smi in_value, Smi out_value) {
|
||||||
|
return CheckEq<Object>(in_value, out_value);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
template <typename TaggedT>
void InitBuffer(TaggedT* buffer, size_t length, MachineType type) {
  const size_t kBufferSize = sizeof(TaggedT) * length;

  // Tagged field loads require values to be properly tagged because of the
  // pointer decompression that may be happening during the load.
  Isolate* isolate = CcTest::InitIsolateOnce();
  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
  if (type.IsTaggedSigned()) {
    for (size_t i = 0; i < length; i++) {
      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
    }
  } else {
    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
    if (!type.IsTaggedPointer()) {
      // Also add some Smis if we are checking the AnyTagged case.
      for (size_t i = 0; i < length / 2; i++) {
        smi_view[i] =
            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
      }
    }
  }
}

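// A sketch of why the buffer is seeded with Smis and root-table pointers
// rather than arbitrary bit patterns (illustrative, not from the original
// CL): decompression of a loaded tagged value may rebase it against the
// isolate, so only properly tagged values survive a load intact. A Smi is
// such a value:
//
//   Smi seed = Smi::FromInt(42);  // tagged immediate, safe to load back
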
template <typename TaggedT>
void AtomicLoadTagged(MachineType type, AtomicMemoryOrder order) {
  const int kNumElems = 16;
  TaggedT buffer[kNumElems];

  InitBuffer(buffer, kNumElems, type);

  for (int i = 0; i < kNumElems; i++) {
    BufferedRawMachineAssemblerTester<TaggedT> m;
    TaggedT* base_pointer = &buffer[0];
    if (COMPRESS_POINTERS_BOOL) {
      base_pointer = reinterpret_cast<TaggedT*>(LSB(base_pointer, kTaggedSize));
    }
    Node* base = m.PointerConstant(base_pointer);
    Node* index = m.Int32Constant(i * sizeof(buffer[0]));
    AtomicLoadParameters params(type, order);
    Node* load;
    if (kTaggedSize == 8) {
      load = m.AtomicLoad64(params, base, index);
    } else {
      load = m.AtomicLoad(params, base, index);
    }
    m.Return(load);
    CheckEq<TaggedT>(buffer[i], m.Call());
  }
}
}  // namespace

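// Sketch of the width selection above (an inference, not stated in the CL):
// kTaggedSize is 8 only when pointer compression is off on a 64-bit target,
// so tagged atomic accesses need the 64-bit machine operator there; with
// compression, or on 32-bit targets, a tagged slot is 4 bytes wide and the
// 32-bit operator is the correct one:
//
//   Node* load = (kTaggedSize == 8) ? m.AtomicLoad64(params, base, index)
//                                   : m.AtomicLoad(params, base, index);
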
TEST(AcquireLoadTagged) {
  AtomicLoadTagged<Smi>(MachineType::TaggedSigned(),
                        AtomicMemoryOrder::kAcqRel);
  AtomicLoadTagged<HeapObject>(MachineType::TaggedPointer(),
                               AtomicMemoryOrder::kAcqRel);
  AtomicLoadTagged<Object>(MachineType::AnyTagged(),
                           AtomicMemoryOrder::kAcqRel);
}

TEST(SeqCstLoadTagged) {
  AtomicLoadTagged<Smi>(MachineType::TaggedSigned(),
                        AtomicMemoryOrder::kSeqCst);
  AtomicLoadTagged<HeapObject>(MachineType::TaggedPointer(),
                               AtomicMemoryOrder::kSeqCst);
  AtomicLoadTagged<Object>(MachineType::AnyTagged(),
                           AtomicMemoryOrder::kSeqCst);
}

#define TEST_ATOMIC_STORE_INTEGER(ctype, itype, mach_type, order)             \
  do {                                                                        \
    ctype buffer[1];                                                          \
    buffer[0] = static_cast<ctype>(-1);                                       \
                                                                              \
    BufferedRawMachineAssemblerTester<int32_t> m(mach_type);                  \
    Node* value = m.Parameter(0);                                             \
    Node* base = m.PointerConstant(&buffer[0]);                               \
    Node* index = m.Int32Constant(0);                                         \
    AtomicStoreParameters params(mach_type.representation(), kNoWriteBarrier, \
                                 order);                                      \
    if (mach_type.MemSize() == 8) {                                           \
      m.AtomicStore64(params, base, index, value, nullptr);                   \
    } else {                                                                  \
      m.AtomicStore(params, base, index, value);                              \
    }                                                                         \
                                                                              \
    int32_t OK = 0x29000;                                                     \
    m.Return(m.Int32Constant(OK));                                            \
                                                                              \
    FOR_INPUTS(ctype, itype, i) {                                             \
      CHECK_EQ(OK, m.Call(i));                                                \
      CHECK_EQ(i, buffer[0]);                                                 \
    }                                                                         \
  } while (false)

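// A hand-expanded sketch (not part of the original CL) of one
// TEST_ATOMIC_STORE_INTEGER instantiation for int32_t: the graph atomically
// stores the parameter to buffer[0] with the requested order and returns a
// sentinel, and the input loop checks that every value reaches memory. On
// 64-bit targets AtomicStore64 takes the whole value in a single node, which
// is presumably why the macro passes nullptr for the high word.
V8_ALLOW_UNUSED static void AtomicStoreInt32RoundTripSketch(
    AtomicMemoryOrder order) {
  int32_t buffer[1];
  buffer[0] = -1;
  BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Int32());
  AtomicStoreParameters params(MachineRepresentation::kWord32, kNoWriteBarrier,
                               order);
  // MemSize() == 4 for Int32, so the 32-bit atomic store operator is used.
  m.AtomicStore(params, m.PointerConstant(&buffer[0]), m.Int32Constant(0),
                m.Parameter(0));
  int32_t OK = 0x29000;
  m.Return(m.Int32Constant(OK));
  FOR_INT32_INPUTS(i) {
    CHECK_EQ(OK, m.Call(i));
    CHECK_EQ(i, buffer[0]);
  }
}
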
TEST(ReleaseStoreInteger) {
  TEST_ATOMIC_STORE_INTEGER(int8_t, int8, MachineType::Int8(),
                            AtomicMemoryOrder::kAcqRel);
  TEST_ATOMIC_STORE_INTEGER(uint8_t, uint8, MachineType::Uint8(),
                            AtomicMemoryOrder::kAcqRel);
  TEST_ATOMIC_STORE_INTEGER(int16_t, int16, MachineType::Int16(),
                            AtomicMemoryOrder::kAcqRel);
  TEST_ATOMIC_STORE_INTEGER(uint16_t, uint16, MachineType::Uint16(),
                            AtomicMemoryOrder::kAcqRel);
  TEST_ATOMIC_STORE_INTEGER(int32_t, int32, MachineType::Int32(),
                            AtomicMemoryOrder::kAcqRel);
  TEST_ATOMIC_STORE_INTEGER(uint32_t, uint32, MachineType::Uint32(),
                            AtomicMemoryOrder::kAcqRel);
#if V8_TARGET_ARCH_64_BIT
  TEST_ATOMIC_STORE_INTEGER(uint64_t, uint64, MachineType::Uint64(),
                            AtomicMemoryOrder::kAcqRel);
#endif
}

TEST(SeqCstStoreInteger) {
  TEST_ATOMIC_STORE_INTEGER(int8_t, int8, MachineType::Int8(),
                            AtomicMemoryOrder::kSeqCst);
  TEST_ATOMIC_STORE_INTEGER(uint8_t, uint8, MachineType::Uint8(),
                            AtomicMemoryOrder::kSeqCst);
  TEST_ATOMIC_STORE_INTEGER(int16_t, int16, MachineType::Int16(),
                            AtomicMemoryOrder::kSeqCst);
  TEST_ATOMIC_STORE_INTEGER(uint16_t, uint16, MachineType::Uint16(),
                            AtomicMemoryOrder::kSeqCst);
  TEST_ATOMIC_STORE_INTEGER(int32_t, int32, MachineType::Int32(),
                            AtomicMemoryOrder::kSeqCst);
  TEST_ATOMIC_STORE_INTEGER(uint32_t, uint32, MachineType::Uint32(),
                            AtomicMemoryOrder::kSeqCst);
#if V8_TARGET_ARCH_64_BIT
  TEST_ATOMIC_STORE_INTEGER(uint64_t, uint64, MachineType::Uint64(),
                            AtomicMemoryOrder::kSeqCst);
#endif
}

namespace {
template <typename TaggedT>
void AtomicStoreTagged(MachineType type, AtomicMemoryOrder order) {
  // This tests that tagged values are correctly transferred by atomic loads
  // and stores from in_buffer to out_buffer. For each particular element in
  // in_buffer, it is copied to a different index in out_buffer, and all other
  // indices are zapped, to test that instructions of the correct width are
  // emitted.

  const int kNumElems = 16;
  TaggedT in_buffer[kNumElems];
  TaggedT out_buffer[kNumElems];
  uintptr_t zap_data[] = {kZapValue, kZapValue};
  TaggedT zap_value;

  STATIC_ASSERT(sizeof(TaggedT) <= sizeof(zap_data));
  MemCopy(&zap_value, &zap_data, sizeof(TaggedT));
  InitBuffer(in_buffer, kNumElems, type);

#ifdef V8_TARGET_BIG_ENDIAN
  int offset = sizeof(TaggedT) - ElementSizeInBytes(type.representation());
#else
  int offset = 0;
#endif

  for (int32_t x = 0; x < kNumElems; x++) {
    int32_t y = kNumElems - x - 1;

    RawMachineAssemblerTester<int32_t> m;
    int32_t OK = 0x29000 + x;
    Node* in_base = m.PointerConstant(in_buffer);
    Node* in_index = m.IntPtrConstant(x * sizeof(TaggedT) + offset);
    Node* out_base = m.PointerConstant(out_buffer);
    Node* out_index = m.IntPtrConstant(y * sizeof(TaggedT) + offset);

    Node* load;
    AtomicLoadParameters load_params(type, order);
    AtomicStoreParameters store_params(type.representation(), kNoWriteBarrier,
                                       order);
    if (kTaggedSize == 4) {
      load = m.AtomicLoad(load_params, in_base, in_index);
      m.AtomicStore(store_params, out_base, out_index, load);
    } else {
      DCHECK(m.machine()->Is64());
      load = m.AtomicLoad64(load_params, in_base, in_index);
      m.AtomicStore64(store_params, out_base, out_index, load, nullptr);
    }

    m.Return(m.Int32Constant(OK));

    for (int32_t z = 0; z < kNumElems; z++) {
      out_buffer[z] = zap_value;
    }
    CHECK_NE(in_buffer[x], out_buffer[y]);
    CHECK_EQ(OK, m.Call());
    // Mostly the same as CHECK_EQ(), but customized for compressed tagged
    // values.
    CheckEq<TaggedT>(in_buffer[x], out_buffer[y]);
    for (int32_t z = 0; z < kNumElems; z++) {
      if (z != y) CHECK_EQ(zap_value, out_buffer[z]);
    }
  }
}
}  // namespace

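// Sketch of the zap pattern above (illustrative): pre-filling out_buffer with
// kZapValue means that a store of the wrong width, or to the wrong index,
// leaves a detectable zap fragment behind, so the final loop catches both
// addressing and width bugs:
//
//   out_buffer[z] = zap_value;                        // before the call
//   if (z != y) CHECK_EQ(zap_value, out_buffer[z]);   // after the call
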
TEST(ReleaseStoreTagged) {
  AtomicStoreTagged<Smi>(MachineType::TaggedSigned(),
                         AtomicMemoryOrder::kAcqRel);
  AtomicStoreTagged<HeapObject>(MachineType::TaggedPointer(),
                                AtomicMemoryOrder::kAcqRel);
  AtomicStoreTagged<Object>(MachineType::AnyTagged(),
                            AtomicMemoryOrder::kAcqRel);
}

TEST(SeqCstStoreTagged) {
  AtomicStoreTagged<Smi>(MachineType::TaggedSigned(),
                         AtomicMemoryOrder::kSeqCst);
  AtomicStoreTagged<HeapObject>(MachineType::TaggedPointer(),
                                AtomicMemoryOrder::kSeqCst);
  AtomicStoreTagged<Object>(MachineType::AnyTagged(),
                            AtomicMemoryOrder::kSeqCst);
}

#if V8_TARGET_ARCH_32_BIT

namespace {
void TestAtomicPairLoadInteger(AtomicMemoryOrder order) {
  uint64_t buffer[1];
  uint32_t high;
  uint32_t low;

  BufferedRawMachineAssemblerTester<int32_t> m;
  Node* base = m.PointerConstant(&buffer[0]);
  Node* index = m.Int32Constant(0);

  Node* pair_load = m.AtomicLoad64(
      AtomicLoadParameters(MachineType::Uint64(), order), base, index);
  m.StoreToPointer(&low, MachineRepresentation::kWord32,
                   m.Projection(0, pair_load));
  m.StoreToPointer(&high, MachineRepresentation::kWord32,
                   m.Projection(1, pair_load));

  int32_t OK = 0x29000;
  m.Return(m.Int32Constant(OK));

  FOR_UINT64_INPUTS(i) {
    buffer[0] = i;
    CHECK_EQ(OK, m.Call());
    CHECK_EQ(i, make_uint64(high, low));
  }
}
}  // namespace

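// Sketch of the pair-load plumbing above (illustrative): on 32-bit targets a
// 64-bit atomic load produces a (low, high) word pair, exposed as projections
// 0 and 1 of the load node. The test writes both halves back to C++ memory
// and reassembles them for the comparison:
//
//   uint64_t value = make_uint64(high, low);  // (uint64_t{high} << 32) | low
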
TEST(AcquirePairLoadInteger) {
  TestAtomicPairLoadInteger(AtomicMemoryOrder::kAcqRel);
}

TEST(SeqCstPairLoadInteger) {
  TestAtomicPairLoadInteger(AtomicMemoryOrder::kSeqCst);
}

namespace {
void TestAtomicPairStoreInteger(AtomicMemoryOrder order) {
  uint64_t buffer[1];

  BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
                                               MachineType::Uint32());
  Node* base = m.PointerConstant(&buffer[0]);
  Node* index = m.Int32Constant(0);

  m.AtomicStore64(AtomicStoreParameters(MachineRepresentation::kWord64,
                                        kNoWriteBarrier, order),
                  base, index, m.Parameter(0), m.Parameter(1));

  int32_t OK = 0x29000;
  m.Return(m.Int32Constant(OK));

  FOR_UINT64_INPUTS(i) {
    CHECK_EQ(OK, m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
                        static_cast<uint32_t>(i >> 32)));
    CHECK_EQ(i, buffer[0]);
  }
}
}  // namespace

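// Sketch of the pair-store calling convention above (illustrative): the
// 64-bit input is split into two 32-bit graph parameters, low word first,
// mirroring the (low, high) pair produced by a pair load:
//
//   m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),  // low word
//          static_cast<uint32_t>(i >> 32));        // high word
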
TEST(ReleasePairStoreInteger) {
  TestAtomicPairStoreInteger(AtomicMemoryOrder::kAcqRel);
}

TEST(SeqCstPairStoreInteger) {
  TestAtomicPairStoreInteger(AtomicMemoryOrder::kSeqCst);
}

#endif  // V8_TARGET_ARCH_32_BIT

}  // namespace compiler
}  // namespace internal
}  // namespace v8