Revert "[compiler] Support acq/rel accesses and atomic accesses on tagged"

This reverts commit faf2208a0b.

Reason for revert: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20-%20arm64%20-%20sim%20-%20pointer%20compression/10870/overview

Original change's description:
> [compiler] Support acq/rel accesses and atomic accesses on tagged
>
> This CL adds an AtomicMemoryOrder parameter to the various atomic load
> and store operators. Currently only acquire release (kAcqRel) and
> sequentially consistent (kSeqCst) orders are supported.
>
> Additionally, atomic loads and stores are extended to work with tagged
> values.
>
> This CL is a pre-requisite for supporting atomic accesses in Torque,
> which is in turn a pre-requisite for prototyping shared strings.
>
> Bug: v8:11995
> Change-Id: Ic77d2640e2dc7e5581b1211a054c93210c219355
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3101765
> Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
> Reviewed-by: Zhi An Ng <zhin@chromium.org>
> Commit-Queue: Shu-yu Guo <syg@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#76393}

Bug: v8:11995
Change-Id: Id9936672f9e96c509b1cdf866de1ac5303996945
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3107229
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/main@{#76394}
Nico Hartmann 2021-08-19 15:48:36 +00:00 committed by V8 LUCI CQ
parent faf2208a0b
commit 746d62d4b9
35 changed files with 882 additions and 1955 deletions
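
For orientation before the per-file hunks: the core of the original CL was a small AtomicMemoryOrder enum plus an extra memory-order parameter on the atomic load/store operators, all of which this revert removes. A minimal sketch, condensed from the deleted src/codegen/atomic-memory-order.h and the call sites shown below (the CSA signatures are paraphrased, not exact):

#include <cstdint>
#include <ostream>

// Condensed from the deleted header; only these two orders were supported.
enum class AtomicMemoryOrder : uint8_t { kAcqRel, kSeqCst };

inline std::ostream& operator<<(std::ostream& os, AtomicMemoryOrder order) {
  return os << (order == AtomicMemoryOrder::kAcqRel ? "kAcqRel" : "kSeqCst");
}

// Call-site shape before vs. after the revert, paraphrased from the
// SharedArrayBuffer builtin hunks below:
//   AtomicLoad<Int8T>(AtomicMemoryOrder::kSeqCst, backing_store, index);  // with the CL
//   AtomicLoad<Int8T>(backing_store, index);  // after the revert (seq_cst implied)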

View File

@@ -2449,7 +2449,6 @@ v8_header_set("v8_internal_headers") {
"src/codegen/assembler-arch.h",
"src/codegen/assembler-inl.h",
"src/codegen/assembler.h",
"src/codegen/atomic-memory-order.h",
"src/codegen/bailout-reason.h",
"src/codegen/callable.h",
"src/codegen/code-comments.h",

View File

@@ -204,28 +204,26 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
Return(SmiFromInt32(AtomicLoad<Int8T>(AtomicMemoryOrder::kSeqCst,
backing_store, index_word)));
Return(SmiFromInt32(AtomicLoad<Int8T>(backing_store, index_word)));
BIND(&u8);
Return(SmiFromInt32(AtomicLoad<Uint8T>(AtomicMemoryOrder::kSeqCst,
backing_store, index_word)));
Return(SmiFromInt32(AtomicLoad<Uint8T>(backing_store, index_word)));
BIND(&i16);
Return(SmiFromInt32(AtomicLoad<Int16T>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));
Return(
SmiFromInt32(AtomicLoad<Int16T>(backing_store, WordShl(index_word, 1))));
BIND(&u16);
Return(SmiFromInt32(AtomicLoad<Uint16T>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));
Return(
SmiFromInt32(AtomicLoad<Uint16T>(backing_store, WordShl(index_word, 1))));
BIND(&i32);
Return(ChangeInt32ToTagged(AtomicLoad<Int32T>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
Return(ChangeInt32ToTagged(
AtomicLoad<Int32T>(backing_store, WordShl(index_word, 2))));
BIND(&u32);
Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
Return(ChangeUint32ToTagged(
AtomicLoad<Uint32T>(backing_store, WordShl(index_word, 2))));
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
BIND(&i64);
Goto(&u64);
@@ -237,12 +235,12 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
}
#else
BIND(&i64);
Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
Return(BigIntFromSigned64(
AtomicLoad64<AtomicInt64>(backing_store, WordShl(index_word, 3))));
BIND(&u64);
Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
Return(BigIntFromUnsigned64(
AtomicLoad64<AtomicUint64>(backing_store, WordShl(index_word, 3))));
#endif
// This shouldn't happen, we've already validated the type.
@@ -309,18 +307,18 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&u8);
AtomicStore(MachineRepresentation::kWord8, AtomicMemoryOrder::kSeqCst,
backing_store, index_word, value_word32);
AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
value_word32);
Return(value_integer);
BIND(&u16);
AtomicStore(MachineRepresentation::kWord16, AtomicMemoryOrder::kSeqCst,
backing_store, WordShl(index_word, 1), value_word32);
AtomicStore(MachineRepresentation::kWord16, backing_store,
WordShl(index_word, 1), value_word32);
Return(value_integer);
BIND(&u32);
AtomicStore(MachineRepresentation::kWord32, AtomicMemoryOrder::kSeqCst,
backing_store, WordShl(index_word, 2), value_word32);
AtomicStore(MachineRepresentation::kWord32, backing_store,
WordShl(index_word, 2), value_word32);
Return(value_integer);
BIND(&u64);
@@ -342,8 +340,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
TVARIABLE(UintPtrT, var_high);
BigIntToRawBytes(value_bigint, &var_low, &var_high);
TNode<UintPtrT> high = Is64() ? TNode<UintPtrT>() : var_high.value();
AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store,
WordShl(index_word, 3), var_low.value(), high);
AtomicStore64(backing_store, WordShl(index_word, 3), var_low.value(), high);
Return(value_bigint);
#endif

View File

@@ -2919,18 +2919,6 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
void TurboAssembler::AtomicStoreTaggedField(const Register& value,
const Register& dst_base,
const Register& dst_index,
const Register& temp) {
Add(temp, dst_base, dst_index);
if (COMPRESS_POINTERS_BOOL) {
Stlr(value.W(), temp);
} else {
Stlr(value, temp);
}
}
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
@@ -2962,40 +2950,6 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
Add(destination, kPtrComprCageBaseRegister, destination);
}
void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination,
const Register& base,
const Register& index,
const Register& temp) {
ASM_CODE_COMMENT(this);
Add(temp, base, index);
Ldar(destination.W(), temp);
if (FLAG_debug_code) {
// Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
Add(destination, destination,
((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
}
}
void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination,
const Register& base,
const Register& index,
const Register& temp) {
ASM_CODE_COMMENT(this);
Add(temp, base, index);
Ldar(destination.W(), temp);
Add(destination, kPtrComprCageBaseRegister, destination);
}
void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination,
const Register& base,
const Register& index,
const Register& temp) {
ASM_CODE_COMMENT(this);
Add(temp, base, index);
Ldar(destination.W(), temp);
Add(destination, kPtrComprCageBaseRegister, destination);
}
void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
ASM_CODE_COMMENT(this);
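
The arm64 helpers deleted above pair a release store (STLR) or acquire load (LDAR) with the existing pointer-compression add against kPtrComprCageBaseRegister. A hedged sketch of the same idea in portable C++ (illustrative only, compressed-pointer case; as the arm64 instruction selector later in this diff notes, acquire/release and seq_cst accesses can both lower to LDAR/STLR):

#include <atomic>
#include <cstdint>

// Not V8 code: sketch of what AtomicStoreTaggedField / AtomicDecompress* express.
void AtomicStoreTaggedSketch(std::atomic<uint32_t>* slot, uint32_t compressed) {
  slot->store(compressed, std::memory_order_release);  // STLR Wt, [addr]
}

uint64_t AtomicLoadDecompressSketch(const std::atomic<uint32_t>* slot,
                                    uint64_t cage_base) {
  uint32_t compressed = slot->load(std::memory_order_acquire);  // LDAR Wt, [addr]
  return cage_base + compressed;  // ADD against the cage base register
}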

View File

@@ -1371,9 +1371,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand);
void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
const Register& dst_index, const Register& temp);
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
@@ -1383,17 +1380,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
void AtomicDecompressTaggedSigned(const Register& destination,
const Register& base, const Register& index,
const Register& temp);
void AtomicDecompressTaggedPointer(const Register& destination,
const Register& base,
const Register& index,
const Register& temp);
void AtomicDecompressAnyTagged(const Register& destination,
const Register& base, const Register& index,
const Register& temp);
// Restore FP and LR from the values stored in the current frame. This will
// authenticate the LR when pointer authentication is enabled.
void RestoreFPAndLR();

View File

@@ -1,35 +0,0 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
#define V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
#include <ostream>
#include "src/base/logging.h"
namespace v8 {
namespace internal {
// Atomic memory orders supported by the compiler.
enum class AtomicMemoryOrder : uint8_t { kAcqRel, kSeqCst };
inline size_t hash_value(AtomicMemoryOrder order) {
return static_cast<uint8_t>(order);
}
inline std::ostream& operator<<(std::ostream& os, AtomicMemoryOrder order) {
switch (order) {
case AtomicMemoryOrder::kAcqRel:
return os << "kAcqRel";
case AtomicMemoryOrder::kSeqCst:
return os << "kSeqCst";
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_

View File

@@ -688,14 +688,6 @@ void Assembler::movq(XMMRegister dst, Operand src) {
emit_operand(dst, src);
}
void Assembler::movq(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
EMIT(0xD6);
emit_operand(src, dst);
}
void Assembler::cmov(Condition cc, Register dst, Operand src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.

View File

@@ -535,7 +535,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movzx_w(Register dst, Operand src);
void movq(XMMRegister dst, Operand src);
void movq(Operand dst, XMMRegister src);
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {

View File

@@ -294,17 +294,6 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
}
}
void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
Register value) {
if (COMPRESS_POINTERS_BOOL) {
movl(kScratchRegister, value);
xchgl(kScratchRegister, dst_field_operand);
} else {
movq(kScratchRegister, value);
xchgq(kScratchRegister, dst_field_operand);
}
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
ASM_CODE_COMMENT(this);
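
The x64 helper deleted above implements a sequentially consistent tagged store with XCHG, which is implicitly locked and therefore also acts as a full fence; a plain MOV only gives release semantics on x86's strong memory model (the IA-32 selector later in this diff draws the same distinction). A hedged illustration in portable C++ (not V8 code, compressed-pointer width shown):

#include <atomic>
#include <cstdint>

void SeqCstStoreSketch(std::atomic<uint32_t>* slot, uint32_t compressed) {
  // Typically compiled to XCHG [slot], reg on x86-64.
  slot->store(compressed, std::memory_order_seq_cst);
}

void ReleaseStoreSketch(std::atomic<uint32_t>* slot, uint32_t compressed) {
  // A plain MOV [slot], reg suffices for release ordering.
  slot->store(compressed, std::memory_order_release);
}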

View File

@@ -667,7 +667,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
void StoreTaggedField(Operand dst_field_operand, Register value);
void StoreTaggedSignedField(Operand dst_field_operand, Smi value);
void AtomicStoreTaggedField(Operand dst_field_operand, Register value);
// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);

View File

@@ -329,11 +329,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ dmb(ISH); \
} while (0)
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, order) \
do { \
__ dmb(ISH); \
__ asm_instr(i.InputRegister(0), i.InputOffset(1)); \
if (order == AtomicMemoryOrder::kSeqCst) __ dmb(ISH); \
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
do { \
__ dmb(ISH); \
__ asm_instr(i.InputRegister(2), \
MemOperand(i.InputRegister(0), i.InputRegister(1))); \
__ dmb(ISH); \
} while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
@@ -926,24 +927,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(0), DetermineStubCallMode());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchStoreWithWriteBarrier: // Fall through.
case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode;
if (arch_opcode == kArchStoreWithWriteBarrier) {
mode = static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
} else {
mode = AtomicStoreRecordWriteModeField::decode(instr->opcode());
}
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Operand offset(0);
if (arch_opcode == kArchAtomicStoreWithWriteBarrier) {
__ dmb(ISH);
}
if (addressing_mode == kMode_Offset_RI) {
int32_t immediate = i.InputInt32(1);
offset = Operand(immediate);
@@ -954,12 +946,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
offset = Operand(reg);
__ str(value, MemOperand(object, reg));
}
if (arch_opcode == kArchAtomicStoreWithWriteBarrier &&
AtomicMemoryOrderField::decode(instr->opcode()) ==
AtomicMemoryOrder::kSeqCst) {
__ dmb(ISH);
}
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
@@ -3328,16 +3314,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(strb,
AtomicMemoryOrderField::decode(opcode));
ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
break;
case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(strh,
AtomicMemoryOrderField::decode(opcode));
ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
break;
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(str,
AtomicMemoryOrderField::decode(opcode));
ASSEMBLE_ATOMIC_STORE_INTEGER(str);
break;
case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);

View File

@@ -430,18 +430,17 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
void EmitStore(InstructionSelector* selector, InstructionCode opcode,
size_t input_count, InstructionOperand* inputs, Node* index) {
ArmOperandGenerator g(selector);
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
if (g.CanBeImmediate(index, opcode)) {
inputs[input_count++] = g.UseImmediate(index);
opcode |= AddressingModeField::encode(kMode_Offset_RI);
} else if ((arch_opcode == kArmStr || arch_opcode == kAtomicStoreWord32) &&
} else if ((opcode == kArmStr) &&
TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
&inputs[3])) {
input_count = 4;
} else {
inputs[input_count++] = g.UseRegister(index);
if (arch_opcode == kArmVst1S128) {
if (opcode == kArmVst1S128) {
// Inputs are value, base, index, only care about base and index.
EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
} else {
@@ -641,60 +640,13 @@ void InstructionSelector::VisitProtectedLoad(Node* node) {
UNIMPLEMENTED();
}
namespace {
ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kFloat32:
return kArmVstrF32;
case MachineRepresentation::kFloat64:
return kArmVstrF64;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
return kArmStrb;
case MachineRepresentation::kWord16:
return kArmStrh;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
return kArmStr;
case MachineRepresentation::kSimd128:
return kArmVst1S128;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
}
ArchOpcode GetAtomicStoreOpcode(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord8:
return kAtomicStoreWord8;
case MachineRepresentation::kWord16:
return kAtomicStoreWord16;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
return kAtomicStoreWord32;
default:
UNREACHABLE();
}
}
void VisitStoreCommon(InstructionSelector* selector, Node* node,
StoreRepresentation store_rep,
base::Optional<AtomicMemoryOrder> atomic_order) {
ArmOperandGenerator g(selector);
void InstructionSelector::VisitStore(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
@@ -720,44 +672,58 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionCode code;
if (!atomic_order) {
code = kArchStoreWithWriteBarrier;
code |= MiscField::encode(static_cast<int>(record_write_mode));
} else {
code = kArchAtomicStoreWithWriteBarrier;
code |= AtomicMemoryOrderField::encode(*atomic_order);
code |= AtomicStoreRecordWriteModeField::encode(record_write_mode);
}
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
selector->Emit(code, 0, nullptr, input_count, inputs);
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs);
} else {
InstructionCode opcode = kArchNop;
if (!atomic_order) {
opcode = GetStoreOpcode(rep);
} else {
// Release stores emit DMB ISH; STR while sequentially consistent stores
// emit DMB ISH; STR; DMB ISH.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
opcode = GetAtomicStoreOpcode(rep);
opcode |= AtomicMemoryOrderField::encode(*atomic_order);
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kArmVstrF32;
break;
case MachineRepresentation::kFloat64:
opcode = kArmVstrF64;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = kArmStrb;
break;
case MachineRepresentation::kWord16:
opcode = kArmStrh;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kArmStr;
break;
case MachineRepresentation::kSimd128:
opcode = kArmVst1S128;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
ExternalReferenceMatcher m(base);
if (m.HasResolvedValue() &&
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
inputs[0] = g.UseRegister(value);
inputs[1] = g.UseImmediate(static_cast<int32_t>(delta));
opcode |= AddressingModeField::encode(kMode_Root);
selector->Emit(opcode, 0, nullptr, input_count, inputs);
Emit(opcode, 0, nullptr, input_count, inputs);
return;
}
}
@@ -766,17 +732,10 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
size_t input_count = 0;
inputs[input_count++] = g.UseRegister(value);
inputs[input_count++] = g.UseRegister(base);
EmitStore(selector, opcode, input_count, inputs, index);
EmitStore(this, opcode, input_count, inputs, index);
}
}
} // namespace
void InstructionSelector::VisitStore(Node* node) {
VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
base::nullopt);
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -2271,11 +2230,7 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
// The memory order is ignored as both acquire and sequentially consistent
// loads can emit LDR; DMB ISH.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
LoadRepresentation load_rep = atomic_load_params.representation();
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2287,9 +2242,6 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kAtomicLoadWord32;
break;
@@ -2301,9 +2253,34 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
VisitStoreCommon(this, node, store_params.store_representation(),
store_params.order());
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
}
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
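
The comments in the hunks above spell out the ARMv7 lowering this file relies on: acquire and seq_cst loads become LDR; DMB ISH, release stores become DMB ISH; STR, and seq_cst stores become DMB ISH; STR; DMB ISH. A hedged restatement in portable C++ (illustrative only; ARMv7 has no load-acquire/store-release instructions, hence the explicit fences):

#include <atomic>
#include <cstdint>

uint32_t AcquireOrSeqCstLoadSketch(const std::atomic<uint32_t>* p) {
  return p->load(std::memory_order_acquire);  // LDR; DMB ISH
}

void Armv7ReleaseStoreSketch(std::atomic<uint32_t>* p, uint32_t v) {
  p->store(v, std::memory_order_release);     // DMB ISH; STR
}

void Armv7SeqCstStoreSketch(std::atomic<uint32_t>* p, uint32_t v) {
  p->store(v, std::memory_order_seq_cst);     // DMB ISH; STR; DMB ISH
}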

View File

@@ -969,25 +969,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bind(ool->exit());
break;
}
case kArchAtomicStoreWithWriteBarrier: {
DCHECK_EQ(AddressingModeField::decode(instr->opcode()), kMode_MRR);
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
Register offset = i.InputRegister(1);
Register value = i.InputRegister(2);
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
__ AtomicStoreTaggedField(value, object, offset, i.TempRegister(0));
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
eq, ool->entry());
__ Bind(ool->exit());
break;
}
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -1830,18 +1811,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64LdrDecompressAnyTagged:
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdarDecompressTaggedSigned:
__ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
case kArm64LdarDecompressTaggedPointer:
__ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
case kArm64LdarDecompressAnyTagged:
__ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
case kArm64Str:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
@@ -1849,12 +1818,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64StlrCompressTagged:
// To be consistent with other STLR instructions, the value is stored at
// the 3rd input register instead of the 1st.
__ AtomicStoreTaggedField(i.InputRegister(2), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
case kArm64LdrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());

View File

@@ -11,404 +11,400 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(Arm64Add) \
V(Arm64Add32) \
V(Arm64And) \
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
V(Arm64Clz) \
V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
V(Arm64Cmn) \
V(Arm64Cmn32) \
V(Arm64Cnt) \
V(Arm64Cnt32) \
V(Arm64Cnt64) \
V(Arm64Tst) \
V(Arm64Tst32) \
V(Arm64Or) \
V(Arm64Or32) \
V(Arm64Orn) \
V(Arm64Orn32) \
V(Arm64Eor) \
V(Arm64Eor32) \
V(Arm64Eon) \
V(Arm64Eon32) \
V(Arm64Sadalp) \
V(Arm64Saddlp) \
V(Arm64Sub) \
V(Arm64Sub32) \
V(Arm64Mul) \
V(Arm64Mul32) \
V(Arm64Smull) \
V(Arm64Smull2) \
V(Arm64Uadalp) \
V(Arm64Uaddlp) \
V(Arm64Umull) \
V(Arm64Umull2) \
V(Arm64Madd) \
V(Arm64Madd32) \
V(Arm64Msub) \
V(Arm64Msub32) \
V(Arm64Mneg) \
V(Arm64Mneg32) \
V(Arm64Idiv) \
V(Arm64Idiv32) \
V(Arm64Udiv) \
V(Arm64Udiv32) \
V(Arm64Imod) \
V(Arm64Imod32) \
V(Arm64Umod) \
V(Arm64Umod32) \
V(Arm64Not) \
V(Arm64Not32) \
V(Arm64Lsl) \
V(Arm64Lsl32) \
V(Arm64Lsr) \
V(Arm64Lsr32) \
V(Arm64Asr) \
V(Arm64Asr32) \
V(Arm64Ror) \
V(Arm64Ror32) \
V(Arm64Mov32) \
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
V(Arm64Sxtb) \
V(Arm64Sxth) \
V(Arm64Sxtw) \
V(Arm64Sbfx) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
V(Arm64Ubfiz32) \
V(Arm64Bfi) \
V(Arm64Rbit) \
V(Arm64Rbit32) \
V(Arm64Rev) \
V(Arm64Rev32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64CompareAndBranch) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePair) \
V(Arm64Peek) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
V(Arm64Float32Sub) \
V(Arm64Float32Mul) \
V(Arm64Float32Div) \
V(Arm64Float32Abs) \
V(Arm64Float32Abd) \
V(Arm64Float32Neg) \
V(Arm64Float32Sqrt) \
V(Arm64Float32Fnmul) \
V(Arm64Float32RoundDown) \
V(Arm64Float32Max) \
V(Arm64Float32Min) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
V(Arm64Float64Mul) \
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
V(Arm64Float64Max) \
V(Arm64Float64Min) \
V(Arm64Float64Abs) \
V(Arm64Float64Abd) \
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
V(Arm64Float64Fnmul) \
V(Arm64Float64RoundDown) \
V(Arm64Float32RoundUp) \
V(Arm64Float64RoundUp) \
V(Arm64Float64RoundTiesAway) \
V(Arm64Float32RoundTruncate) \
V(Arm64Float64RoundTruncate) \
V(Arm64Float32RoundTiesEven) \
V(Arm64Float64RoundTiesEven) \
V(Arm64Float64SilenceNaN) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float32ToInt32) \
V(Arm64Float64ToInt32) \
V(Arm64Float32ToUint32) \
V(Arm64Float64ToUint32) \
V(Arm64Float32ToInt64) \
V(Arm64Float64ToInt64) \
V(Arm64Float32ToUint64) \
V(Arm64Float64ToUint64) \
V(Arm64Int32ToFloat32) \
V(Arm64Int32ToFloat64) \
V(Arm64Int64ToFloat32) \
V(Arm64Int64ToFloat64) \
V(Arm64Uint32ToFloat32) \
V(Arm64Uint32ToFloat64) \
V(Arm64Uint64ToFloat32) \
V(Arm64Uint64ToFloat64) \
V(Arm64Float64ExtractLowWord32) \
V(Arm64Float64ExtractHighWord32) \
V(Arm64Float64InsertLowWord32) \
V(Arm64Float64InsertHighWord32) \
V(Arm64Float64MoveU64) \
V(Arm64U64MoveFloat64) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \
V(Arm64StrD) \
V(Arm64LdrQ) \
V(Arm64StrQ) \
V(Arm64Ldrb) \
V(Arm64Ldrsb) \
V(Arm64LdrsbW) \
V(Arm64Strb) \
V(Arm64Ldrh) \
V(Arm64Ldrsh) \
V(Arm64LdrshW) \
V(Arm64Strh) \
V(Arm64Ldrsw) \
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
V(Arm64LdrDecompressTaggedSigned) \
V(Arm64LdrDecompressTaggedPointer) \
V(Arm64LdrDecompressAnyTagged) \
V(Arm64LdarDecompressTaggedSigned) \
V(Arm64LdarDecompressTaggedPointer) \
V(Arm64LdarDecompressAnyTagged) \
V(Arm64Str) \
V(Arm64StrCompressTagged) \
V(Arm64StlrCompressTagged) \
V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
V(Arm64Sxtl) \
V(Arm64Sxtl2) \
V(Arm64Uxtl) \
V(Arm64Uxtl2) \
V(Arm64F64x2Splat) \
V(Arm64F64x2ExtractLane) \
V(Arm64F64x2ReplaceLane) \
V(Arm64F64x2Abs) \
V(Arm64F64x2Neg) \
V(Arm64F64x2Sqrt) \
V(Arm64F64x2Add) \
V(Arm64F64x2Sub) \
V(Arm64F64x2Mul) \
V(Arm64F64x2MulElement) \
V(Arm64F64x2Div) \
V(Arm64F64x2Min) \
V(Arm64F64x2Max) \
V(Arm64F64x2Eq) \
V(Arm64F64x2Ne) \
V(Arm64F64x2Lt) \
V(Arm64F64x2Le) \
V(Arm64F64x2Qfma) \
V(Arm64F64x2Qfms) \
V(Arm64F64x2Pmin) \
V(Arm64F64x2Pmax) \
V(Arm64F64x2ConvertLowI32x4S) \
V(Arm64F64x2ConvertLowI32x4U) \
V(Arm64F64x2PromoteLowF32x4) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
V(Arm64F32x4SConvertI32x4) \
V(Arm64F32x4UConvertI32x4) \
V(Arm64F32x4Abs) \
V(Arm64F32x4Neg) \
V(Arm64F32x4Sqrt) \
V(Arm64F32x4RecipApprox) \
V(Arm64F32x4RecipSqrtApprox) \
V(Arm64F32x4Add) \
V(Arm64F32x4Sub) \
V(Arm64F32x4Mul) \
V(Arm64F32x4MulElement) \
V(Arm64F32x4Div) \
V(Arm64F32x4Min) \
V(Arm64F32x4Max) \
V(Arm64F32x4Eq) \
V(Arm64F32x4Ne) \
V(Arm64F32x4Lt) \
V(Arm64F32x4Le) \
V(Arm64F32x4Qfma) \
V(Arm64F32x4Qfms) \
V(Arm64F32x4Pmin) \
V(Arm64F32x4Pmax) \
V(Arm64F32x4DemoteF64x2Zero) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
V(Arm64I64x2Abs) \
V(Arm64I64x2Neg) \
V(Arm64I64x2Shl) \
V(Arm64I64x2ShrS) \
V(Arm64I64x2Add) \
V(Arm64I64x2Sub) \
V(Arm64I64x2Mul) \
V(Arm64I64x2Eq) \
V(Arm64I64x2Ne) \
V(Arm64I64x2GtS) \
V(Arm64I64x2GeS) \
V(Arm64I64x2ShrU) \
V(Arm64I64x2BitMask) \
V(Arm64I32x4Splat) \
V(Arm64I32x4ExtractLane) \
V(Arm64I32x4ReplaceLane) \
V(Arm64I32x4SConvertF32x4) \
V(Arm64I32x4Neg) \
V(Arm64I32x4Shl) \
V(Arm64I32x4ShrS) \
V(Arm64I32x4Add) \
V(Arm64I32x4Sub) \
V(Arm64I32x4Mul) \
V(Arm64I32x4Mla) \
V(Arm64I32x4Mls) \
V(Arm64I32x4MinS) \
V(Arm64I32x4MaxS) \
V(Arm64I32x4Eq) \
V(Arm64I32x4Ne) \
V(Arm64I32x4GtS) \
V(Arm64I32x4GeS) \
V(Arm64I32x4UConvertF32x4) \
V(Arm64I32x4ShrU) \
V(Arm64I32x4MinU) \
V(Arm64I32x4MaxU) \
V(Arm64I32x4GtU) \
V(Arm64I32x4GeU) \
V(Arm64I32x4Abs) \
V(Arm64I32x4BitMask) \
V(Arm64I32x4DotI16x8S) \
V(Arm64I32x4TruncSatF64x2SZero) \
V(Arm64I32x4TruncSatF64x2UZero) \
V(Arm64I16x8Splat) \
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
V(Arm64I16x8ReplaceLane) \
V(Arm64I16x8Neg) \
V(Arm64I16x8Shl) \
V(Arm64I16x8ShrS) \
V(Arm64I16x8SConvertI32x4) \
V(Arm64I16x8Add) \
V(Arm64I16x8AddSatS) \
V(Arm64I16x8Sub) \
V(Arm64I16x8SubSatS) \
V(Arm64I16x8Mul) \
V(Arm64I16x8Mla) \
V(Arm64I16x8Mls) \
V(Arm64I16x8MinS) \
V(Arm64I16x8MaxS) \
V(Arm64I16x8Eq) \
V(Arm64I16x8Ne) \
V(Arm64I16x8GtS) \
V(Arm64I16x8GeS) \
V(Arm64I16x8ShrU) \
V(Arm64I16x8UConvertI32x4) \
V(Arm64I16x8AddSatU) \
V(Arm64I16x8SubSatU) \
V(Arm64I16x8MinU) \
V(Arm64I16x8MaxU) \
V(Arm64I16x8GtU) \
V(Arm64I16x8GeU) \
V(Arm64I16x8RoundingAverageU) \
V(Arm64I16x8Q15MulRSatS) \
V(Arm64I16x8Abs) \
V(Arm64I16x8BitMask) \
V(Arm64I8x16Splat) \
V(Arm64I8x16ExtractLaneU) \
V(Arm64I8x16ExtractLaneS) \
V(Arm64I8x16ReplaceLane) \
V(Arm64I8x16Neg) \
V(Arm64I8x16Shl) \
V(Arm64I8x16ShrS) \
V(Arm64I8x16SConvertI16x8) \
V(Arm64I8x16Add) \
V(Arm64I8x16AddSatS) \
V(Arm64I8x16Sub) \
V(Arm64I8x16SubSatS) \
V(Arm64I8x16Mla) \
V(Arm64I8x16Mls) \
V(Arm64I8x16MinS) \
V(Arm64I8x16MaxS) \
V(Arm64I8x16Eq) \
V(Arm64I8x16Ne) \
V(Arm64I8x16GtS) \
V(Arm64I8x16GeS) \
V(Arm64I8x16ShrU) \
V(Arm64I8x16UConvertI16x8) \
V(Arm64I8x16AddSatU) \
V(Arm64I8x16SubSatU) \
V(Arm64I8x16MinU) \
V(Arm64I8x16MaxU) \
V(Arm64I8x16GtU) \
V(Arm64I8x16GeU) \
V(Arm64I8x16RoundingAverageU) \
V(Arm64I8x16Abs) \
V(Arm64I8x16BitMask) \
V(Arm64S128Const) \
V(Arm64S128Zero) \
V(Arm64S128Dup) \
V(Arm64S128And) \
V(Arm64S128Or) \
V(Arm64S128Xor) \
V(Arm64S128Not) \
V(Arm64S128Select) \
V(Arm64S128AndNot) \
V(Arm64Ssra) \
V(Arm64Usra) \
V(Arm64S32x4ZipLeft) \
V(Arm64S32x4ZipRight) \
V(Arm64S32x4UnzipLeft) \
V(Arm64S32x4UnzipRight) \
V(Arm64S32x4TransposeLeft) \
V(Arm64S32x4TransposeRight) \
V(Arm64S32x4Shuffle) \
V(Arm64S16x8ZipLeft) \
V(Arm64S16x8ZipRight) \
V(Arm64S16x8UnzipLeft) \
V(Arm64S16x8UnzipRight) \
V(Arm64S16x8TransposeLeft) \
V(Arm64S16x8TransposeRight) \
V(Arm64S8x16ZipLeft) \
V(Arm64S8x16ZipRight) \
V(Arm64S8x16UnzipLeft) \
V(Arm64S8x16UnzipRight) \
V(Arm64S8x16TransposeLeft) \
V(Arm64S8x16TransposeRight) \
V(Arm64S8x16Concat) \
V(Arm64I8x16Swizzle) \
V(Arm64I8x16Shuffle) \
V(Arm64S32x2Reverse) \
V(Arm64S16x4Reverse) \
V(Arm64S16x2Reverse) \
V(Arm64S8x8Reverse) \
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
V(Arm64I64x2AllTrue) \
V(Arm64I32x4AllTrue) \
V(Arm64I16x8AllTrue) \
V(Arm64I8x16AllTrue) \
V(Arm64LoadSplat) \
V(Arm64LoadLane) \
V(Arm64StoreLane) \
V(Arm64S128Load8x8S) \
V(Arm64S128Load8x8U) \
V(Arm64S128Load16x4S) \
V(Arm64S128Load16x4U) \
V(Arm64S128Load32x2S) \
V(Arm64S128Load32x2U) \
V(Arm64Word64AtomicLoadUint64) \
V(Arm64Word64AtomicStoreWord64) \
V(Arm64Word64AtomicAddUint64) \
V(Arm64Word64AtomicSubUint64) \
V(Arm64Word64AtomicAndUint64) \
V(Arm64Word64AtomicOrUint64) \
V(Arm64Word64AtomicXorUint64) \
V(Arm64Word64AtomicExchangeUint64) \
#define TARGET_ARCH_OPCODE_LIST(V) \
V(Arm64Add) \
V(Arm64Add32) \
V(Arm64And) \
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
V(Arm64Clz) \
V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
V(Arm64Cmn) \
V(Arm64Cmn32) \
V(Arm64Cnt) \
V(Arm64Cnt32) \
V(Arm64Cnt64) \
V(Arm64Tst) \
V(Arm64Tst32) \
V(Arm64Or) \
V(Arm64Or32) \
V(Arm64Orn) \
V(Arm64Orn32) \
V(Arm64Eor) \
V(Arm64Eor32) \
V(Arm64Eon) \
V(Arm64Eon32) \
V(Arm64Sadalp) \
V(Arm64Saddlp) \
V(Arm64Sub) \
V(Arm64Sub32) \
V(Arm64Mul) \
V(Arm64Mul32) \
V(Arm64Smull) \
V(Arm64Smull2) \
V(Arm64Uadalp) \
V(Arm64Uaddlp) \
V(Arm64Umull) \
V(Arm64Umull2) \
V(Arm64Madd) \
V(Arm64Madd32) \
V(Arm64Msub) \
V(Arm64Msub32) \
V(Arm64Mneg) \
V(Arm64Mneg32) \
V(Arm64Idiv) \
V(Arm64Idiv32) \
V(Arm64Udiv) \
V(Arm64Udiv32) \
V(Arm64Imod) \
V(Arm64Imod32) \
V(Arm64Umod) \
V(Arm64Umod32) \
V(Arm64Not) \
V(Arm64Not32) \
V(Arm64Lsl) \
V(Arm64Lsl32) \
V(Arm64Lsr) \
V(Arm64Lsr32) \
V(Arm64Asr) \
V(Arm64Asr32) \
V(Arm64Ror) \
V(Arm64Ror32) \
V(Arm64Mov32) \
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
V(Arm64Sxtb) \
V(Arm64Sxth) \
V(Arm64Sxtw) \
V(Arm64Sbfx) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
V(Arm64Ubfiz32) \
V(Arm64Bfi) \
V(Arm64Rbit) \
V(Arm64Rbit32) \
V(Arm64Rev) \
V(Arm64Rev32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64CompareAndBranch) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePair) \
V(Arm64Peek) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
V(Arm64Float32Sub) \
V(Arm64Float32Mul) \
V(Arm64Float32Div) \
V(Arm64Float32Abs) \
V(Arm64Float32Abd) \
V(Arm64Float32Neg) \
V(Arm64Float32Sqrt) \
V(Arm64Float32Fnmul) \
V(Arm64Float32RoundDown) \
V(Arm64Float32Max) \
V(Arm64Float32Min) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
V(Arm64Float64Mul) \
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
V(Arm64Float64Max) \
V(Arm64Float64Min) \
V(Arm64Float64Abs) \
V(Arm64Float64Abd) \
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
V(Arm64Float64Fnmul) \
V(Arm64Float64RoundDown) \
V(Arm64Float32RoundUp) \
V(Arm64Float64RoundUp) \
V(Arm64Float64RoundTiesAway) \
V(Arm64Float32RoundTruncate) \
V(Arm64Float64RoundTruncate) \
V(Arm64Float32RoundTiesEven) \
V(Arm64Float64RoundTiesEven) \
V(Arm64Float64SilenceNaN) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float32ToInt32) \
V(Arm64Float64ToInt32) \
V(Arm64Float32ToUint32) \
V(Arm64Float64ToUint32) \
V(Arm64Float32ToInt64) \
V(Arm64Float64ToInt64) \
V(Arm64Float32ToUint64) \
V(Arm64Float64ToUint64) \
V(Arm64Int32ToFloat32) \
V(Arm64Int32ToFloat64) \
V(Arm64Int64ToFloat32) \
V(Arm64Int64ToFloat64) \
V(Arm64Uint32ToFloat32) \
V(Arm64Uint32ToFloat64) \
V(Arm64Uint64ToFloat32) \
V(Arm64Uint64ToFloat64) \
V(Arm64Float64ExtractLowWord32) \
V(Arm64Float64ExtractHighWord32) \
V(Arm64Float64InsertLowWord32) \
V(Arm64Float64InsertHighWord32) \
V(Arm64Float64MoveU64) \
V(Arm64U64MoveFloat64) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \
V(Arm64StrD) \
V(Arm64LdrQ) \
V(Arm64StrQ) \
V(Arm64Ldrb) \
V(Arm64Ldrsb) \
V(Arm64LdrsbW) \
V(Arm64Strb) \
V(Arm64Ldrh) \
V(Arm64Ldrsh) \
V(Arm64LdrshW) \
V(Arm64Strh) \
V(Arm64Ldrsw) \
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
V(Arm64LdrDecompressTaggedSigned) \
V(Arm64LdrDecompressTaggedPointer) \
V(Arm64LdrDecompressAnyTagged) \
V(Arm64Str) \
V(Arm64StrCompressTagged) \
V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
V(Arm64Sxtl) \
V(Arm64Sxtl2) \
V(Arm64Uxtl) \
V(Arm64Uxtl2) \
V(Arm64F64x2Splat) \
V(Arm64F64x2ExtractLane) \
V(Arm64F64x2ReplaceLane) \
V(Arm64F64x2Abs) \
V(Arm64F64x2Neg) \
V(Arm64F64x2Sqrt) \
V(Arm64F64x2Add) \
V(Arm64F64x2Sub) \
V(Arm64F64x2Mul) \
V(Arm64F64x2MulElement) \
V(Arm64F64x2Div) \
V(Arm64F64x2Min) \
V(Arm64F64x2Max) \
V(Arm64F64x2Eq) \
V(Arm64F64x2Ne) \
V(Arm64F64x2Lt) \
V(Arm64F64x2Le) \
V(Arm64F64x2Qfma) \
V(Arm64F64x2Qfms) \
V(Arm64F64x2Pmin) \
V(Arm64F64x2Pmax) \
V(Arm64F64x2ConvertLowI32x4S) \
V(Arm64F64x2ConvertLowI32x4U) \
V(Arm64F64x2PromoteLowF32x4) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
V(Arm64F32x4SConvertI32x4) \
V(Arm64F32x4UConvertI32x4) \
V(Arm64F32x4Abs) \
V(Arm64F32x4Neg) \
V(Arm64F32x4Sqrt) \
V(Arm64F32x4RecipApprox) \
V(Arm64F32x4RecipSqrtApprox) \
V(Arm64F32x4Add) \
V(Arm64F32x4Sub) \
V(Arm64F32x4Mul) \
V(Arm64F32x4MulElement) \
V(Arm64F32x4Div) \
V(Arm64F32x4Min) \
V(Arm64F32x4Max) \
V(Arm64F32x4Eq) \
V(Arm64F32x4Ne) \
V(Arm64F32x4Lt) \
V(Arm64F32x4Le) \
V(Arm64F32x4Qfma) \
V(Arm64F32x4Qfms) \
V(Arm64F32x4Pmin) \
V(Arm64F32x4Pmax) \
V(Arm64F32x4DemoteF64x2Zero) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
V(Arm64I64x2Abs) \
V(Arm64I64x2Neg) \
V(Arm64I64x2Shl) \
V(Arm64I64x2ShrS) \
V(Arm64I64x2Add) \
V(Arm64I64x2Sub) \
V(Arm64I64x2Mul) \
V(Arm64I64x2Eq) \
V(Arm64I64x2Ne) \
V(Arm64I64x2GtS) \
V(Arm64I64x2GeS) \
V(Arm64I64x2ShrU) \
V(Arm64I64x2BitMask) \
V(Arm64I32x4Splat) \
V(Arm64I32x4ExtractLane) \
V(Arm64I32x4ReplaceLane) \
V(Arm64I32x4SConvertF32x4) \
V(Arm64I32x4Neg) \
V(Arm64I32x4Shl) \
V(Arm64I32x4ShrS) \
V(Arm64I32x4Add) \
V(Arm64I32x4Sub) \
V(Arm64I32x4Mul) \
V(Arm64I32x4Mla) \
V(Arm64I32x4Mls) \
V(Arm64I32x4MinS) \
V(Arm64I32x4MaxS) \
V(Arm64I32x4Eq) \
V(Arm64I32x4Ne) \
V(Arm64I32x4GtS) \
V(Arm64I32x4GeS) \
V(Arm64I32x4UConvertF32x4) \
V(Arm64I32x4ShrU) \
V(Arm64I32x4MinU) \
V(Arm64I32x4MaxU) \
V(Arm64I32x4GtU) \
V(Arm64I32x4GeU) \
V(Arm64I32x4Abs) \
V(Arm64I32x4BitMask) \
V(Arm64I32x4DotI16x8S) \
V(Arm64I32x4TruncSatF64x2SZero) \
V(Arm64I32x4TruncSatF64x2UZero) \
V(Arm64I16x8Splat) \
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
V(Arm64I16x8ReplaceLane) \
V(Arm64I16x8Neg) \
V(Arm64I16x8Shl) \
V(Arm64I16x8ShrS) \
V(Arm64I16x8SConvertI32x4) \
V(Arm64I16x8Add) \
V(Arm64I16x8AddSatS) \
V(Arm64I16x8Sub) \
V(Arm64I16x8SubSatS) \
V(Arm64I16x8Mul) \
V(Arm64I16x8Mla) \
V(Arm64I16x8Mls) \
V(Arm64I16x8MinS) \
V(Arm64I16x8MaxS) \
V(Arm64I16x8Eq) \
V(Arm64I16x8Ne) \
V(Arm64I16x8GtS) \
V(Arm64I16x8GeS) \
V(Arm64I16x8ShrU) \
V(Arm64I16x8UConvertI32x4) \
V(Arm64I16x8AddSatU) \
V(Arm64I16x8SubSatU) \
V(Arm64I16x8MinU) \
V(Arm64I16x8MaxU) \
V(Arm64I16x8GtU) \
V(Arm64I16x8GeU) \
V(Arm64I16x8RoundingAverageU) \
V(Arm64I16x8Q15MulRSatS) \
V(Arm64I16x8Abs) \
V(Arm64I16x8BitMask) \
V(Arm64I8x16Splat) \
V(Arm64I8x16ExtractLaneU) \
V(Arm64I8x16ExtractLaneS) \
V(Arm64I8x16ReplaceLane) \
V(Arm64I8x16Neg) \
V(Arm64I8x16Shl) \
V(Arm64I8x16ShrS) \
V(Arm64I8x16SConvertI16x8) \
V(Arm64I8x16Add) \
V(Arm64I8x16AddSatS) \
V(Arm64I8x16Sub) \
V(Arm64I8x16SubSatS) \
V(Arm64I8x16Mla) \
V(Arm64I8x16Mls) \
V(Arm64I8x16MinS) \
V(Arm64I8x16MaxS) \
V(Arm64I8x16Eq) \
V(Arm64I8x16Ne) \
V(Arm64I8x16GtS) \
V(Arm64I8x16GeS) \
V(Arm64I8x16ShrU) \
V(Arm64I8x16UConvertI16x8) \
V(Arm64I8x16AddSatU) \
V(Arm64I8x16SubSatU) \
V(Arm64I8x16MinU) \
V(Arm64I8x16MaxU) \
V(Arm64I8x16GtU) \
V(Arm64I8x16GeU) \
V(Arm64I8x16RoundingAverageU) \
V(Arm64I8x16Abs) \
V(Arm64I8x16BitMask) \
V(Arm64S128Const) \
V(Arm64S128Zero) \
V(Arm64S128Dup) \
V(Arm64S128And) \
V(Arm64S128Or) \
V(Arm64S128Xor) \
V(Arm64S128Not) \
V(Arm64S128Select) \
V(Arm64S128AndNot) \
V(Arm64Ssra) \
V(Arm64Usra) \
V(Arm64S32x4ZipLeft) \
V(Arm64S32x4ZipRight) \
V(Arm64S32x4UnzipLeft) \
V(Arm64S32x4UnzipRight) \
V(Arm64S32x4TransposeLeft) \
V(Arm64S32x4TransposeRight) \
V(Arm64S32x4Shuffle) \
V(Arm64S16x8ZipLeft) \
V(Arm64S16x8ZipRight) \
V(Arm64S16x8UnzipLeft) \
V(Arm64S16x8UnzipRight) \
V(Arm64S16x8TransposeLeft) \
V(Arm64S16x8TransposeRight) \
V(Arm64S8x16ZipLeft) \
V(Arm64S8x16ZipRight) \
V(Arm64S8x16UnzipLeft) \
V(Arm64S8x16UnzipRight) \
V(Arm64S8x16TransposeLeft) \
V(Arm64S8x16TransposeRight) \
V(Arm64S8x16Concat) \
V(Arm64I8x16Swizzle) \
V(Arm64I8x16Shuffle) \
V(Arm64S32x2Reverse) \
V(Arm64S16x4Reverse) \
V(Arm64S16x2Reverse) \
V(Arm64S8x8Reverse) \
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
V(Arm64I64x2AllTrue) \
V(Arm64I32x4AllTrue) \
V(Arm64I16x8AllTrue) \
V(Arm64I8x16AllTrue) \
V(Arm64LoadSplat) \
V(Arm64LoadLane) \
V(Arm64StoreLane) \
V(Arm64S128Load8x8S) \
V(Arm64S128Load8x8U) \
V(Arm64S128Load16x4S) \
V(Arm64S128Load16x4U) \
V(Arm64S128Load32x2S) \
V(Arm64S128Load32x2U) \
V(Arm64Word64AtomicLoadUint64) \
V(Arm64Word64AtomicStoreWord64) \
V(Arm64Word64AtomicAddUint64) \
V(Arm64Word64AtomicSubUint64) \
V(Arm64Word64AtomicAndUint64) \
V(Arm64Word64AtomicOrUint64) \
V(Arm64Word64AtomicXorUint64) \
V(Arm64Word64AtomicExchangeUint64) \
V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.

View File

@@ -377,9 +377,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
case kArm64LdarDecompressTaggedSigned:
case kArm64LdarDecompressTaggedPointer:
case kArm64LdarDecompressAnyTagged:
case kArm64Peek:
case kArm64LoadSplat:
case kArm64LoadLane:
@@ -402,7 +399,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrW:
case kArm64Str:
case kArm64StrCompressTagged:
case kArm64StlrCompressTagged:
case kArm64DmbIsh:
case kArm64DsbIsb:
case kArm64StoreLane:

View File

@@ -144,6 +144,21 @@ class Arm64OperandGenerator final : public OperandGenerator {
namespace {
ArchOpcode GetAtomicStoreOpcode(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord8:
return kAtomicStoreWord8;
case MachineRepresentation::kWord16:
return kAtomicStoreWord16;
case MachineRepresentation::kWord32:
return kAtomicStoreWord32;
case MachineRepresentation::kWord64:
return kArm64Word64AtomicStoreWord64;
default:
UNREACHABLE();
}
}
void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Arm64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -2603,135 +2618,30 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
AtomicWidth width) {
ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister()};
// The memory order is ignored as both acquire and sequentially consistent
// loads can emit LDAR.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
LoadRepresentation load_rep = atomic_load_params.representation();
InstructionCode code;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
code = kAtomicLoadWord32;
break;
case MachineRepresentation::kWord64:
code = kArm64Word64AtomicLoadUint64;
break;
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
code = kArm64LdarDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
code = kArm64LdarDecompressTaggedPointer;
break;
case MachineRepresentation::kTagged:
code = kArm64LdarDecompressAnyTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
if (kTaggedSize == 8) {
code = kArm64Word64AtomicLoadUint64;
} else {
code = kAtomicLoadWord32;
}
break;
#endif
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
DCHECK(COMPRESS_POINTERS_BOOL);
code = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
}
code |=
AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
AtomicWidth width) {
ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
// The memory order is ignored as both release and sequentially consistent
// stores can emit STLR.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
MachineRepresentation rep = store_params.representation();
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(value)};
InstructionOperand temps[] = {g.TempRegister()};
InstructionCode code;
if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
DCHECK(CanBeTaggedOrCompressedPointer(rep));
DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
code = kArchAtomicStoreWithWriteBarrier;
code |= MiscField::encode(static_cast<int>(record_write_mode));
} else {
switch (rep) {
case MachineRepresentation::kWord8:
code = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
code = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
code = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
DCHECK_EQ(width, AtomicWidth::kWord64);
code = kArm64Word64AtomicStoreWord64;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
V8_FALLTHROUGH;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
CHECK(COMPRESS_POINTERS_BOOL);
DCHECK_EQ(width, AtomicWidth::kWord32);
code = kArm64StlrCompressTagged;
break;
default:
UNREACHABLE();
}
code |= AtomicWidthField::encode(width);
}
code |= AddressingModeField::encode(kMode_MRR);
InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
AtomicWidthField::encode(width);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps),
temps);
}
@@ -3292,19 +3202,55 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
VisitAtomicLoad(this, node, AtomicWidth::kWord32);
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
}
VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
VisitAtomicLoad(this, node, AtomicWidth::kWord64);
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicLoadWord32;
break;
case MachineRepresentation::kWord64:
opcode = kArm64Word64AtomicLoadUint64;
break;
default:
UNREACHABLE();
}
VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
VisitAtomicStore(this, node, AtomicWidth::kWord32);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
DCHECK_NE(rep, MachineRepresentation::kWord64);
VisitAtomicStore(this, node, GetAtomicStoreOpcode(rep), AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
VisitAtomicStore(this, node, AtomicWidth::kWord64);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
VisitAtomicStore(this, node, GetAtomicStoreOpcode(rep), AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {

View File

@@ -957,8 +957,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
case kArchStoreWithWriteBarrier: // Fall through.
case kArchAtomicStoreWithWriteBarrier: {
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -970,12 +969,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
if (arch_opcode == kArchStoreWithWriteBarrier) {
__ mov(operand, value);
} else {
__ mov(scratch0, value);
__ xchg(scratch0, operand);
}
__ mov(operand, value);
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -3841,31 +3835,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Word32AtomicPairLoad: {
__ movq(kScratchDoubleReg, i.MemoryOperand());
__ Pextrd(i.OutputRegister(0), kScratchDoubleReg, 0);
__ Pextrd(i.OutputRegister(1), kScratchDoubleReg, 1);
XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
__ movq(tmp, i.MemoryOperand());
__ Pextrd(i.OutputRegister(0), tmp, 0);
__ Pextrd(i.OutputRegister(1), tmp, 1);
break;
}
case kIA32Word32ReleasePairStore: {
__ push(ebx);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(1));
__ push(ebx);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
__ push(ebx);
frame_access_state()->IncreaseSPDelta(3);
__ movq(kScratchDoubleReg, MemOperand(esp, 0));
__ pop(ebx);
__ pop(ebx);
__ pop(ebx);
frame_access_state()->IncreaseSPDelta(-3);
__ movq(i.MemoryOperand(2), kScratchDoubleReg);
break;
}
case kIA32Word32SeqCstPairStore: {
case kIA32Word32AtomicPairStore: {
Label store;
__ bind(&store);
__ mov(eax, i.MemoryOperand(2));
__ mov(edx, i.NextMemoryOperand(2));
__ mov(i.TempRegister(0), i.MemoryOperand(2));
__ mov(i.TempRegister(1), i.NextMemoryOperand(2));
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));

View File

@@ -402,8 +402,7 @@ namespace compiler {
V(IA32I16x8AllTrue) \
V(IA32I8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
V(IA32Word32ReleasePairStore) \
V(IA32Word32SeqCstPairStore) \
V(IA32Word32AtomicPairStore) \
V(IA32Word32AtomicPairAdd) \
V(IA32Word32AtomicPairSub) \
V(IA32Word32AtomicPairAnd) \

View File

@@ -423,8 +423,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Word32AtomicPairLoad:
return kIsLoadOperation;
case kIA32Word32ReleasePairStore:
case kIA32Word32SeqCstPairStore:
case kIA32Word32AtomicPairStore:
case kIA32Word32AtomicPairAdd:
case kIA32Word32AtomicPairSub:
case kIA32Word32AtomicPairAnd:

View File

@@ -246,41 +246,6 @@ class IA32OperandGenerator final : public OperandGenerator {
namespace {
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
break;
case MachineRepresentation::kFloat64:
opcode = kIA32Movsd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
case MachineRepresentation::kSimd128:
opcode = kIA32Movdqu;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
return opcode;
}
void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
Node* input = node->InputAt(0);
@@ -570,8 +535,41 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitLoad(Node* node, Node* value,
InstructionCode opcode) {
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
break;
case MachineRepresentation::kFloat64:
opcode = kIA32Movsd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
case MachineRepresentation::kSimd128:
opcode = kIA32Movdqu;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
IA32OperandGenerator g(this);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
@@ -583,97 +581,20 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(!load_rep.IsMapWord());
VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
namespace {
ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kFloat32:
return kIA32Movss;
case MachineRepresentation::kFloat64:
return kIA32Movsd;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
return kIA32Movb;
case MachineRepresentation::kWord16:
return kIA32Movw;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
return kIA32Movl;
case MachineRepresentation::kSimd128:
return kIA32Movdqu;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
}
ArchOpcode GetSeqCstStoreOpcode(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord8:
return kAtomicExchangeInt8;
case MachineRepresentation::kWord16:
return kAtomicExchangeInt16;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
return kAtomicExchangeWord32;
default:
UNREACHABLE();
}
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
ArchOpcode opcode, MachineRepresentation rep) {
IA32OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
? g.UseFixed(value, edx)
: g.UseUniqueRegister(value);
InstructionOperand inputs[] = {
value_operand, g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {
(rep == MachineRepresentation::kWord8)
// Using DefineSameAsFirst requires the register to be unallocated.
? g.DefineAsFixed(node, edx)
: g.DefineSameAsFirst(node)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
}
void VisitStoreCommon(InstructionSelector* selector, Node* node,
StoreRepresentation store_rep,
base::Optional<AtomicMemoryOrder> atomic_order) {
IA32OperandGenerator g(selector);
void InstructionSelector::VisitStore(Node* node) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
const bool is_seqcst =
atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
@ -690,23 +611,48 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
: kArchStoreWithWriteBarrier;
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count,
temps);
} else if (is_seqcst) {
VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(rep), rep);
Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
} else {
// Release and non-atomic stores emit MOV.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
break;
case MachineRepresentation::kFloat64:
opcode = kIA32Movsd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = kIA32Movb;
break;
case MachineRepresentation::kWord16:
opcode = kIA32Movw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
case MachineRepresentation::kSimd128:
opcode = kIA32Movdqu;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
} else if (!atomic_order && (rep == MachineRepresentation::kWord8 ||
rep == MachineRepresentation::kBit)) {
} else if (rep == MachineRepresentation::kWord8 ||
rep == MachineRepresentation::kBit) {
val = g.UseByteRegister(value);
} else {
val = g.UseRegister(value);
@ -717,20 +663,13 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code =
GetStoreOpcode(rep) | AddressingModeField::encode(addressing_mode);
opcode | AddressingModeField::encode(addressing_mode);
inputs[input_count++] = val;
selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
input_count, inputs);
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
inputs);
}
}
} // namespace
void InstructionSelector::VisitStore(Node* node) {
VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
base::nullopt);
}
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@ -1686,6 +1625,29 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
ArchOpcode opcode, MachineRepresentation rep) {
IA32OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
? g.UseFixed(value, edx)
: g.UseUniqueRegister(value);
InstructionOperand inputs[] = {
value_operand, g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {
(rep == MachineRepresentation::kWord8)
// Using DefineSameAsFirst requires the register to be unallocated.
? g.DefineAsFixed(node, edx)
: g.DefineSameAsFirst(node)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
}
void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
ArchOpcode opcode, MachineRepresentation rep) {
AddressingMode addressing_mode;
@ -1995,25 +1957,32 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
LoadRepresentation load_rep = atomic_load_params.representation();
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
load_rep.representation() == MachineRepresentation::kWord32 ||
load_rep.representation() == MachineRepresentation::kTaggedSigned ||
load_rep.representation() == MachineRepresentation::kTaggedPointer ||
load_rep.representation() == MachineRepresentation::kTagged);
load_rep.representation() == MachineRepresentation::kWord32);
USE(load_rep);
// The memory order is ignored as both acquire and sequentially consistent
// loads can emit MOV.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
VisitLoad(node, node, GetLoadOpcode(load_rep));
VisitLoad(node);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
VisitStoreCommon(this, node, store_params.store_representation(),
store_params.order());
IA32OperandGenerator g(this);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicExchangeInt8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicExchangeInt16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicExchangeWord32;
break;
default:
UNREACHABLE();
}
VisitAtomicExchange(this, node, opcode, rep);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@ -2106,8 +2075,6 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
// Both acquire and sequentially consistent loads can emit MOV.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
IA32OperandGenerator g(this);
AddressingMode mode;
Node* base = node->InputAt(0);
@ -2119,9 +2086,10 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
g.GetEffectiveIndexOperand(index, &mode)};
InstructionCode code =
kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
InstructionOperand temps[] = {g.TempDoubleRegister()};
InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
g.DefineAsRegister(projection1)};
Emit(code, 2, outputs, 2, inputs);
Emit(code, 2, outputs, 2, inputs, 1, temps);
} else if (projection0 || projection1) {
// Only one word is needed, so it's enough to load just that.
ArchOpcode opcode = kIA32Movl;
@ -2142,45 +2110,25 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
// Release pair stores emit a MOVQ via a double register, and sequentially
// consistent stores emit CMPXCHG8B.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
if (order == AtomicMemoryOrder::kAcqRel) {
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
g.UseUniqueRegisterOrSlotOrConstant(value),
g.UseUniqueRegisterOrSlotOrConstant(value_high),
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode),
};
InstructionCode code = kIA32Word32ReleasePairStore |
AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, arraysize(inputs), inputs);
} else {
DCHECK_EQ(order, AtomicMemoryOrder::kSeqCst);
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
// Allocating temp registers here as stores are performed using an atomic
// exchange, the output of which is stored in edx:eax, which should be saved
// and restored at the end of the instruction.
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
const int num_temps = arraysize(temps);
InstructionCode code = kIA32Word32SeqCstPairStore |
AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
}
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
// Allocating temp registers here as stores are performed using an atomic
// exchange, the output of which is stored in edx:eax, which should be saved
// and restored at the end of the instruction.
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
const int num_temps = arraysize(temps);
InstructionCode code =
kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
}
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {

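For context on the IA-32 hunks above: the reverted selector distinguished release stores (a plain MOV) from sequentially consistent stores (an atomic exchange), following the C++-to-x86 mapping cited in its comments. A minimal standalone sketch of that distinction, using plain std::atomic rather than anything from V8 (names are illustrative):

#include <atomic>

std::atomic<int> g_cell{0};

void ReleaseStore(int v) {
  // On x86 this is typically lowered to a plain MOV.
  g_cell.store(v, std::memory_order_release);
}

void SeqCstStore(int v) {
  // On x86 this is typically lowered to XCHG (or MOV + MFENCE), a full barrier,
  // which is why the reverted selector routed seq-cst stores through an exchange.
  g_cell.store(v, std::memory_order_seq_cst);
}
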
@ -32,7 +32,6 @@
#define TARGET_ADDRESSING_MODE_LIST(V)
#endif
#include "src/base/bit-field.h"
#include "src/codegen/atomic-memory-order.h"
#include "src/compiler/write-barrier-kind.h"
namespace v8 {
@ -102,7 +101,6 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
V(ArchAtomicStoreWithWriteBarrier) \
V(ArchStackSlot) \
V(ArchStackPointerGreaterThan) \
V(ArchStackCheckOffset) \
@ -267,16 +265,6 @@ enum MemoryAccessMode {
enum class AtomicWidth { kWord32, kWord64 };
inline size_t AtomicWidthSize(AtomicWidth width) {
switch (width) {
case AtomicWidth::kWord32:
return 4;
case AtomicWidth::kWord64:
return 8;
}
UNREACHABLE();
}
// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
// interesting to the register allocator, as the inputs and flags on the
@ -302,16 +290,10 @@ using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
// AtomicWidthField overlaps with MiscField and is used for the various Atomic
// AtomicOperandWidth overlaps with MiscField and is used for the various Atomic
// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
// architectures are assumed to be 32bit wide.
using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
// AtomicMemoryOrderField overlaps with MiscField and is used for the various
// Atomic opcodes. This field is not used on all architectures. It is used on
// architectures where the codegen for kSeqCst and kAcqRel differ only by
// emitting fences.
using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
using MiscField = base::BitField<int, 22, 10>;
// This static assertion serves as an early warning if we are about to exhaust

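For context on the instruction-code hunks above: the removed AtomicWidthField, AtomicMemoryOrderField, and AtomicStoreRecordWriteModeField packed extra atomic metadata into the same 32-bit InstructionCode word that MiscField occupies. A standalone sketch of the shift-and-mask encoding such bit fields perform (illustrative only, not V8's base::BitField):

#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

enum class AtomicWidth { kWord32, kWord64 };
// Mirrors the removed AtomicWidthField: a 2-bit field starting at bit 22.
using AtomicWidthFieldSketch = BitFieldSketch<AtomicWidth, 22, 2>;

static_assert(AtomicWidthFieldSketch::decode(AtomicWidthFieldSketch::encode(
                  AtomicWidth::kWord64)) == AtomicWidth::kWord64,
              "encode/decode must round-trip");
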
@ -328,7 +328,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
return kIsBarrier;
case kArchStoreWithWriteBarrier:
case kArchAtomicStoreWithWriteBarrier:
return kHasSideEffect;
case kAtomicLoadInt8:

@ -1869,14 +1869,12 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kMemoryBarrier:
return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
LoadRepresentation type = params.representation();
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node);
}
case IrOpcode::kWord64AtomicLoad: {
AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
LoadRepresentation type = params.representation();
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitWord64AtomicLoad(node);
}

@ -1292,8 +1292,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(result, result);
break;
}
case kArchStoreWithWriteBarrier: // Fall through.
case kArchAtomicStoreWithWriteBarrier: {
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@ -1305,12 +1304,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
if (arch_opcode == kArchStoreWithWriteBarrier) {
__ StoreTaggedField(operand, value);
} else {
DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
__ AtomicStoreTaggedField(operand, value);
}
__ StoreTaggedField(operand, value);
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@ -1318,7 +1312,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
__ bind(ool->exit());
// TODO(syg): Support non-relaxed memory orders in TSAN.
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kTaggedSize);
break;

@ -341,8 +341,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
UNREACHABLE();
}
ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
ArchOpcode GetAtomicStoreOp(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord8:
return kAtomicExchangeUint8;
case MachineRepresentation::kWord16:
@ -351,15 +351,6 @@ ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
return kAtomicExchangeWord32;
case MachineRepresentation::kWord64:
return kX64Word64AtomicExchangeUint64;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
if (COMPRESS_POINTERS_BOOL) return kAtomicExchangeWord32;
return kX64Word64AtomicExchangeUint64;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
CHECK(COMPRESS_POINTERS_BOOL);
return kAtomicExchangeWord32;
default:
UNREACHABLE();
}
@ -508,38 +499,15 @@ void InstructionSelector::VisitLoad(Node* node) {
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
namespace {
// Shared routine for Word32/Word64 Atomic Exchange
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
g.UseUniqueRegister(value), g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
void VisitStoreCommon(InstructionSelector* selector, Node* node,
StoreRepresentation store_rep,
base::Optional<AtomicMemoryOrder> atomic_order,
base::Optional<AtomicWidth> atomic_width) {
X64OperandGenerator g(selector);
void InstructionSelector::VisitStore(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
const bool is_seqcst =
atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(store_rep.representation())) {
@ -556,19 +524,11 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
: kArchStoreWithWriteBarrier;
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
arraysize(temps), temps);
} else if (is_seqcst) {
VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(store_rep),
*atomic_width);
Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
} else {
// Release and non-atomic stores emit MOV.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
if ((ElementSizeLog2Of(store_rep.representation()) <
kSystemPointerSizeLog2) &&
value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
@ -598,18 +558,11 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
input_count, inputs, temp_count, temps);
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
inputs, temp_count, temps);
}
}
} // namespace
void InstructionSelector::VisitStore(Node* node) {
return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
base::nullopt, base::nullopt);
}
void InstructionSelector::VisitProtectedStore(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(2);
@ -2387,6 +2340,23 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
// Shared routine for Word32/Word64 Atomic Exchange
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
g.UseUniqueRegister(value), g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
} // namespace
// Shared routine for word comparison against zero.
@ -2754,44 +2724,29 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
LoadRepresentation load_rep = atomic_load_params.representation();
DCHECK(IsIntegral(load_rep.representation()) ||
IsAnyTagged(load_rep.representation()) ||
(COMPRESS_POINTERS_BOOL &&
CanBeCompressedPointer(load_rep.representation())));
DCHECK_NE(load_rep.representation(), MachineRepresentation::kWord64);
DCHECK(!load_rep.IsMapWord());
// The memory order is ignored as both acquire and sequentially consistent
// loads can emit MOV.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
VisitLoad(node, node, GetLoadOpcode(load_rep));
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
load_rep.representation() == MachineRepresentation::kWord32);
USE(load_rep);
VisitLoad(node);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
DCHECK(!atomic_load_params.representation().IsMapWord());
// The memory order is ignored as both acquire and sequentially consistent
// loads can emit MOV.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
VisitLoad(node, node, GetLoadOpcode(atomic_load_params.representation()));
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
USE(load_rep);
VisitLoad(node);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 4);
VisitStoreCommon(this, node, params.store_representation(), params.order(),
AtomicWidth::kWord32);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
DCHECK_NE(rep, MachineRepresentation::kWord64);
VisitAtomicExchange(this, node, GetAtomicStoreOp(rep), AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 8);
VisitStoreCommon(this, node, params.store_representation(), params.order(),
AtomicWidth::kWord64);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
VisitAtomicExchange(this, node, GetAtomicStoreOp(rep), AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {

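For context on the x64 hunks above: the removed GetSeqCstStoreOpcode treated tagged values as 32-bit atomic operations whenever pointer compression is enabled, because a compressed tagged field stores only the low 32 bits of an address relative to the heap cage base. A simplified standalone sketch of that compression scheme (names and the base constant are illustrative, not V8's actual helpers):

#include <cstdint>

using Tagged_t = uint32_t;  // on-heap field width with pointer compression
using Address = uint64_t;

constexpr Address kCageBaseSketch = uint64_t{1} << 40;  // assumed 4GB-aligned base

Tagged_t CompressTagged(Address full) {
  return static_cast<Tagged_t>(full);  // keep only the low 32 bits
}

Address DecompressTagged(Address cage_base, Tagged_t compressed) {
  return cage_base + compressed;  // re-anchor within the 4 GB cage
}
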
@ -679,25 +679,22 @@ TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset) {
return BitcastWordToTagged(Load<RawPtrT>(base, offset));
}
Node* CodeAssembler::AtomicLoad(MachineType type, AtomicMemoryOrder order,
TNode<RawPtrT> base, TNode<WordT> offset) {
Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
return raw_assembler()->AtomicLoad(AtomicLoadParameters(type, order), base,
offset);
return raw_assembler()->AtomicLoad(type, base, offset);
}
template <class Type>
TNode<Type> CodeAssembler::AtomicLoad64(AtomicMemoryOrder order,
TNode<RawPtrT> base,
TNode<Type> CodeAssembler::AtomicLoad64(TNode<RawPtrT> base,
TNode<WordT> offset) {
return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(
AtomicLoadParameters(MachineType::Uint64(), order), base, offset));
return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(base, offset));
}
template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
TNode<RawPtrT> base, TNode<WordT> offset);
template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
TNode<RawPtrT> base, TNode<WordT> offset);
Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset) {
@ -862,22 +859,16 @@ void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
BitcastTaggedToWord(tagged_value));
}
void CodeAssembler::AtomicStore(MachineRepresentation rep,
AtomicMemoryOrder order, TNode<RawPtrT> base,
void CodeAssembler::AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<Word32T> value) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->AtomicStore(
AtomicStoreParameters(rep, WriteBarrierKind::kNoWriteBarrier, order),
base, offset, value);
raw_assembler()->AtomicStore(rep, base, offset, value);
}
void CodeAssembler::AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<UintPtrT> value,
void CodeAssembler::AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
TNode<UintPtrT> value,
TNode<UintPtrT> value_high) {
raw_assembler()->AtomicStore64(
AtomicStoreParameters(MachineRepresentation::kWord64,
WriteBarrierKind::kNoWriteBarrier, order),
base, offset, value, value_high);
raw_assembler()->AtomicStore64(base, offset, value, value_high);
}
#define ATOMIC_FUNCTION(name) \

@ -17,7 +17,6 @@
#include "src/base/optional.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
@ -744,14 +743,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base, offset));
}
template <class Type>
TNode<Type> AtomicLoad(AtomicMemoryOrder order, TNode<RawPtrT> base,
TNode<WordT> offset) {
TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
return UncheckedCast<Type>(
AtomicLoad(MachineTypeOf<Type>::value, order, base, offset));
AtomicLoad(MachineTypeOf<Type>::value, base, offset));
}
template <class Type>
TNode<Type> AtomicLoad64(AtomicMemoryOrder order, TNode<RawPtrT> base,
TNode<WordT> offset);
TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
// Load uncompressed tagged value from (most likely off JS heap) memory
// location.
TNode<Object> LoadFullTagged(Node* base);
@ -812,14 +809,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<HeapObject> object,
int offset, Node* value);
void OptimizedStoreMap(TNode<HeapObject> object, TNode<Map>);
void AtomicStore(MachineRepresentation rep, AtomicMemoryOrder order,
TNode<RawPtrT> base, TNode<WordT> offset,
TNode<Word32T> value);
void AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<Word32T> value);
// {value_high} is used for 64-bit stores on 32-bit platforms, must be
// nullptr in other cases.
void AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<UintPtrT> value,
TNode<UintPtrT> value_high);
void AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
TNode<UintPtrT> value, TNode<UintPtrT> value_high);
TNode<Word32T> AtomicAdd(MachineType type, TNode<RawPtrT> base,
TNode<UintPtrT> offset, TNode<Word32T> value);
@ -1358,8 +1353,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, int input_count,
Node* const* inputs);
Node* AtomicLoad(MachineType type, AtomicMemoryOrder order,
TNode<RawPtrT> base, TNode<WordT> offset);
Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
Node* UnalignedLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset);

@ -944,31 +944,29 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64AtomicLoad: {
DCHECK_EQ(4, node->InputCount());
AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
MachineType type = AtomicOpType(node->op());
DefaultLowering(node, true);
if (params.representation() == MachineType::Uint64()) {
NodeProperties::ChangeOp(
node, machine()->Word32AtomicPairLoad(params.order()));
if (type == MachineType::Uint64()) {
NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
ReplaceNodeWithProjections(node);
} else {
NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(params));
NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(type));
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
}
case IrOpcode::kWord64AtomicStore: {
DCHECK_EQ(5, node->InputCount());
AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
if (params.representation() == MachineRepresentation::kWord64) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
if (rep == MachineRepresentation::kWord64) {
LowerMemoryBaseAndIndex(node);
Node* value = node->InputAt(2);
node->ReplaceInput(2, GetReplacementLow(value));
node->InsertInput(zone(), 3, GetReplacementHigh(value));
NodeProperties::ChangeOp(
node, machine()->Word32AtomicPairStore(params.order()));
NodeProperties::ChangeOp(node, machine()->Word32AtomicPairStore());
} else {
DefaultLowering(node, true);
NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(params));
NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(rep));
}
break;
}

@ -121,11 +121,6 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord64AtomicLoad:
representation_vector_[node->id()] =
PromoteRepresentation(AtomicLoadParametersOf(node->op())
.representation()
.representation());
break;
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
@ -158,8 +153,8 @@ class MachineRepresentationInferrer {
}
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord64AtomicStore:
representation_vector_[node->id()] = PromoteRepresentation(
AtomicStoreParametersOf(node->op()).representation());
representation_vector_[node->id()] =
PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
break;
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord32AtomicPairStore:
@ -590,12 +585,9 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
if (COMPRESS_POINTERS_BOOL &&
((node->opcode() == IrOpcode::kStore &&
IsAnyTagged(StoreRepresentationOf(node->op())
.representation())) ||
(node->opcode() == IrOpcode::kWord32AtomicStore &&
IsAnyTagged(AtomicStoreParametersOf(node->op())
.representation())))) {
node->opcode() == IrOpcode::kStore &&
IsAnyTagged(
StoreRepresentationOf(node->op()).representation())) {
CheckValueInputIsCompressedOrTagged(node, 2);
} else {
CheckValueInputIsTagged(node, 2);

@ -32,41 +32,6 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
return os << rep.representation() << ", " << rep.write_barrier_kind();
}
bool operator==(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
return lhs.store_representation() == rhs.store_representation() &&
lhs.order() == rhs.order();
}
bool operator!=(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
return !(lhs == rhs);
}
size_t hash_value(AtomicStoreParameters params) {
return base::hash_combine(hash_value(params.store_representation()),
params.order());
}
std::ostream& operator<<(std::ostream& os, AtomicStoreParameters params) {
return os << params.store_representation() << ", " << params.order();
}
bool operator==(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
return lhs.representation() == rhs.representation() &&
lhs.order() == rhs.order();
}
bool operator!=(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
return !(lhs == rhs);
}
size_t hash_value(AtomicLoadParameters params) {
return base::hash_combine(params.representation(), params.order());
}
std::ostream& operator<<(std::ostream& os, AtomicLoadParameters params) {
return os << params.representation() << ", " << params.order();
}
size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }
std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
@ -156,29 +121,20 @@ bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
IrOpcode::kWord32AtomicLoad == op->opcode() ||
IrOpcode::kWord64AtomicLoad == op->opcode() ||
IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode() ||
IrOpcode::kLoadImmutable == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
AtomicLoadParameters AtomicLoadParametersOf(Operator const* op) {
DCHECK(IrOpcode::kWord32AtomicLoad == op->opcode() ||
IrOpcode::kWord64AtomicLoad == op->opcode());
return OpParameter<AtomicLoadParameters>(op);
}
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kStore == op->opcode() ||
IrOpcode::kProtectedStore == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
AtomicStoreParameters const& AtomicStoreParametersOf(Operator const* op) {
DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
IrOpcode::kWord64AtomicStore == op->opcode());
return OpParameter<AtomicStoreParameters>(op);
}
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const* op) {
DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
@ -225,6 +181,12 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
return OpParameter<StackSlotRepresentation>(op);
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
IrOpcode::kWord64AtomicStore == op->opcode());
return OpParameter<MachineRepresentation>(op);
}
MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
@ -687,30 +649,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(S128Load32Zero) \
V(S128Load64Zero)
#if TAGGED_SIZE_8_BYTES
#define ATOMIC_TAGGED_TYPE_LIST(V)
#define ATOMIC64_TAGGED_TYPE_LIST(V) \
V(TaggedSigned) \
V(TaggedPointer) \
V(AnyTagged) \
V(CompressedPointer) \
V(AnyCompressed)
#else
#define ATOMIC_TAGGED_TYPE_LIST(V) \
V(TaggedSigned) \
V(TaggedPointer) \
V(AnyTagged) \
V(CompressedPointer) \
V(AnyCompressed)
#define ATOMIC64_TAGGED_TYPE_LIST(V)
#endif // TAGGED_SIZE_8_BYTES
#define ATOMIC_U32_TYPE_LIST(V) \
V(Uint8) \
V(Uint16) \
@ -726,28 +664,6 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
ATOMIC_U32_TYPE_LIST(V) \
V(Uint64)
#if TAGGED_SIZE_8_BYTES
#define ATOMIC_TAGGED_REPRESENTATION_LIST(V)
#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V) \
V(kTaggedSigned) \
V(kTaggedPointer) \
V(kTagged)
#else
#define ATOMIC_TAGGED_REPRESENTATION_LIST(V) \
V(kTaggedSigned) \
V(kTaggedPointer) \
V(kTagged) \
V(kCompressedPointer) \
V(kCompressed)
#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V)
#endif // TAGGED_SIZE_8_BYTES
#define ATOMIC_REPRESENTATION_LIST(V) \
V(kWord8) \
V(kWord16) \
@ -1051,63 +967,55 @@ struct MachineOperatorGlobalCache {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
#define ATOMIC_LOAD(Type) \
struct Word32SeqCstLoad##Type##Operator \
: public Operator1<AtomicLoadParameters> { \
Word32SeqCstLoad##Type##Operator() \
: Operator1<AtomicLoadParameters>( \
IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
"Word32AtomicLoad", 2, 1, 1, 1, 1, 0, \
AtomicLoadParameters(MachineType::Type(), \
AtomicMemoryOrder::kSeqCst)) {} \
}; \
Word32SeqCstLoad##Type##Operator kWord32SeqCstLoad##Type;
#define ATOMIC_LOAD(Type) \
struct Word32AtomicLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
Word32AtomicLoad##Type##Operator() \
: Operator1<LoadRepresentation>( \
IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
"Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_LOAD(Type) \
struct Word64SeqCstLoad##Type##Operator \
: public Operator1<AtomicLoadParameters> { \
Word64SeqCstLoad##Type##Operator() \
: Operator1<AtomicLoadParameters>( \
IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
"Word64AtomicLoad", 2, 1, 1, 1, 1, 0, \
AtomicLoadParameters(MachineType::Type(), \
AtomicMemoryOrder::kSeqCst)) {} \
}; \
Word64SeqCstLoad##Type##Operator kWord64SeqCstLoad##Type;
#define ATOMIC_LOAD(Type) \
struct Word64AtomicLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
Word64AtomicLoad##Type##Operator() \
: Operator1<LoadRepresentation>( \
IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
"Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
struct Word32SeqCstStore##Type##Operator \
: public Operator1<AtomicStoreParameters> { \
Word32SeqCstStore##Type##Operator() \
: Operator1<AtomicStoreParameters>( \
struct Word32AtomicStore##Type##Operator \
: public Operator1<MachineRepresentation> { \
Word32AtomicStore##Type##Operator() \
: Operator1<MachineRepresentation>( \
IrOpcode::kWord32AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
AtomicStoreParameters(MachineRepresentation::Type, \
kNoWriteBarrier, \
AtomicMemoryOrder::kSeqCst)) {} \
MachineRepresentation::Type) {} \
}; \
Word32SeqCstStore##Type##Operator kWord32SeqCstStore##Type;
Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
#define ATOMIC_STORE(Type) \
struct Word64SeqCstStore##Type##Operator \
: public Operator1<AtomicStoreParameters> { \
Word64SeqCstStore##Type##Operator() \
: Operator1<AtomicStoreParameters>( \
struct Word64AtomicStore##Type##Operator \
: public Operator1<MachineRepresentation> { \
Word64AtomicStore##Type##Operator() \
: Operator1<MachineRepresentation>( \
IrOpcode::kWord64AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
AtomicStoreParameters(MachineRepresentation::Type, \
kNoWriteBarrier, \
AtomicMemoryOrder::kSeqCst)) {} \
MachineRepresentation::Type) {} \
}; \
Word64SeqCstStore##Type##Operator kWord64SeqCstStore##Type;
Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
@ -1167,23 +1075,21 @@ struct MachineOperatorGlobalCache {
ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
struct Word32SeqCstPairLoadOperator : public Operator1<AtomicMemoryOrder> {
Word32SeqCstPairLoadOperator()
: Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairLoad,
Operator::kNoDeopt | Operator::kNoThrow,
"Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0,
AtomicMemoryOrder::kSeqCst) {}
struct Word32AtomicPairLoadOperator : public Operator {
Word32AtomicPairLoadOperator()
: Operator(IrOpcode::kWord32AtomicPairLoad,
Operator::kNoDeopt | Operator::kNoThrow,
"Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
};
Word32SeqCstPairLoadOperator kWord32SeqCstPairLoad;
Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
struct Word32SeqCstPairStoreOperator : public Operator1<AtomicMemoryOrder> {
Word32SeqCstPairStoreOperator()
: Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairStore,
Operator::kNoDeopt | Operator::kNoThrow,
"Word32AtomicPairStore", 4, 1, 1, 0, 1,
0, AtomicMemoryOrder::kSeqCst) {}
struct Word32AtomicPairStoreOperator : public Operator {
Word32AtomicPairStoreOperator()
: Operator(IrOpcode::kWord32AtomicPairStore,
Operator::kNoDeopt | Operator::kNoThrow,
"Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
};
Word32SeqCstPairStoreOperator kWord32SeqCstPairStore;
Word32AtomicPairStoreOperator kWord32AtomicPairStore;
#define ATOMIC_PAIR_OP(op) \
struct Word32AtomicPair##op##Operator : public Operator { \
@ -1643,47 +1549,23 @@ const Operator* MachineOperatorBuilder::MemBarrier() {
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
AtomicLoadParameters params) {
#define CACHED_LOAD(Type) \
if (params.representation() == MachineType::Type() && \
params.order() == AtomicMemoryOrder::kSeqCst) { \
return &cache_.kWord32SeqCstLoad##Type; \
}
ATOMIC_TYPE_LIST(CACHED_LOAD)
#undef CACHED_LOAD
#define LOAD(Type) \
if (params.representation() == MachineType::Type()) { \
return zone_->New<Operator1<AtomicLoadParameters>>( \
IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
"Word32AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kWord32AtomicLoad##Type; \
}
ATOMIC_TYPE_LIST(LOAD)
ATOMIC_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word32AtomicStore(
AtomicStoreParameters params) {
#define CACHED_STORE(kRep) \
if (params.representation() == MachineRepresentation::kRep && \
params.order() == AtomicMemoryOrder::kSeqCst) { \
return &cache_.kWord32SeqCstStore##kRep; \
}
ATOMIC_REPRESENTATION_LIST(CACHED_STORE)
#undef CACHED_STORE
#define STORE(kRep) \
if (params.representation() == MachineRepresentation::kRep) { \
return zone_->New<Operator1<AtomicStoreParameters>>( \
IrOpcode::kWord32AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word32AtomicStore", 3, 1, 1, 0, 1, 0, params); \
MachineRepresentation rep) {
#define STORE(kRep) \
if (rep == MachineRepresentation::kRep) { \
return &cache_.kWord32AtomicStore##kRep; \
}
ATOMIC_REPRESENTATION_LIST(STORE)
ATOMIC_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
}
@ -1760,49 +1642,24 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
AtomicLoadParameters params) {
#define CACHED_LOAD(Type) \
if (params.representation() == MachineType::Type() && \
params.order() == AtomicMemoryOrder::kSeqCst) { \
return &cache_.kWord64SeqCstLoad##Type; \
}
ATOMIC_U64_TYPE_LIST(CACHED_LOAD)
#undef CACHED_LOAD
#define LOAD(Type) \
if (params.representation() == MachineType::Type()) { \
return zone_->New<Operator1<AtomicLoadParameters>>( \
IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
"Word64AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kWord64AtomicLoad##Type; \
}
ATOMIC_U64_TYPE_LIST(LOAD)
ATOMIC64_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicStore(
AtomicStoreParameters params) {
#define CACHED_STORE(kRep) \
if (params.representation() == MachineRepresentation::kRep && \
params.order() == AtomicMemoryOrder::kSeqCst) { \
return &cache_.kWord64SeqCstStore##kRep; \
}
ATOMIC64_REPRESENTATION_LIST(CACHED_STORE)
#undef CACHED_STORE
#define STORE(kRep) \
if (params.representation() == MachineRepresentation::kRep) { \
return zone_->New<Operator1<AtomicStoreParameters>>( \
IrOpcode::kWord64AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word64AtomicStore", 3, 1, 1, 0, 1, 0, params); \
MachineRepresentation rep) {
#define STORE(kRep) \
if (rep == MachineRepresentation::kRep) { \
return &cache_.kWord64AtomicStore##kRep; \
}
ATOMIC64_REPRESENTATION_LIST(STORE)
ATOMIC64_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
}
@ -1877,24 +1734,12 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairLoad(
AtomicMemoryOrder order) {
if (order == AtomicMemoryOrder::kSeqCst) {
return &cache_.kWord32SeqCstPairLoad;
}
return zone_->New<Operator1<AtomicMemoryOrder>>(
IrOpcode::kWord32AtomicPairLoad, Operator::kNoDeopt | Operator::kNoThrow,
"Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0, order);
const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
return &cache_.kWord32AtomicPairLoad;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairStore(
AtomicMemoryOrder order) {
if (order == AtomicMemoryOrder::kSeqCst) {
return &cache_.kWord32SeqCstPairStore;
}
return zone_->New<Operator1<AtomicMemoryOrder>>(
IrOpcode::kWord32AtomicPairStore, Operator::kNoDeopt | Operator::kNoThrow,
"Word32AtomicPairStore", 4, 1, 1, 0, 1, 0, order);
const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
return &cache_.kWord32AtomicPairStore;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
@ -2018,12 +1863,8 @@ StackCheckKind StackCheckKindOf(Operator const* op) {
#undef ATOMIC_TYPE_LIST
#undef ATOMIC_U64_TYPE_LIST
#undef ATOMIC_U32_TYPE_LIST
#undef ATOMIC_TAGGED_TYPE_LIST
#undef ATOMIC64_TAGGED_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
#undef ATOMIC_TAGGED_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
#undef ATOMIC64_TAGGED_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
#undef LOAD_TRANSFORM_LIST

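For context on the machine-operator hunks above: the reverted builder kept statically cached operator singletons for the common sequentially consistent parameterization and zone-allocated an Operator1 only for other memory orders. A standalone sketch of that cache-the-common-case pattern (types and names are illustrative, not V8's):

#include <memory>
#include <vector>

enum class Order { kAcqRel, kSeqCst };

struct Op {
  explicit Op(Order o) : order(o) {}
  Order order;
};

class Builder {
 public:
  const Op* AtomicLoad(Order order) {
    // Common case: hand out the shared, statically constructed operator.
    if (order == Order::kSeqCst) return &cached_seq_cst_;
    // Rare case: allocate a fresh parameterized operator and keep it alive.
    owned_.push_back(std::make_unique<Op>(order));
    return owned_.back().get();
  }

 private:
  Op cached_seq_cst_{Order::kSeqCst};
  std::vector<std::unique_ptr<Op>> owned_;
};
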
@ -8,7 +8,6 @@
#include "src/base/compiler-specific.h"
#include "src/base/enum-set.h"
#include "src/base/flags.h"
#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/globals.h"
#include "src/compiler/write-barrier-kind.h"
@ -51,32 +50,6 @@ using LoadRepresentation = MachineType;
V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
V8_WARN_UNUSED_RESULT;
// A Word(32|64)AtomicLoad needs both a LoadRepresentation and a memory
// order.
class AtomicLoadParameters final {
public:
AtomicLoadParameters(LoadRepresentation representation,
AtomicMemoryOrder order)
: representation_(representation), order_(order) {}
LoadRepresentation representation() const { return representation_; }
AtomicMemoryOrder order() const { return order_; }
private:
LoadRepresentation representation_;
AtomicMemoryOrder order_;
};
V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
bool operator!=(AtomicLoadParameters, AtomicLoadParameters);
size_t hash_value(AtomicLoadParameters);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
V8_WARN_UNUSED_RESULT;
enum class MemoryAccessKind {
kNormal,
kUnaligned,
@ -158,43 +131,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
V8_EXPORT_PRIVATE StoreRepresentation const& StoreRepresentationOf(
Operator const*) V8_WARN_UNUSED_RESULT;
// A Word(32|64)AtomicStore needs both a StoreRepresentation and a memory order.
class AtomicStoreParameters final {
public:
AtomicStoreParameters(MachineRepresentation representation,
WriteBarrierKind write_barrier_kind,
AtomicMemoryOrder order)
: store_representation_(representation, write_barrier_kind),
order_(order) {}
MachineRepresentation representation() const {
return store_representation_.representation();
}
WriteBarrierKind write_barrier_kind() const {
return store_representation_.write_barrier_kind();
}
AtomicMemoryOrder order() const { return order_; }
StoreRepresentation store_representation() const {
return store_representation_;
}
private:
StoreRepresentation store_representation_;
AtomicMemoryOrder order_;
};
V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
bool operator!=(AtomicStoreParameters, AtomicStoreParameters);
size_t hash_value(AtomicStoreParameters);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
AtomicStoreParameters);
V8_EXPORT_PRIVATE AtomicStoreParameters const& AtomicStoreParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
// An UnalignedStore needs a MachineType.
using UnalignedStoreRepresentation = MachineRepresentation;
@ -237,6 +173,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
Operator const* op) V8_WARN_UNUSED_RESULT;
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
V8_WARN_UNUSED_RESULT;
MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
class S128ImmediateParameter {
@ -956,13 +895,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* MemBarrier();
// atomic-load [base + index]
const Operator* Word32AtomicLoad(AtomicLoadParameters params);
const Operator* Word32AtomicLoad(LoadRepresentation rep);
// atomic-load [base + index]
const Operator* Word64AtomicLoad(AtomicLoadParameters params);
const Operator* Word64AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
const Operator* Word32AtomicStore(AtomicStoreParameters params);
const Operator* Word32AtomicStore(MachineRepresentation rep);
// atomic-store [base + index], value
const Operator* Word64AtomicStore(AtomicStoreParameters params);
const Operator* Word64AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value
const Operator* Word32AtomicExchange(MachineType type);
// atomic-exchange [base + index], value
@ -992,9 +931,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType type);
// atomic-pair-load [base + index]
const Operator* Word32AtomicPairLoad(AtomicMemoryOrder order);
const Operator* Word32AtomicPairLoad();
// atomic-pair-store [base + index], value_high, value_low
const Operator* Word32AtomicPairStore(AtomicMemoryOrder order);
const Operator* Word32AtomicPairStore();
// atomic-pair-add [base + index], value_high, value_low
const Operator* Word32AtomicPairAdd();
// atomic-pair-sub [base + index], value_high, value-low

@ -239,20 +239,20 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Atomic memory operations.
Node* AtomicLoad(AtomicLoadParameters rep, Node* base, Node* index) {
DCHECK_NE(rep.representation().representation(),
MachineRepresentation::kWord64);
return AddNode(machine()->Word32AtomicLoad(rep), base, index);
Node* AtomicLoad(MachineType type, Node* base, Node* index) {
DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
return AddNode(machine()->Word32AtomicLoad(type), base, index);
}
Node* AtomicLoad64(AtomicLoadParameters rep, Node* base, Node* index) {
Node* AtomicLoad64(Node* base, Node* index) {
if (machine()->Is64()) {
// This uses Uint64() intentionally: AtomicLoad is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
return AddNode(machine()->Word64AtomicLoad(rep), base, index);
return AddNode(machine()->Word64AtomicLoad(MachineType::Uint64()), base,
index);
} else {
return AddNode(machine()->Word32AtomicPairLoad(rep.order()), base, index);
return AddNode(machine()->Word32AtomicPairLoad(), base, index);
}
}
@ -262,24 +262,22 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
#define VALUE_HALVES value, value_high
#endif
Node* AtomicStore(AtomicStoreParameters params, Node* base, Node* index,
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
Node* value) {
DCHECK(!IsMapOffsetConstantMinusTag(index));
DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
return AddNode(machine()->Word32AtomicStore(params), base, index, value);
DCHECK_NE(rep, MachineRepresentation::kWord64);
return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
}
Node* AtomicStore64(AtomicStoreParameters params, Node* base, Node* index,
Node* value, Node* value_high) {
Node* AtomicStore64(Node* base, Node* index, Node* value, Node* value_high) {
if (machine()->Is64()) {
DCHECK_NULL(value_high);
return AddNode(machine()->Word64AtomicStore(params), base, index, value);
return AddNode(
machine()->Word64AtomicStore(MachineRepresentation::kWord64), base,
index, value);
} else {
DCHECK(params.representation() != MachineRepresentation::kTaggedPointer &&
params.representation() != MachineRepresentation::kTaggedSigned &&
params.representation() != MachineRepresentation::kTagged);
return AddNode(machine()->Word32AtomicPairStore(params.order()), base,
index, VALUE_HALVES);
return AddNode(machine()->Word32AtomicPairStore(), base, index,
VALUE_HALVES);
}
}

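For context on the raw-assembler hunks above: on 32-bit targets a 64-bit atomic store is lowered to Word32AtomicPairStore, which takes the value as separate low and high words (see the VALUE_HALVES macro). A trivial standalone sketch of that split (illustrative helper, not V8 code):

#include <cstdint>
#include <utility>

std::pair<uint32_t, uint32_t> SplitWord64(uint64_t value) {
  uint32_t low = static_cast<uint32_t>(value);         // bits 0..31
  uint32_t high = static_cast<uint32_t>(value >> 32);  // bits 32..63
  return {low, high};
}
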
@ -5175,26 +5175,16 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
const Operator* (MachineOperatorBuilder::*)(MachineType);
using OperatorByRep =
const Operator* (MachineOperatorBuilder::*)(MachineRepresentation);
using OperatorByAtomicLoadRep =
const Operator* (MachineOperatorBuilder::*)(AtomicLoadParameters);
using OperatorByAtomicStoreRep =
const Operator* (MachineOperatorBuilder::*)(AtomicStoreParameters);
const Type type;
const MachineType machine_type;
const OperatorByType operator_by_type = nullptr;
const OperatorByRep operator_by_rep = nullptr;
const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
: type(t), machine_type(m), operator_by_type(o) {}
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
: type(t), machine_type(m), operator_by_rep(o) {}
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicLoadRep o)
: type(t), machine_type(m), operator_by_atomic_load_params(o) {}
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o)
: type(t), machine_type(m), operator_by_atomic_store_rep(o) {}
// Constexpr, hence just a table lookup in most compilers.
static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@ -5303,21 +5293,11 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (info.type != AtomicOpInfo::kSpecial) {
const Operator* op;
if (info.operator_by_type) {
op = (mcgraph()->machine()->*info.operator_by_type)(info.machine_type);
} else if (info.operator_by_rep) {
op = (mcgraph()->machine()->*info.operator_by_rep)(
info.machine_type.representation());
} else if (info.operator_by_atomic_load_params) {
op = (mcgraph()->machine()->*info.operator_by_atomic_load_params)(
AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst));
} else {
op = (mcgraph()->machine()->*info.operator_by_atomic_store_rep)(
AtomicStoreParameters(info.machine_type.representation(),
WriteBarrierKind::kNoWriteBarrier,
AtomicMemoryOrder::kSeqCst));
}
const Operator* op =
info.operator_by_type
? (mcgraph()->machine()->*info.operator_by_type)(info.machine_type)
: (mcgraph()->machine()->*info.operator_by_rep)(
info.machine_type.representation());
Node* input_nodes[6] = {MemBuffer(capped_offset), index};
int num_actual_inputs = info.type;

@ -89,7 +89,6 @@ v8_source_set("cctest_sources") {
"compiler/function-tester.cc",
"compiler/function-tester.h",
"compiler/node-observer-tester.h",
"compiler/test-atomic-load-store-codegen.cc",
"compiler/test-basic-block-profiler.cc",
"compiler/test-branch-combine.cc",
"compiler/test-calls-with-arraylike-or-spread.cc",

@ -623,7 +623,6 @@
'codegen-tester/*': [SKIP],
'test-accessor-assembler/*': [SKIP],
'test-assembler-*': [SKIP],
'test-atomic-load-store-codegen/*': [SKIP],
'test-basic-block-profiler/*': [SKIP],
'test-branch-combine/*': [SKIP],
'test-calls-with-arraylike-or-spread/*': [SKIP],

@ -1,398 +0,0 @@
// Copyright 2021 the V8 project authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
#include "src/base/bits.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
namespace v8 {
namespace internal {
namespace compiler {
#if V8_TARGET_LITTLE_ENDIAN
#define LSB(addr, bytes) addr
#elif V8_TARGET_BIG_ENDIAN
#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
#else
#error "Unknown Architecture"
#endif
#define TEST_ATOMIC_LOAD_INTEGER(ctype, itype, mach_type, order) \
do { \
ctype buffer[1]; \
\
RawMachineAssemblerTester<ctype> m; \
Node* base = m.PointerConstant(&buffer[0]); \
Node* index = m.Int32Constant(0); \
AtomicLoadParameters params(mach_type, order); \
if (mach_type.MemSize() == 8) { \
m.Return(m.AtomicLoad64(params, base, index)); \
} else { \
m.Return(m.AtomicLoad(params, base, index)); \
} \
\
FOR_INPUTS(ctype, itype, i) { \
buffer[0] = i; \
CHECK_EQ(i, m.Call()); \
} \
} while (false)
TEST(AcquireLoadInteger) {
TEST_ATOMIC_LOAD_INTEGER(int8_t, int8, MachineType::Int8(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_LOAD_INTEGER(uint8_t, uint8, MachineType::Uint8(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_LOAD_INTEGER(int16_t, int16, MachineType::Int16(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_LOAD_INTEGER(uint16_t, uint16, MachineType::Uint16(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_LOAD_INTEGER(int32_t, int32, MachineType::Int32(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_LOAD_INTEGER(uint32_t, uint32, MachineType::Uint32(),
AtomicMemoryOrder::kAcqRel);
#if V8_TARGET_ARCH_64_BIT
TEST_ATOMIC_LOAD_INTEGER(uint64_t, uint64, MachineType::Uint64(),
AtomicMemoryOrder::kAcqRel);
#endif
}
TEST(SeqCstLoadInteger) {
TEST_ATOMIC_LOAD_INTEGER(int8_t, int8, MachineType::Int8(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_LOAD_INTEGER(uint8_t, uint8, MachineType::Uint8(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_LOAD_INTEGER(int16_t, int16, MachineType::Int16(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_LOAD_INTEGER(uint16_t, uint16, MachineType::Uint16(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_LOAD_INTEGER(int32_t, int32, MachineType::Int32(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_LOAD_INTEGER(uint32_t, uint32, MachineType::Uint32(),
AtomicMemoryOrder::kSeqCst);
#if V8_TARGET_ARCH_64_BIT
TEST_ATOMIC_LOAD_INTEGER(uint64_t, uint64, MachineType::Uint64(),
AtomicMemoryOrder::kSeqCst);
#endif
}
namespace {
// Mostly same as CHECK_EQ() but customized for compressed tagged values.
template <typename CType>
void CheckEq(CType in_value, CType out_value) {
CHECK_EQ(in_value, out_value);
}
#ifdef V8_COMPRESS_POINTERS
// Specializations for checking the result of a compressing store.
template <>
void CheckEq<Object>(Object in_value, Object out_value) {
// Compare only lower 32-bits of the value because tagged load/stores are
// 32-bit operations anyway.
CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
static_cast<Tagged_t>(out_value.ptr()));
}
template <>
void CheckEq<HeapObject>(HeapObject in_value, HeapObject out_value) {
return CheckEq<Object>(in_value, out_value);
}
template <>
void CheckEq<Smi>(Smi in_value, Smi out_value) {
return CheckEq<Object>(in_value, out_value);
}
#endif
template <typename TaggedT>
void InitBuffer(TaggedT* buffer, size_t length, MachineType type) {
const size_t kBufferSize = sizeof(TaggedT) * length;
// Tagged field loads require values to be properly tagged because of
// pointer decompression that may be happening during load.
Isolate* isolate = CcTest::InitIsolateOnce();
Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
if (type.IsTaggedSigned()) {
for (size_t i = 0; i < length; i++) {
smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
}
} else {
memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
if (!type.IsTaggedPointer()) {
// Also add some Smis if we are checking AnyTagged case.
for (size_t i = 0; i < length / 2; i++) {
smi_view[i] =
Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
}
}
}
}
template <typename TaggedT>
void AtomicLoadTagged(MachineType type, AtomicMemoryOrder order) {
const int kNumElems = 16;
TaggedT buffer[kNumElems];
InitBuffer(buffer, kNumElems, type);
for (int i = 0; i < kNumElems; i++) {
BufferedRawMachineAssemblerTester<TaggedT> m;
TaggedT* base_pointer = &buffer[0];
if (COMPRESS_POINTERS_BOOL) {
base_pointer = reinterpret_cast<TaggedT*>(LSB(base_pointer, kTaggedSize));
}
Node* base = m.PointerConstant(base_pointer);
Node* index = m.Int32Constant(i * sizeof(buffer[0]));
AtomicLoadParameters params(type, order);
Node* load;
if (kTaggedSize == 8) {
load = m.AtomicLoad64(params, base, index);
} else {
load = m.AtomicLoad(params, base, index);
}
m.Return(load);
CheckEq<TaggedT>(buffer[i], m.Call());
}
}
} // namespace
TEST(AcquireLoadTagged) {
AtomicLoadTagged<Smi>(MachineType::TaggedSigned(),
AtomicMemoryOrder::kAcqRel);
AtomicLoadTagged<HeapObject>(MachineType::TaggedPointer(),
AtomicMemoryOrder::kAcqRel);
AtomicLoadTagged<Object>(MachineType::AnyTagged(),
AtomicMemoryOrder::kAcqRel);
}
TEST(SeqCstLoadTagged) {
AtomicLoadTagged<Smi>(MachineType::TaggedSigned(),
AtomicMemoryOrder::kSeqCst);
AtomicLoadTagged<HeapObject>(MachineType::TaggedPointer(),
AtomicMemoryOrder::kSeqCst);
AtomicLoadTagged<Object>(MachineType::AnyTagged(),
AtomicMemoryOrder::kSeqCst);
}
#define TEST_ATOMIC_STORE_INTEGER(ctype, itype, mach_type, order) \
do { \
ctype buffer[1]; \
buffer[0] = static_cast<ctype>(-1); \
\
BufferedRawMachineAssemblerTester<int32_t> m(mach_type); \
Node* value = m.Parameter(0); \
Node* base = m.PointerConstant(&buffer[0]); \
Node* index = m.Int32Constant(0); \
AtomicStoreParameters params(mach_type.representation(), kNoWriteBarrier, \
order); \
if (mach_type.MemSize() == 8) { \
m.AtomicStore64(params, base, index, value, nullptr); \
} else { \
m.AtomicStore(params, base, index, value); \
} \
\
int32_t OK = 0x29000; \
m.Return(m.Int32Constant(OK)); \
\
FOR_INPUTS(ctype, itype, i) { \
CHECK_EQ(OK, m.Call(i)); \
CHECK_EQ(i, buffer[0]); \
} \
} while (false)
TEST(ReleaseStoreInteger) {
TEST_ATOMIC_STORE_INTEGER(int8_t, int8, MachineType::Int8(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_STORE_INTEGER(uint8_t, uint8, MachineType::Uint8(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_STORE_INTEGER(int16_t, int16, MachineType::Int16(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_STORE_INTEGER(uint16_t, uint16, MachineType::Uint16(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_STORE_INTEGER(int32_t, int32, MachineType::Int32(),
AtomicMemoryOrder::kAcqRel);
TEST_ATOMIC_STORE_INTEGER(uint32_t, uint32, MachineType::Uint32(),
AtomicMemoryOrder::kAcqRel);
#if V8_TARGET_ARCH_64_BIT
TEST_ATOMIC_STORE_INTEGER(uint64_t, uint64, MachineType::Uint64(),
AtomicMemoryOrder::kAcqRel);
#endif
}
TEST(SeqCstStoreInteger) {
TEST_ATOMIC_STORE_INTEGER(int8_t, int8, MachineType::Int8(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_STORE_INTEGER(uint8_t, uint8, MachineType::Uint8(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_STORE_INTEGER(int16_t, int16, MachineType::Int16(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_STORE_INTEGER(uint16_t, uint16, MachineType::Uint16(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_STORE_INTEGER(int32_t, int32, MachineType::Int32(),
AtomicMemoryOrder::kSeqCst);
TEST_ATOMIC_STORE_INTEGER(uint32_t, uint32, MachineType::Uint32(),
AtomicMemoryOrder::kSeqCst);
#if V8_TARGET_ARCH_64_BIT
TEST_ATOMIC_STORE_INTEGER(uint64_t, uint64, MachineType::Uint64(),
AtomicMemoryOrder::kSeqCst);
#endif
}
namespace {
template <typename TaggedT>
void AtomicStoreTagged(MachineType type, AtomicMemoryOrder order) {
// This tests that tagged values are correctly transferred by atomic loads and
// stores from in_buffer to out_buffer. For each particular element in
// in_buffer, it is copied to a different index in out_buffer, and all other
// indices are zapped, to test that instructions of the correct width are emitted.
const int kNumElems = 16;
TaggedT in_buffer[kNumElems];
TaggedT out_buffer[kNumElems];
uintptr_t zap_data[] = {kZapValue, kZapValue};
TaggedT zap_value;
STATIC_ASSERT(sizeof(TaggedT) <= sizeof(zap_data));
MemCopy(&zap_value, &zap_data, sizeof(TaggedT));
InitBuffer(in_buffer, kNumElems, type);
#ifdef V8_TARGET_BIG_ENDIAN
int offset = sizeof(TaggedT) - ElementSizeInBytes(type.representation());
#else
int offset = 0;
#endif
for (int32_t x = 0; x < kNumElems; x++) {
int32_t y = kNumElems - x - 1;
RawMachineAssemblerTester<int32_t> m;
int32_t OK = 0x29000 + x;
Node* in_base = m.PointerConstant(in_buffer);
Node* in_index = m.IntPtrConstant(x * sizeof(TaggedT) + offset);
Node* out_base = m.PointerConstant(out_buffer);
Node* out_index = m.IntPtrConstant(y * sizeof(TaggedT) + offset);
Node* load;
AtomicLoadParameters load_params(type, order);
AtomicStoreParameters store_params(type.representation(), kNoWriteBarrier,
order);
if (kTaggedSize == 4) {
load = m.AtomicLoad(load_params, in_base, in_index);
m.AtomicStore(store_params, out_base, out_index, load);
} else {
DCHECK(m.machine()->Is64());
load = m.AtomicLoad64(load_params, in_base, in_index);
m.AtomicStore64(store_params, out_base, out_index, load, nullptr);
}
m.Return(m.Int32Constant(OK));
for (int32_t z = 0; z < kNumElems; z++) {
out_buffer[z] = zap_value;
}
CHECK_NE(in_buffer[x], out_buffer[y]);
CHECK_EQ(OK, m.Call());
// Mostly same as CHECK_EQ() but customized for compressed tagged values.
CheckEq<TaggedT>(in_buffer[x], out_buffer[y]);
for (int32_t z = 0; z < kNumElems; z++) {
if (z != y) CHECK_EQ(zap_value, out_buffer[z]);
}
}
}
} // namespace
TEST(ReleaseStoreTagged) {
AtomicStoreTagged<Smi>(MachineType::TaggedSigned(),
AtomicMemoryOrder::kAcqRel);
AtomicStoreTagged<HeapObject>(MachineType::TaggedPointer(),
AtomicMemoryOrder::kAcqRel);
AtomicStoreTagged<Object>(MachineType::AnyTagged(),
AtomicMemoryOrder::kAcqRel);
}
TEST(SeqCstStoreTagged) {
AtomicStoreTagged<Smi>(MachineType::TaggedSigned(),
AtomicMemoryOrder::kSeqCst);
AtomicStoreTagged<HeapObject>(MachineType::TaggedPointer(),
AtomicMemoryOrder::kSeqCst);
AtomicStoreTagged<Object>(MachineType::AnyTagged(),
AtomicMemoryOrder::kSeqCst);
}
#if V8_TARGET_ARCH_32_BIT
namespace {
void TestAtomicPairLoadInteger(AtomicMemoryOrder order) {
uint64_t buffer[1];
uint32_t high;
uint32_t low;
BufferedRawMachineAssemblerTester<int32_t> m;
Node* base = m.PointerConstant(&buffer[0]);
Node* index = m.Int32Constant(0);
Node* pair_load = m.AtomicLoad64(
AtomicLoadParameters(MachineType::Uint64(), order), base, index);
m.StoreToPointer(&low, MachineRepresentation::kWord32,
m.Projection(0, pair_load));
m.StoreToPointer(&high, MachineRepresentation::kWord32,
m.Projection(1, pair_load));
int32_t OK = 0x29000;
m.Return(m.Int32Constant(OK));
FOR_UINT64_INPUTS(i) {
buffer[0] = i;
CHECK_EQ(OK, m.Call());
CHECK_EQ(i, make_uint64(high, low));
}
}
} // namespace
TEST(AcquirePairLoadInteger) {
TestAtomicPairLoadInteger(AtomicMemoryOrder::kAcqRel);
}
TEST(SeqCstPairLoadInteger) {
TestAtomicPairLoadInteger(AtomicMemoryOrder::kSeqCst);
}
namespace {
void TestAtomicPairStoreInteger(AtomicMemoryOrder order) {
uint64_t buffer[1];
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
Node* base = m.PointerConstant(&buffer[0]);
Node* index = m.Int32Constant(0);
m.AtomicStore64(AtomicStoreParameters(MachineRepresentation::kWord64,
kNoWriteBarrier, order),
base, index, m.Parameter(0), m.Parameter(1));
int32_t OK = 0x29000;
m.Return(m.Int32Constant(OK));
FOR_UINT64_INPUTS(i) {
CHECK_EQ(OK, m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
static_cast<uint32_t>(i >> 32)));
CHECK_EQ(i, buffer[0]);
}
}
} // namespace
TEST(ReleasePairStoreInteger) {
TestAtomicPairStoreInteger(AtomicMemoryOrder::kAcqRel);
}
TEST(SeqCstPairStoreInteger) {
TestAtomicPairStoreInteger(AtomicMemoryOrder::kSeqCst);
}
#endif // V8_TARGET_ARCH_32_BIT
} // namespace compiler
} // namespace internal
} // namespace v8
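
The deleted tests exercise only two orderings, kAcqRel and kSeqCst; as the test names (AcquireLoad*, ReleaseStore*, SeqCst*) indicate, kAcqRel corresponds to acquire semantics on loads and release semantics on stores. A rough standard C++ analogue, shown purely to illustrate the intended semantics (plain std::atomic, not V8 code):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> cell{0};

// Analogue of the AcquireLoad* tests: kAcqRel on a load is an acquire load.
uint32_t AcquireLoad() { return cell.load(std::memory_order_acquire); }

// Analogue of the ReleaseStore* tests: kAcqRel on a store is a release store.
void ReleaseStore(uint32_t v) { cell.store(v, std::memory_order_release); }

// Analogues of the SeqCst* tests: sequentially consistent load and store.
uint32_t SeqCstLoad() { return cell.load(std::memory_order_seq_cst); }
void SeqCstStore(uint32_t v) { cell.store(v, std::memory_order_seq_cst); }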