Revert "Reland "ARM64: Add NEON support""

This reverts commit 8faf3d6f25.

Reason: blocks roll https://codereview.chromium.org/2820753003/

TBR=martyn.capewell@arm.com,jarin@chromium.org,bmeurer@chromium.org,machenbach@chromium.org

NOTRY=true

Review-Url: https://codereview.chromium.org/2819093002
Cr-Commit-Position: refs/heads/master@{#44660}
Author: hablich
Date: 2017-04-15 03:27:17 -07:00
Committed-by: Commit bot
Parent: a9e04c5ff1
Commit: c5aad5f284

41 changed files with 2621 additions and 30238 deletions

.gitignore

@@ -46,7 +46,6 @@
/src/inspector/build/closure-compiler
/src/inspector/build/closure-compiler.tar.gz
/test/benchmarks/data
/test/cctest/traces-arm64
/test/fuzzer/wasm
/test/fuzzer/wasm.tar.gz
/test/fuzzer/wasm_asmjs

BUILD.gn

@@ -2132,7 +2132,6 @@ v8_source_set("v8_base") {
"src/arm64/macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/simulator-logic-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",

DEPS

@@ -37,8 +37,6 @@ deps = {
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "230f9fc5688ce76bfaa99aba5f680a159eaac9e2",
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
"v8/test/cctest/traces-arm64":
Var("chromium_url") + "/external/git.linaro.org/arm/vixl-simulator-traces.git" + "@" + "6168e7e1eec52c9cb0a62f87f94df0582dc48aa8",
"v8/tools/clang":
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "49df471350a60efaec6951f321dd65475496ba17",
"v8/test/wasm-js":

src/arm64/assembler-arm64-inl.h

@@ -57,15 +57,6 @@ inline int CPURegister::SizeInBytes() const {
return reg_size / 8;
}
inline bool CPURegister::Is8Bits() const {
DCHECK(IsValid());
return reg_size == 8;
}
inline bool CPURegister::Is16Bits() const {
DCHECK(IsValid());
return reg_size == 16;
}
inline bool CPURegister::Is32Bits() const {
DCHECK(IsValid());
@@ -78,13 +69,9 @@ inline bool CPURegister::Is64Bits() const {
return reg_size == 64;
}
inline bool CPURegister::Is128Bits() const {
DCHECK(IsValid());
return reg_size == 128;
}
inline bool CPURegister::IsValid() const {
if (IsValidRegister() || IsValidVRegister()) {
if (IsValidRegister() || IsValidFPRegister()) {
DCHECK(!IsNone());
return true;
} else {
@@ -100,14 +87,14 @@ inline bool CPURegister::IsValidRegister() const {
((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}
inline bool CPURegister::IsValidVRegister() const {
return IsVRegister() &&
((reg_size == kBRegSizeInBits) || (reg_size == kHRegSizeInBits) ||
(reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits) ||
(reg_size == kQRegSizeInBits)) &&
(reg_code < kNumberOfVRegisters);
inline bool CPURegister::IsValidFPRegister() const {
return IsFPRegister() &&
((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
(reg_code < kNumberOfFPRegisters);
}
inline bool CPURegister::IsNone() const {
// kNoRegister types should always have size 0 and code 0.
DCHECK((reg_type != kNoRegister) || (reg_code == 0));
@@ -133,7 +120,11 @@ inline bool CPURegister::IsRegister() const {
return reg_type == kRegister;
}
inline bool CPURegister::IsVRegister() const { return reg_type == kVRegister; }
inline bool CPURegister::IsFPRegister() const {
return reg_type == kFPRegister;
}
inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
return (reg_size == other.reg_size) && (reg_type == other.reg_type);
@@ -209,7 +200,7 @@ inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return csp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
DCHECK(code < kNumberOfRegisters);
return Register::Create(code, kXRegSizeInBits);
}
}
@@ -219,40 +210,23 @@ inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wcsp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
DCHECK(code < kNumberOfRegisters);
return Register::Create(code, kWRegSizeInBits);
}
}
inline VRegister VRegister::BRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kBRegSizeInBits);
inline FPRegister FPRegister::SRegFromCode(unsigned code) {
DCHECK(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kSRegSizeInBits);
}
inline VRegister VRegister::HRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kHRegSizeInBits);
inline FPRegister FPRegister::DRegFromCode(unsigned code) {
DCHECK(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kDRegSizeInBits);
}
inline VRegister VRegister::SRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kSRegSizeInBits);
}
inline VRegister VRegister::DRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kDRegSizeInBits);
}
inline VRegister VRegister::QRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kQRegSizeInBits);
}
inline VRegister VRegister::VRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kVRegSizeInBits);
}
inline Register CPURegister::W() const {
DCHECK(IsValidRegister());
@@ -265,34 +239,16 @@ inline Register CPURegister::X() const {
return Register::XRegFromCode(reg_code);
}
inline VRegister CPURegister::V() const {
DCHECK(IsValidVRegister());
return VRegister::VRegFromCode(reg_code);
inline FPRegister CPURegister::S() const {
DCHECK(IsValidFPRegister());
return FPRegister::SRegFromCode(reg_code);
}
inline VRegister CPURegister::B() const {
DCHECK(IsValidVRegister());
return VRegister::BRegFromCode(reg_code);
}
inline VRegister CPURegister::H() const {
DCHECK(IsValidVRegister());
return VRegister::HRegFromCode(reg_code);
}
inline VRegister CPURegister::S() const {
DCHECK(IsValidVRegister());
return VRegister::SRegFromCode(reg_code);
}
inline VRegister CPURegister::D() const {
DCHECK(IsValidVRegister());
return VRegister::DRegFromCode(reg_code);
}
inline VRegister CPURegister::Q() const {
DCHECK(IsValidVRegister());
return VRegister::QRegFromCode(reg_code);
inline FPRegister CPURegister::D() const {
DCHECK(IsValidFPRegister());
return FPRegister::DRegFromCode(reg_code);
}
@@ -535,7 +491,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
regoffset_ = NoReg;
} else if (offset.IsShiftedRegister()) {
DCHECK((addrmode == Offset) || (addrmode == PostIndex));
DCHECK(addrmode == Offset);
regoffset_ = offset.reg();
shift_ = offset.shift();
@@ -921,20 +877,21 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x : LDR_w;
} else {
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kBRegSizeInBits:
return LDR_b;
case kHRegSizeInBits:
return LDR_h;
case kSRegSizeInBits:
return LDR_s;
case kDRegSizeInBits:
return LDR_d;
default:
DCHECK(rt.IsQ());
return LDR_q;
}
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDR_d : LDR_s;
}
}
LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDP_x : LDP_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDP_d : LDP_s;
}
}
@@ -944,29 +901,11 @@ LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? STR_x : STR_w;
} else {
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kBRegSizeInBits:
return STR_b;
case kHRegSizeInBits:
return STR_h;
case kSRegSizeInBits:
return STR_s;
case kDRegSizeInBits:
return STR_d;
default:
DCHECK(rt.IsQ());
return STR_q;
}
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STR_d : STR_s;
}
}
LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w);
return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
LoadStorePairLBit);
}
LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
@@ -975,16 +914,8 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
if (rt.IsRegister()) {
return rt.Is64Bits() ? STP_x : STP_w;
} else {
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kSRegSizeInBits:
return STP_s;
case kDRegSizeInBits:
return STP_d;
default:
DCHECK(rt.IsQ());
return STP_q;
}
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STP_d : STP_s;
}
}
@@ -993,7 +924,7 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
} else {
DCHECK(rt.IsVRegister());
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
}
}
@@ -1177,8 +1108,9 @@ Instr Assembler::ImmLS(int imm9) {
return truncate_to_int9(imm9) << ImmLS_offset;
}
Instr Assembler::ImmLSPair(int imm7, unsigned size) {
DCHECK_EQ((imm7 >> size) << size, imm7);
Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
DCHECK(((imm7 >> size) << size) == imm7);
int scaled_imm7 = imm7 >> size;
DCHECK(is_int7(scaled_imm7));
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
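For example, STP x2, x3, [sp, #16] stores a pair of eight-byte registers, so size is 3 (log2 of the access size) and the offset is encoded scaled. A minimal standalone sketch of the scaling, using the same names as above (illustrative, not part of the patch):

#include <cassert>

int main() {
  int imm7 = 16;      // byte offset from STP x2, x3, [sp, #16]
  unsigned size = 3;  // log2(8): an X-register (or D-register) pair
  int scaled_imm7 = imm7 >> size;         // 2 is what gets encoded
  assert((scaled_imm7 << size) == imm7);  // the DCHECK: offset must be size-aligned
  return 0;
}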
@@ -1220,17 +1152,10 @@ Instr Assembler::ImmBarrierType(int imm2) {
return imm2 << ImmBarrierType_offset;
}
unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
unsigned size = static_cast<Instr>(op >> LSSize_offset);
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
// and "opc" fields.
if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
size = kQRegSizeLog2;
}
}
return size;
LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
return static_cast<LSDataSize>(op >> SizeLS_offset);
}
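The removed NEON version above needs the extra opc check because Q-register loads and stores encode size<1:0> as 00. A standalone sketch of that fix-up, assuming the usual AArch64 SIMD&FP load/store field values (size<1:0> = 00, with opc<1:0> = 11 for LDR_q and 10 for STR_q):

#include <cstdio>

int main() {
  unsigned size = 0;  // size<1:0> field of LDR_q
  unsigned opc = 3;   // opc<1:0> field of LDR_q
  if ((size == 0) && (opc >= 2)) {
    size = 4;         // kQRegSizeLog2: a 16-byte access
  }
  std::printf("access size = %u bytes\n", 1u << size);
  return 0;
}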
@@ -1245,7 +1170,11 @@ Instr Assembler::ShiftMoveWide(int shift) {
return shift << ShiftMoveWide_offset;
}
Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; }
Instr Assembler::FPType(FPRegister fd) {
return fd.Is64Bits() ? FP64 : FP32;
}
Instr Assembler::FPScale(unsigned scale) {
DCHECK(is_uint6(scale));

File diff suppressed because it is too large

File diff suppressed because it is too large

src/arm64/code-stubs-arm64.cc

@@ -147,8 +147,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right, Register scratch,
VRegister double_scratch, Label* slow,
Condition cond) {
FPRegister double_scratch,
Label* slow, Condition cond) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@@ -292,9 +292,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// See call site for description.
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register left,
Register right, VRegister left_d,
VRegister right_d, Label* slow,
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register left,
Register right,
FPRegister left_d,
FPRegister right_d,
Label* slow,
bool strict) {
DCHECK(!AreAliased(left_d, right_d));
DCHECK((left.is(x0) && right.is(x1)) ||
@@ -473,8 +476,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// In case 3, we have found out that we were dealing with a number-number
// comparison. The double values of the numbers have been loaded, right into
// rhs_d, left into lhs_d.
VRegister rhs_d = d0;
VRegister lhs_d = d1;
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
__ Bind(&both_loaded_as_doubles);
@@ -610,7 +613,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CPURegList saved_regs = kCallerSaved;
CPURegList saved_fp_regs = kCallerSavedV;
CPURegList saved_fp_regs = kCallerSavedFP;
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -683,12 +686,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Register exponent_integer = MathPowIntegerDescriptor::exponent();
DCHECK(exponent_integer.is(x12));
Register saved_lr = x19;
VRegister result_double = d0;
VRegister base_double = d0;
VRegister exponent_double = d1;
VRegister base_double_copy = d2;
VRegister scratch1_double = d6;
VRegister scratch0_double = d7;
FPRegister result_double = d0;
FPRegister base_double = d0;
FPRegister exponent_double = d1;
FPRegister base_double_copy = d2;
FPRegister scratch1_double = d6;
FPRegister scratch0_double = d7;
// A fast-path for integer exponents.
Label exponent_is_smi, exponent_is_integer;
@@ -1646,8 +1649,8 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
Register result = x0;
Register rhs = x0;
Register lhs = x1;
VRegister rhs_d = d0;
VRegister lhs_d = d1;
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(lhs, &miss);
@@ -2106,7 +2109,7 @@ RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedV) {
saved_fp_regs_(kCallerSavedFP) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved

File diff suppressed because it is too large

src/arm64/decoder-arm64-inl.h

@@ -213,11 +213,6 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
DecodeNEONLoadStore(instr);
return;
}
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
@@ -231,6 +226,8 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
} else {
V::VisitLoadStoreAcquireRelease(instr);
}
} else {
DecodeAdvSIMDLoadStore(instr);
}
} else {
if ((instr->Bits(31, 30) == 0x3) ||
@@ -516,14 +513,16 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
DecodeNEONVectorDataProcessing(instr);
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bits(31, 30) == 0x3) {
if (instr->Bit(29) == 1) {
V::VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeNEONScalarDataProcessing(instr);
} else {
if (instr->Bit(29) == 0) {
if (instr->Bits(31, 30) == 0x3) {
V::VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bit(23) == 1) ||
@@ -630,190 +629,25 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
V::VisitFPDataProcessing3Source(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
}
}
template <typename V>
void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
DCHECK(instr->Bits(29, 25) == 0x6);
if (instr->Bit(31) == 0) {
if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
V::VisitUnallocated(instr);
return;
}
if (instr->Bit(23) == 0) {
if (instr->Bits(20, 16) == 0) {
if (instr->Bit(24) == 0) {
V::VisitNEONLoadStoreMultiStruct(instr);
} else {
V::VisitNEONLoadStoreSingleStruct(instr);
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(24) == 0) {
V::VisitNEONLoadStoreMultiStructPostIndex(instr);
} else {
V::VisitNEONLoadStoreSingleStructPostIndex(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
V::VisitUnimplemented(instr);
}
template <typename V>
void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
DCHECK(instr->Bits(28, 25) == 0x7);
if (instr->Bit(31) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(15) == 0) {
if (instr->Bit(10) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEONTable(instr);
} else {
V::VisitNEONPerm(instr);
}
} else {
V::VisitNEONExtract(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
V::VisitNEONCopy(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(10) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEON3Different(instr);
} else {
if (instr->Bits(18, 17) == 0) {
if (instr->Bit(20) == 0) {
if (instr->Bit(19) == 0) {
V::VisitNEON2RegMisc(instr);
} else {
if (instr->Bits(30, 29) == 0x2) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
if (instr->Bit(19) == 0) {
V::VisitNEONAcrossLanes(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitNEON3Same(instr);
}
}
} else {
if (instr->Bit(10) == 0) {
V::VisitNEONByIndexedElement(instr);
} else {
if (instr->Bit(23) == 0) {
if (instr->Bits(22, 19) == 0) {
V::VisitNEONModifiedImmediate(instr);
} else {
V::VisitNEONShiftImmediate(instr);
}
} else {
V::VisitUnallocated(instr);
}
}
}
} else {
V::VisitUnallocated(instr);
}
}
template <typename V>
void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
DCHECK(instr->Bits(28, 25) == 0xF);
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(15) == 0) {
if (instr->Bit(10) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(11) == 0) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
V::VisitNEONScalarCopy(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(10) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEONScalar3Diff(instr);
} else {
if (instr->Bits(18, 17) == 0) {
if (instr->Bit(20) == 0) {
if (instr->Bit(19) == 0) {
V::VisitNEONScalar2RegMisc(instr);
} else {
if (instr->Bit(29) == 0) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
if (instr->Bit(19) == 0) {
V::VisitNEONScalarPairwise(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitNEONScalar3Same(instr);
}
}
} else {
if (instr->Bit(10) == 0) {
V::VisitNEONScalarByIndexedElement(instr);
} else {
if (instr->Bit(23) == 0) {
V::VisitNEONScalarShiftImmediate(instr);
} else {
V::VisitUnallocated(instr);
}
}
}
template<typename V>
void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
DCHECK(instr->Bits(27, 25) == 0x7);
V::VisitUnimplemented(instr);
}

src/arm64/decoder-arm64.h

@@ -16,72 +16,50 @@ namespace internal {
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LoadStoreAcquireRelease) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(NEON2RegMisc) \
V(NEON3Different) \
V(NEON3Same) \
V(NEONAcrossLanes) \
V(NEONByIndexedElement) \
V(NEONCopy) \
V(NEONExtract) \
V(NEONLoadStoreMultiStruct) \
V(NEONLoadStoreMultiStructPostIndex) \
V(NEONLoadStoreSingleStruct) \
V(NEONLoadStoreSingleStructPostIndex) \
V(NEONModifiedImmediate) \
V(NEONScalar2RegMisc) \
V(NEONScalar3Diff) \
V(NEONScalar3Same) \
V(NEONScalarByIndexedElement) \
V(NEONScalarCopy) \
V(NEONScalarPairwise) \
V(NEONScalarShiftImmediate) \
V(NEONShiftImmediate) \
V(NEONTable) \
V(NEONPerm) \
V(Unallocated) \
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LoadStoreAcquireRelease) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(Unallocated) \
V(Unimplemented)
// The Visitor interface. Disassembler and simulator (and other tools)
@@ -131,8 +109,6 @@ class DispatchingDecoderVisitor : public DecoderVisitor {
// stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
void VisitNEONShiftImmediate(const Instruction* instr);
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
@@ -197,17 +173,12 @@ class Decoder : public V {
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeNEONLoadStore(Instruction* instr);
void DecodeAdvSIMDLoadStore(Instruction* instr);
// Decode the Advanced SIMD (NEON) data processing part of the instruction
// tree, and call the corresponding visitors.
// On entry, instruction bits 27:25 = 0x7.
void DecodeNEONVectorDataProcessing(Instruction* instr);
// Decode the Advanced SIMD (NEON) scalar data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0xF.
void DecodeNEONScalarDataProcessing(Instruction* instr);
void DecodeAdvSIMDDataProcessing(Instruction* instr);
};

src/arm64/deoptimizer-arm64.cc

@@ -99,13 +99,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all allocatable double registers.
CPURegList saved_double_registers(
CPURegister::kVRegister, kDRegSizeInBits,
CPURegister::kFPRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
__ PushCPURegList(saved_double_registers);
// Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kVRegister, kSRegSizeInBits,
CPURegister::kFPRegister, kSRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
__ PushCPURegList(saved_float_registers);

File diff suppressed because it is too large

src/arm64/disasm-arm64.h

@@ -5,7 +5,6 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"
@@ -30,13 +29,6 @@ class DisassemblingDecoder : public DecoderVisitor {
protected:
virtual void ProcessOutput(Instruction* instr);
// Default output functions. The functions below implement a default way of
// printing elements in the disassembly. A sub-class can override these to
// customize the disassembly output.
// Prints the name of a register.
virtual void AppendRegisterNameToOutput(const CPURegister& reg);
void Format(Instruction* instr, const char* mnemonic, const char* format);
void Substitute(Instruction* instr, const char* string);
int SubstituteField(Instruction* instr, const char* format);

src/arm64/instructions-arm64.cc

@@ -21,7 +21,7 @@ bool Instruction::IsLoad() const {
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case LDRB_w:
case LDRH_w:
@@ -32,12 +32,8 @@ bool Instruction::IsLoad() const {
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_b:
case LDR_h:
case LDR_s:
case LDR_d:
case LDR_q:
return true;
case LDR_d: return true;
default: return false;
}
}
@@ -52,18 +48,14 @@ bool Instruction::IsStore() const {
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_b:
case STR_h:
case STR_s:
case STR_d:
case STR_q:
return true;
case STR_d: return true;
default: return false;
}
}
@@ -147,48 +139,43 @@ uint64_t Instruction::ImmLogical() {
return 0;
}
uint32_t Instruction::ImmNEONabcdefgh() const {
return ImmNEONabc() << 5 | ImmNEONdefgh();
float Instruction::ImmFP32() {
// ImmFP: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = ImmFP();
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return rawbits_to_float(result);
}
float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); }
double Instruction::ImmFP64() { return Imm8ToFP64(ImmFP()); }
double Instruction::ImmFP64() {
// ImmFP: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = ImmFP();
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
float Instruction::ImmNEONFP32() const { return Imm8ToFP32(ImmNEONabcdefgh()); }
double Instruction::ImmNEONFP64() const {
return Imm8ToFP64(ImmNEONabcdefgh());
return rawbits_to_double(result);
}
unsigned CalcLSDataSize(LoadStoreOp op) {
DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
kInstructionSize * 8);
unsigned size = static_cast<Instr>(op) >> LSSize_offset;
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
// and "opc" fields.
if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
size = kQRegSizeLog2;
}
}
return size;
}
unsigned CalcLSPairDataSize(LoadStorePairOp op) {
static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
switch (op) {
case STP_q:
case LDP_q:
return kQRegSizeLog2;
case STP_x:
case LDP_x:
case STP_d:
case LDP_d:
return kXRegSizeLog2;
default:
return kWRegSizeLog2;
case LDP_d: return LSDoubleWord;
default: return LSWord;
}
}
@@ -347,416 +334,7 @@ uint64_t InstructionSequence::InlineData() const {
return payload;
}
VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
DCHECK(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
vform == kFormatH || vform == kFormatS || vform == kFormatD);
switch (vform) {
case kFormat8H:
return kFormat8B;
case kFormat4S:
return kFormat4H;
case kFormat2D:
return kFormat2S;
case kFormatH:
return kFormatB;
case kFormatS:
return kFormatH;
case kFormatD:
return kFormatS;
default:
UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
vform == kFormatB || vform == kFormatH || vform == kFormatS);
switch (vform) {
case kFormat8B:
return kFormat8H;
case kFormat4H:
return kFormat4S;
case kFormat2S:
return kFormat2D;
case kFormatB:
return kFormatH;
case kFormatH:
return kFormatS;
case kFormatS:
return kFormatD;
default:
UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatFillQ(VectorFormat vform) {
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return kFormat16B;
case kFormatH:
case kFormat4H:
case kFormat8H:
return kFormat8H;
case kFormatS:
case kFormat2S:
case kFormat4S:
return kFormat4S;
case kFormatD:
case kFormat1D:
case kFormat2D:
return kFormat2D;
default:
UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
switch (vform) {
case kFormat4H:
return kFormat8B;
case kFormat8H:
return kFormat16B;
case kFormat2S:
return kFormat4H;
case kFormat4S:
return kFormat8H;
case kFormat1D:
return kFormat2S;
case kFormat2D:
return kFormat4S;
default:
UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
switch (vform) {
case kFormat8B:
return kFormat16B;
case kFormat4H:
return kFormat8H;
case kFormat2S:
return kFormat4S;
default:
UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
DCHECK(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
switch (vform) {
case kFormat16B:
return kFormat8B;
case kFormat8H:
return kFormat4H;
case kFormat4S:
return kFormat2S;
default:
UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat ScalarFormatFromLaneSize(int laneSize) {
switch (laneSize) {
case 8:
return kFormatB;
case 16:
return kFormatH;
case 32:
return kFormatS;
case 64:
return kFormatD;
default:
UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
}
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
return RegisterSizeInBitsFromFormat(vform) / 8;
}
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
return kBRegSizeInBits;
case kFormatH:
return kHRegSizeInBits;
case kFormatS:
return kSRegSizeInBits;
case kFormatD:
return kDRegSizeInBits;
case kFormat8B:
case kFormat4H:
case kFormat2S:
case kFormat1D:
return kDRegSizeInBits;
default:
return kQRegSizeInBits;
}
}
unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 8;
case kFormatH:
case kFormat4H:
case kFormat8H:
return 16;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 32;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 64;
default:
UNREACHABLE();
return 0;
}
}
int LaneSizeInBytesFromFormat(VectorFormat vform) {
return LaneSizeInBitsFromFormat(vform) / 8;
}
int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 0;
case kFormatH:
case kFormat4H:
case kFormat8H:
return 1;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 2;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 3;
default:
UNREACHABLE();
return 0;
}
}
int LaneCountFromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormat16B:
return 16;
case kFormat8B:
case kFormat8H:
return 8;
case kFormat4H:
case kFormat4S:
return 4;
case kFormat2S:
case kFormat2D:
return 2;
case kFormat1D:
case kFormatB:
case kFormatH:
case kFormatS:
case kFormatD:
return 1;
default:
UNREACHABLE();
return 0;
}
}
int MaxLaneCountFromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 16;
case kFormatH:
case kFormat4H:
case kFormat8H:
return 8;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 4;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 2;
default:
UNREACHABLE();
return 0;
}
}
// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormatH:
case kFormatS:
case kFormatD:
return false;
default:
return true;
}
}
int64_t MaxIntFromFormat(VectorFormat vform) {
return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
int64_t MinIntFromFormat(VectorFormat vform) {
return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}
uint64_t MaxUintFromFormat(VectorFormat vform) {
return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(IntegerFormatMap());
}
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(format);
}
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(format0, format1);
}
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1,
const NEONFormatMap* format2) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(format0, format1, format2);
}
void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format1,
const NEONFormatMap* format2) {
DCHECK_NOT_NULL(format0);
formats_[0] = format0;
formats_[1] = (format1 == NULL) ? formats_[0] : format1;
formats_[2] = (format2 == NULL) ? formats_[1] : format2;
}
void NEONFormatDecoder::SetFormatMap(unsigned index,
const NEONFormatMap* format) {
DCHECK_LT(index, arraysize(formats_));
DCHECK_NOT_NULL(format);
formats_[index] = format;
}
const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
}
const char* NEONFormatDecoder::Substitute(const char* string,
SubstitutionMode mode0,
SubstitutionMode mode1,
SubstitutionMode mode2) {
snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0),
GetSubstitute(1, mode1), GetSubstitute(2, mode2));
return form_buffer_;
}
const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) {
if ((instrbits_ & NEON_Q) != 0) {
snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
return mne_buffer_;
}
return mnemonic;
}
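// For example, with the Q bit set, Mnemonic("saddl") returns "saddl2" (as in
// SADDL2 v0.8h, v1.16b, v2.16b); with Q clear it returns "saddl" unchanged.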
VectorFormat NEONFormatDecoder::GetVectorFormat(int format_index) {
return GetVectorFormat(formats_[format_index]);
}
VectorFormat NEONFormatDecoder::GetVectorFormat(
const NEONFormatMap* format_map) {
static const VectorFormat vform[] = {
kFormatUndefined, kFormat8B, kFormat16B, kFormat4H, kFormat8H,
kFormat2S, kFormat4S, kFormat1D, kFormat2D, kFormatB,
kFormatH, kFormatS, kFormatD};
DCHECK_LT(GetNEONFormat(format_map), arraysize(vform));
return vform[GetNEONFormat(format_map)];
}
const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) {
if (mode == kFormat) {
return NEONFormatAsString(GetNEONFormat(formats_[index]));
}
DCHECK_EQ(mode, kPlaceholder);
return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}
NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) {
return format_map->map[PickBits(format_map->bits)];
}
const char* NEONFormatDecoder::NEONFormatAsString(NEONFormat format) {
static const char* formats[] = {"undefined", "8b", "16b", "4h", "8h",
"2s", "4s", "1d", "2d", "b",
"h", "s", "d"};
DCHECK_LT(format, arraysize(formats));
return formats[format];
}
const char* NEONFormatDecoder::NEONFormatAsPlaceholder(NEONFormat format) {
DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) ||
(format == NF_D) || (format == NF_UNDEF));
static const char* formats[] = {
"undefined", "undefined", "undefined", "undefined", "undefined",
"undefined", "undefined", "undefined", "undefined", "'B",
"'H", "'S", "'D"};
return formats[format];
}
uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) {
uint8_t result = 0;
for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
if (bits[b] == 0) break;
result <<= 1;
result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
}
return result;
}
} // namespace internal
} // namespace v8
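A worked example of the format maps above: ADD v0.4s, v1.4s, v2.4s encodes size<1:0> = 10 (bits 23:22) and Q = 1 (bit 30), so PickBits on the integer format map concatenates 1, 0, 1 into index 5, which the map resolves to NF_4S.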

src/arm64/instructions-arm64.h

@@ -23,17 +23,13 @@ typedef uint32_t Instr;
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(ARM64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT16(name, value) extern const uint16_t name = value
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT16(name, value) extern const float16 name
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif // defined(ARM64_DEFINE_FP_STATICS)
DEFINE_FLOAT16(kFP16PositiveInfinity, 0x7c00);
DEFINE_FLOAT16(kFP16NegativeInfinity, 0xfc00);
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
@@ -51,14 +47,19 @@ DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
// The default NaN values (for FPCR.DN=1).
DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
DEFINE_FLOAT16(kFP16DefaultNaN, 0x7e00);
#undef DEFINE_FLOAT16
#undef DEFINE_FLOAT
#undef DEFINE_DOUBLE
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);
enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
LSWord = 2,
LSDoubleWord = 3
};
LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
@@ -81,10 +82,9 @@ enum FPRounding {
FPNegativeInfinity = 0x2,
FPZero = 0x3,
// The final rounding modes are only available when explicitly specified by
// the instruction (such as with fcvta). They cannot be set in FPCR.
FPTieAway,
FPRoundOdd
// The final rounding mode is only available when explicitly specified by the
// instruction (such as with fcvta). It cannot be set in FPCR.
FPTieAway
};
enum Reg31Mode {
@@ -152,29 +152,14 @@ class Instruction {
}
uint64_t ImmLogical();
unsigned ImmNEONabcdefgh() const;
float ImmFP32();
double ImmFP64();
float ImmNEONFP32() const;
double ImmNEONFP64() const;
unsigned SizeLS() const {
return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
}
unsigned SizeLSPair() const {
LSDataSize SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
int NEONLSIndex(int access_size_shift) const {
int q = NEONQ();
int s = NEONS();
int size = NEONLSSize();
int index = (q << 3) | (s << 2) | size;
return index >> access_size_shift;
}
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
@@ -196,33 +181,6 @@ class Instruction {
return BranchType() != UnknownBranchType;
}
static float Imm8ToFP32(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return bit_cast<float>(result);
}
static double Imm8ToFP64(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return bit_cast<double>(result);
}
bool IsLdrLiteral() const {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
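As a sanity check on the Imm8ToFP32 expansion above (a standalone sketch, not authoritative): imm8 = 0x70 has a = 0, b = 1, cdefgh = 110000, which expands to 0x3f800000, i.e. 1.0f.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t imm8 = 0x70;
  uint32_t bit7 = (imm8 >> 7) & 0x1;  // a
  uint32_t bit6 = (imm8 >> 6) & 0x1;  // b
  uint32_t bit5_to_0 = imm8 & 0x3f;   // cdefgh
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
  float f;
  std::memcpy(&f, &result, sizeof(f));  // stand-in for bit_cast<float>
  assert(result == 0x3f800000u);
  assert(f == 1.0f);
  return 0;
}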
@@ -459,48 +417,6 @@ class Instruction {
void SetBranchImmTarget(Instruction* target);
};
// Functions for handling NEON vector format information.
enum VectorFormat {
kFormatUndefined = 0xffffffff,
kFormat8B = NEON_8B,
kFormat16B = NEON_16B,
kFormat4H = NEON_4H,
kFormat8H = NEON_8H,
kFormat2S = NEON_2S,
kFormat4S = NEON_4S,
kFormat1D = NEON_1D,
kFormat2D = NEON_2D,
// Scalar formats. We add the scalar bit to distinguish between scalar and
// vector enumerations; the bit is always set in the encoding of scalar ops
// and always clear for vector ops. Although kFormatD and kFormat1D appear
// to be the same, their meaning is subtly different. The first is a scalar
// operation, the second a vector operation that only affects one lane.
kFormatB = NEON_B | NEONScalar,
kFormatH = NEON_H | NEONScalar,
kFormatS = NEON_S | NEONScalar,
kFormatD = NEON_D | NEONScalar
};
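// For example, ADD d0, d1, d2 is a scalar operation (kFormatD), while
// LD1 {v0.1d}, [x0] is a vector operation with a single 64-bit lane
// (kFormat1D).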
VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);
// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instructions sequences generated by the
@@ -588,7 +504,7 @@ const unsigned kDebugMessageOffset = 3 * kInstructionSize;
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_VREGS);
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
@@ -601,201 +517,24 @@ const unsigned kDebugMessageOffset = 3 * kInstructionSize;
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
NO_PARAM = 0,
BREAK = 1 << 0,
LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
LOG_REGS = 1 << 2, // Log general purpose registers.
LOG_VREGS = 1 << 3, // Log NEON and floating-point registers.
LOG_SYS_REGS = 1 << 4, // Log the status flags.
LOG_WRITE = 1 << 5, // Log any memory write.
NO_PARAM = 0,
BREAK = 1 << 0,
LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
LOG_REGS = 1 << 2, // Log general purpose registers.
LOG_FP_REGS = 1 << 3, // Log floating-point registers.
LOG_SYS_REGS = 1 << 4, // Log the status flags.
LOG_WRITE = 1 << 5, // Log any memory write.
LOG_NONE = 0,
LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
// Trace control.
TRACE_ENABLE = 1 << 6,
TRACE_DISABLE = 2 << 6,
TRACE_ENABLE = 1 << 6,
TRACE_DISABLE = 2 << 6,
TRACE_OVERRIDE = 3 << 6
};
enum NEONFormat {
NF_UNDEF = 0,
NF_8B = 1,
NF_16B = 2,
NF_4H = 3,
NF_8H = 4,
NF_2S = 5,
NF_4S = 6,
NF_1D = 7,
NF_2D = 8,
NF_B = 9,
NF_H = 10,
NF_S = 11,
NF_D = 12
};
static const unsigned kNEONFormatMaxBits = 6;
struct NEONFormatMap {
// The bit positions in the instruction to consider.
uint8_t bits[kNEONFormatMaxBits];
// Mapping from concatenated bits to format.
NEONFormat map[1 << kNEONFormatMaxBits];
};
class NEONFormatDecoder {
public:
enum SubstitutionMode { kPlaceholder, kFormat };
// Construct a format decoder with increasingly specific format maps for each
// substitution. If no format map is specified, the default is the integer
// format map.
explicit NEONFormatDecoder(const Instruction* instr);
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format);
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
const NEONFormatMap* format1);
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
const NEONFormatMap* format1, const NEONFormatMap* format2);
// Set the format mapping for all or individual substitutions.
void SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format1 = NULL,
const NEONFormatMap* format2 = NULL);
void SetFormatMap(unsigned index, const NEONFormatMap* format);
// Substitute %s in the input string with the placeholder string for each
// register, i.e. "'B", "'H", etc.
const char* SubstitutePlaceholders(const char* string);
// Substitute %s in the input string with a new string based on the
// substitution mode.
const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
SubstitutionMode mode1 = kFormat,
SubstitutionMode mode2 = kFormat);
// Append a "2" to a mnemonic string based of the state of the Q bit.
const char* Mnemonic(const char* mnemonic);
VectorFormat GetVectorFormat(int format_index = 0);
VectorFormat GetVectorFormat(const NEONFormatMap* format_map);
// Built in mappings for common cases.
// The integer format map uses three bits (Q, size<1:0>) to encode the
// "standard" set of NEON integer vector formats.
static const NEONFormatMap* IntegerFormatMap() {
static const NEONFormatMap map = {
{23, 22, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The long integer format map uses two bits (size<1:0>) to encode the
// long set of NEON integer vector formats. These are used in narrow, wide
// and long operations.
static const NEONFormatMap* LongIntegerFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
return &map;
}
// The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
// formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap* FPFormatMap() {
// The FP format map assumes two bits (Q, size<0>) are used to encode the
// NEON FP vector formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap map = {{22, 30},
{NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The load/store format map uses three bits (Q, 11, 10) to encode the
// set of NEON vector formats.
static const NEONFormatMap* LoadStoreFormatMap() {
static const NEONFormatMap map = {
{11, 10, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
return &map;
}
// The logical format map uses one bit (Q) to encode the NEON vector format:
// NF_8B, NF_16B.
static const NEONFormatMap* LogicalFormatMap() {
static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
return &map;
}
// The triangular format map uses between two and five bits to encode the NEON
// vector format:
// xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
// x1000->2S, x1001->4S, 10001->2D, all others undefined.
static const NEONFormatMap* TriangularFormatMap() {
static const NEONFormatMap map = {
{19, 18, 17, 16, 30},
{NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
return &map;
}
// The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
// formats: NF_B, NF_H, NF_S, NF_D.
static const NEONFormatMap* ScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
return &map;
}
// The long scalar format map uses two bits (size<1:0>) to encode the longer
// NEON scalar formats: NF_H, NF_S, NF_D.
static const NEONFormatMap* LongScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
return &map;
}
// The FP scalar format map assumes one bit (size<0>) is used to encode the
// NEON FP scalar formats: NF_S, NF_D.
static const NEONFormatMap* FPScalarFormatMap() {
static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
return &map;
}
// The triangular scalar format map uses between one and four bits to encode
// the NEON FP scalar formats:
// xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
static const NEONFormatMap* TriangularScalarFormatMap() {
static const NEONFormatMap map = {
{19, 18, 17, 16},
{NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, NF_D, NF_B, NF_H,
NF_B, NF_S, NF_B, NF_H, NF_B}};
return &map;
}
private:
// Get a pointer to a string that represents the format or placeholder for
// the specified substitution index, based on the format map and instruction.
const char* GetSubstitute(int index, SubstitutionMode mode);
// Get the NEONFormat enumerated value for bits obtained from the
// instruction based on the specified format mapping.
NEONFormat GetNEONFormat(const NEONFormatMap* format_map);
// Convert a NEONFormat into a string.
static const char* NEONFormatAsString(NEONFormat format);
// Convert a NEONFormat into a register placeholder string.
static const char* NEONFormatAsPlaceholder(NEONFormat format);
// Select bits from instrbits_ defined by the bits array, concatenate them,
// and return the value.
uint8_t PickBits(const uint8_t bits[]);
Instr instrbits_;
const NEONFormatMap* formats_[3];
char form_buffer_[64];
char mne_buffer_[16];
};
} // namespace internal
} // namespace v8

src/arm64/instrument-arm64.cc

@@ -377,7 +377,7 @@ void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* load_fp_counter = GetCounter("Load FP");
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreMask)) {
switch (instr->Mask(LoadStoreOpMask)) {
case STRB_w: // Fall through.
case STRH_w: // Fall through.
case STR_w: // Fall through.
@@ -595,159 +595,6 @@ void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
counter->Increment();
}
void Instrument::VisitNEON2RegMisc(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3Different(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3Same(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONAcrossLanes(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONByIndexedElement(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONCopy(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONExtract(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONModifiedImmediate(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONPerm(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar2RegMisc(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3Diff(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3Same(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarByIndexedElement(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarCopy(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarPairwise(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarShiftImmediate(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONShiftImmediate(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONTable(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitUnallocated(Instruction* instr) {
Update();

src/arm64/macro-assembler-arm64-inl.h

@@ -547,34 +547,42 @@ void MacroAssembler::Extr(const Register& rd,
extr(rd, rn, rm, lsb);
}
void MacroAssembler::Fabs(const VRegister& fd, const VRegister& fn) {
void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
fabs(fd, fn);
}
void MacroAssembler::Fadd(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
void MacroAssembler::Fadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fadd(fd, fn, fm);
}
void MacroAssembler::Fccmp(const VRegister& fn, const VRegister& fm,
StatusFlags nzcv, Condition cond) {
void MacroAssembler::Fccmp(const FPRegister& fn,
const FPRegister& fm,
StatusFlags nzcv,
Condition cond) {
DCHECK(allow_macro_instructions_);
DCHECK((cond != al) && (cond != nv));
fccmp(fn, fm, nzcv, cond);
}
void MacroAssembler::Fcmp(const VRegister& fn, const VRegister& fm) {
void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fcmp(fn, fm);
}
void MacroAssembler::Fcmp(const VRegister& fn, double value) {
void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
DCHECK(allow_macro_instructions_);
if (value != 0.0) {
UseScratchRegisterScope temps(this);
VRegister tmp = temps.AcquireSameSizeAs(fn);
FPRegister tmp = temps.AcquireSameSizeAs(fn);
Fmov(tmp, value);
fcmp(fn, tmp);
} else {
@@ -582,204 +590,271 @@ void MacroAssembler::Fcmp(const VRegister& fn, double value) {
}
}
void MacroAssembler::Fcsel(const VRegister& fd, const VRegister& fn,
const VRegister& fm, Condition cond) {
void MacroAssembler::Fcsel(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
Condition cond) {
DCHECK(allow_macro_instructions_);
DCHECK((cond != al) && (cond != nv));
fcsel(fd, fn, fm, cond);
}
void MacroAssembler::Fcvt(const VRegister& fd, const VRegister& fn) {
void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
fcvt(fd, fn);
}
void MacroAssembler::Fcvtas(const Register& rd, const VRegister& fn) {
void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fcvtas(rd, fn);
}
void MacroAssembler::Fcvtau(const Register& rd, const VRegister& fn) {
void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fcvtau(rd, fn);
}
void MacroAssembler::Fcvtms(const Register& rd, const VRegister& fn) {
void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fcvtms(rd, fn);
}
void MacroAssembler::Fcvtmu(const Register& rd, const VRegister& fn) {
void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fcvtmu(rd, fn);
}
void MacroAssembler::Fcvtns(const Register& rd, const VRegister& fn) {
void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fcvtns(rd, fn);
}
void MacroAssembler::Fcvtnu(const Register& rd, const VRegister& fn) {
void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fcvtnu(rd, fn);
}
void MacroAssembler::Fcvtzs(const Register& rd, const VRegister& fn) {
void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fcvtzs(rd, fn);
}
void MacroAssembler::Fcvtzu(const Register& rd, const VRegister& fn) {
void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fcvtzu(rd, fn);
}
void MacroAssembler::Fdiv(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
void MacroAssembler::Fdiv(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fdiv(fd, fn, fm);
}
void MacroAssembler::Fmadd(const VRegister& fd, const VRegister& fn,
const VRegister& fm, const VRegister& fa) {
void MacroAssembler::Fmadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa) {
DCHECK(allow_macro_instructions_);
fmadd(fd, fn, fm, fa);
}
void MacroAssembler::Fmax(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
void MacroAssembler::Fmax(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fmax(fd, fn, fm);
}
void MacroAssembler::Fmaxnm(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
void MacroAssembler::Fmaxnm(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fmaxnm(fd, fn, fm);
}
void MacroAssembler::Fmin(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
void MacroAssembler::Fmin(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fmin(fd, fn, fm);
}
void MacroAssembler::Fminnm(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
void MacroAssembler::Fminnm(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fminnm(fd, fn, fm);
}
void MacroAssembler::Fmov(VRegister fd, VRegister fn) {
void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
DCHECK(allow_macro_instructions_);
// Only emit an instruction if fd and fn are different, and they are both D
// registers. fmov(s0, s0) is not a no-op because it clears the top word of
// d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
// top of q0, but VRegister does not currently support Q registers.
// top of q0, but FPRegister does not currently support Q registers.
if (!fd.Is(fn) || !fd.Is64Bits()) {
fmov(fd, fn);
}
}
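A hypothetical call sequence under this rule (assuming the usual __ macro bound to a MacroAssembler, and the standard s0/s1/d0 register aliases):
__ Fmov(d0, d0);  // elided: same register, 64 bits wide
__ Fmov(s0, s0);  // emitted: fmov s0, s0 zeroes the top word of d0
__ Fmov(s0, s1);  // emitted: source and destination differ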
void MacroAssembler::Fmov(VRegister fd, Register rn) {
void MacroAssembler::Fmov(FPRegister fd, Register rn) {
DCHECK(allow_macro_instructions_);
fmov(fd, rn);
}
void MacroAssembler::Fmov(VRegister vd, double imm) {
DCHECK(allow_macro_instructions_);
if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
Fmov(vd, static_cast<float>(imm));
void MacroAssembler::Fmov(FPRegister fd, double imm) {
DCHECK(allow_macro_instructions_);
if (fd.Is32Bits()) {
Fmov(fd, static_cast<float>(imm));
return;
}
DCHECK(vd.Is1D() || vd.Is2D());
DCHECK(fd.Is64Bits());
if (IsImmFP64(imm)) {
fmov(vd, imm);
fmov(fd, imm);
} else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
fmov(fd, xzr);
} else {
uint64_t bits = bit_cast<uint64_t>(imm);
if (vd.IsScalar()) {
if (bits == 0) {
fmov(vd, xzr);
} else {
Ldr(vd, imm);
}
} else {
// TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
Ldr(fd, imm);
}
}
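A sketch of the resulting dispatch for double immediates (same hypothetical __ macro; the literals are illustrative):
__ Fmov(d0, 1.0);     // encodable as an FP8 immediate: a single fmov
__ Fmov(d0, 0.0);     // positive zero: fmov d0, xzr
__ Fmov(d0, -0.0);    // fails the copysign test: loaded with Ldr from a literal
__ Fmov(d0, 1.3e17);  // not encodable: loaded with Ldr from a literal
The float overload below mirrors this, using IsImmFP32, wzr and a scratch W register.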
void MacroAssembler::Fmov(VRegister vd, float imm) {
void MacroAssembler::Fmov(FPRegister fd, float imm) {
DCHECK(allow_macro_instructions_);
if (vd.Is1D() || vd.Is2D()) {
Fmov(vd, static_cast<double>(imm));
if (fd.Is64Bits()) {
Fmov(fd, static_cast<double>(imm));
return;
}
DCHECK(vd.Is1S() || vd.Is2S() || vd.Is4S());
DCHECK(fd.Is32Bits());
if (IsImmFP32(imm)) {
fmov(vd, imm);
fmov(fd, imm);
} else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
fmov(fd, wzr);
} else {
uint32_t bits = bit_cast<uint32_t>(imm);
if (vd.IsScalar()) {
if (bits == 0) {
fmov(vd, wzr);
} else {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireW();
// TODO(all): Use Assembler::ldr(const VRegister& ft, float imm).
Mov(tmp, bit_cast<uint32_t>(imm));
Fmov(vd, tmp);
}
} else {
// TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireW();
// TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
Mov(tmp, float_to_rawbits(imm));
Fmov(fd, tmp);
}
}
void MacroAssembler::Fmov(Register rd, VRegister fn) {
void MacroAssembler::Fmov(Register rd, FPRegister fn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
fmov(rd, fn);
}
void MacroAssembler::Fmsub(const VRegister& fd, const VRegister& fn,
const VRegister& fm, const VRegister& fa) {
void MacroAssembler::Fmsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa) {
DCHECK(allow_macro_instructions_);
fmsub(fd, fn, fm, fa);
}
void MacroAssembler::Fmul(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
void MacroAssembler::Fmul(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fmul(fd, fn, fm);
}
void MacroAssembler::Fnmadd(const VRegister& fd, const VRegister& fn,
const VRegister& fm, const VRegister& fa) {
void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
fneg(fd, fn);
}
void MacroAssembler::Fnmadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa) {
DCHECK(allow_macro_instructions_);
fnmadd(fd, fn, fm, fa);
}
void MacroAssembler::Fnmsub(const VRegister& fd, const VRegister& fn,
const VRegister& fm, const VRegister& fa) {
void MacroAssembler::Fnmsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa) {
DCHECK(allow_macro_instructions_);
fnmsub(fd, fn, fm, fa);
}
void MacroAssembler::Fsub(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
frinta(fd, fn);
}
void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
frintm(fd, fn);
}
void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
frintn(fd, fn);
}
void MacroAssembler::Frintp(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
frintp(fd, fn);
}
void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
frintz(fd, fn);
}
void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
DCHECK(allow_macro_instructions_);
fsqrt(fd, fn);
}
void MacroAssembler::Fsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
DCHECK(allow_macro_instructions_);
fsub(fd, fn, fm);
}
@ -812,7 +887,7 @@ void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
void MacroAssembler::Ldr(const CPURegister& rt, double imm) {
DCHECK(allow_macro_instructions_);
DCHECK(rt.Is64Bits());
ldr(rt, Immediate(bit_cast<uint64_t>(imm)));
ldr(rt, Immediate(double_to_rawbits(imm)));
}
@ -995,7 +1070,9 @@ void MacroAssembler::Sbfx(const Register& rd,
sbfx(rd, rn, lsb, width);
}
void MacroAssembler::Scvtf(const VRegister& fd, const Register& rn,
void MacroAssembler::Scvtf(const FPRegister& fd,
const Register& rn,
unsigned fbits) {
DCHECK(allow_macro_instructions_);
scvtf(fd, rn, fbits);
@ -1097,7 +1174,9 @@ void MacroAssembler::Ubfx(const Register& rd,
ubfx(rd, rn, lsb, width);
}
void MacroAssembler::Ucvtf(const VRegister& fd, const Register& rn,
void MacroAssembler::Ucvtf(const FPRegister& fd,
const Register& rn,
unsigned fbits) {
DCHECK(allow_macro_instructions_);
ucvtf(fd, rn, fbits);
@ -1239,7 +1318,9 @@ void MacroAssembler::SmiUntag(Register dst, Register src) {
void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src,
void MacroAssembler::SmiUntagToDouble(FPRegister dst,
Register src,
UntagMode mode) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
@ -1248,7 +1329,9 @@ void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src,
Scvtf(dst, src, kSmiShift);
}
void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src,
void MacroAssembler::SmiUntagToFloat(FPRegister dst,
Register src,
UntagMode mode) {
DCHECK(dst.Is32Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {


@ -295,171 +295,6 @@ void MacroAssembler::Mov(const Register& rd,
}
}
void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
int byte1 = (imm & 0xff);
int byte2 = ((imm >> 8) & 0xff);
if (byte1 == byte2) {
movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
} else if (byte1 == 0) {
movi(vd, byte2, LSL, 8);
} else if (byte2 == 0) {
movi(vd, byte1);
} else if (byte1 == 0xff) {
mvni(vd, ~byte2 & 0xff, LSL, 8);
} else if (byte2 == 0xff) {
mvni(vd, ~byte1 & 0xff);
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireW();
movz(temp, imm);
dup(vd, temp);
}
}
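Hypothetical inputs and the encodings the helper above selects (an 8H destination assumed):
__ Movi(v0.V8H(), 0xabab);  // equal bytes: movi v0.16b, #0xab
__ Movi(v0.V8H(), 0xab00);  // low byte zero: movi v0.8h, #0xab, lsl #8
__ Movi(v0.V8H(), 0x00ab);  // high byte zero: movi v0.8h, #0xab
__ Movi(v0.V8H(), 0xffab);  // high byte 0xff: mvni v0.8h, #0x54
__ Movi(v0.V8H(), 0x1234);  // no pattern: movz to a scratch W register, then dup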
void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint32(imm));
uint8_t bytes[sizeof(imm)];
memcpy(bytes, &imm, sizeof(imm));
// All bytes are either 0x00 or 0xff.
{
bool all0orff = true;
for (int i = 0; i < 4; ++i) {
if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
all0orff = false;
break;
}
}
if (all0orff == true) {
movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
return;
}
}
// Of the 4 bytes, only one byte is non-zero.
for (int i = 0; i < 4; i++) {
if ((imm & (0xffu << (i * 8))) == imm) {
movi(vd, bytes[i], LSL, i * 8);
return;
}
}
// Of the 4 bytes, only one byte is not 0xff.
for (int i = 0; i < 4; i++) {
uint32_t mask = ~(0xffu << (i * 8));
if ((imm & mask) == mask) {
mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
return;
}
}
// Immediate is of the form 0x00MMFFFF.
if ((imm & 0xff00ffff) == 0x0000ffff) {
movi(vd, bytes[2], MSL, 16);
return;
}
// Immediate is of the form 0x0000MMFF.
if ((imm & 0xffff00ff) == 0x000000ff) {
movi(vd, bytes[1], MSL, 8);
return;
}
// Immediate is of the form 0xFFMM0000.
if ((imm & 0xff00ffff) == 0xff000000) {
mvni(vd, ~bytes[2] & 0xff, MSL, 16);
return;
}
// Immediate is of the form 0xFFFFMM00.
if ((imm & 0xffff00ff) == 0xffff0000) {
mvni(vd, ~bytes[1] & 0xff, MSL, 8);
return;
}
// Top and bottom 16-bits are equal.
if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
return;
}
// Default case.
{
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireW();
Mov(temp, imm);
dup(vd, temp);
}
}
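A few hypothetical 32-bit immediates and the paths taken (a 4S destination assumed):
__ Movi(v0.V4S(), 0xff00ff00);  // all bytes 0x00/0xff: movi of the value replicated to 64 bits
__ Movi(v0.V4S(), 0x0000ff00);  // one non-zero byte: movi v0.4s, #0xff, lsl #8
__ Movi(v0.V4S(), 0x0012ffff);  // 0x00MMFFFF form: movi v0.4s, #0x12, msl #16
__ Movi(v0.V4S(), 0x12341234);  // equal 16-bit halves: delegates to Movi16bitHelper
__ Movi(v0.V4S(), 0x12345678);  // no pattern: Mov to a scratch W register, then dup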
void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
// All bytes are either 0x00 or 0xff.
{
bool all0orff = true;
for (int i = 0; i < 8; ++i) {
int byteval = (imm >> (i * 8)) & 0xff;
if (byteval != 0 && byteval != 0xff) {
all0orff = false;
break;
}
}
if (all0orff == true) {
movi(vd, imm);
return;
}
}
// Top and bottom 32-bits are equal.
if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
return;
}
// Default case.
{
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Mov(temp, imm);
if (vd.Is1D()) {
mov(vd.D(), 0, temp);
} else {
dup(vd.V2D(), temp);
}
}
}
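And for 64-bit immediates (a 2D destination assumed):
__ Movi(v0.V2D(), 0xff00ff0000ff00ff);  // all bytes 0x00/0xff: a single movi
__ Movi(v0.V2D(), 0x0123456701234567);  // equal 32-bit halves: delegates to Movi32bitHelper
__ Movi(v0.V2D(), 0x0123456789abcdef);  // no pattern: Mov to a scratch X register, then dup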
void MacroAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
int shift_amount) {
DCHECK(allow_macro_instructions_);
if (shift_amount != 0 || shift != LSL) {
movi(vd, imm, shift, shift_amount);
} else if (vd.Is8B() || vd.Is16B()) {
// 8-bit immediate.
DCHECK(is_uint8(imm));
movi(vd, imm);
} else if (vd.Is4H() || vd.Is8H()) {
// 16-bit immediate.
Movi16bitHelper(vd, imm);
} else if (vd.Is2S() || vd.Is4S()) {
// 32-bit immediate.
Movi32bitHelper(vd, imm);
} else {
// 64-bit immediate.
Movi64bitHelper(vd, imm);
}
}
void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
// TODO(all): Move 128-bit values in a more efficient way.
DCHECK(vd.Is128Bits());
UseScratchRegisterScope temps(this);
Movi(vd.V2D(), lo);
Register temp = temps.AcquireX();
Mov(temp, hi);
Ins(vd.V2D(), 1, temp);
}
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions_);
@ -731,7 +566,7 @@ void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op) {
int64_t offset = addr.offset();
unsigned size = CalcLSDataSize(op);
LSDataSize size = CalcLSDataSize(op);
// Check if an immediate offset fits in the immediate field of the
// appropriate instruction. If not, emit two instructions to perform
@ -766,7 +601,7 @@ void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
DCHECK(!addr.IsRegisterOffset());
int64_t offset = addr.offset();
unsigned size = CalcLSPairDataSize(op);
LSDataSize size = CalcLSPairDataSize(op);
// Check if the offset fits in the immediate field of the appropriate
// instruction. If not, emit two instructions to perform the operation.
@ -1094,7 +929,8 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
PopPostamble(count, size);
}
void MacroAssembler::Push(const Register& src0, const VRegister& src1) {
void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
int size = src0.SizeInBytes() + src1.SizeInBytes();
PushPreamble(size);
@ -1561,8 +1397,9 @@ void MacroAssembler::AssertFPCRState(Register fpcr) {
}
}
void MacroAssembler::CanonicalizeNaN(const VRegister& dst,
const VRegister& src) {
void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
const FPRegister& src) {
AssertFPCRState();
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
@ -2214,8 +2051,10 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
VRegister scratch_d,
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
FPRegister value,
FPRegister scratch_d,
Label* on_successful_conversion,
Label* on_failed_conversion) {
// Convert to an int and back again, then compare with the original value.
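A standalone C++ sketch of that round-trip idea (illustrative only: the generated Fcvtzs/Scvtf pair saturates on overflow, where the plain casts below would be undefined behaviour):
#include <cstdint>
bool RepresentableAsInt64(double value) {
  int64_t as_int = static_cast<int64_t>(value);     // Fcvtzs
  double round_trip = static_cast<double>(as_int);  // Scvtf
  return round_trip == value;                       // Fcmp, then test Z
}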
@ -2829,14 +2668,14 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::ExitFramePreserveFPRegs() {
PushCPURegList(kCallerSavedV);
PushCPURegList(kCallerSavedFP);
}
void MacroAssembler::ExitFrameRestoreFPRegs() {
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
CPURegList saved_fp_regs = kCallerSavedV;
CPURegList saved_fp_regs = kCallerSavedFP;
DCHECK(saved_fp_regs.Count() % 2 == 0);
int offset = ExitFrameConstants::kLastExitFrameField;
@ -3315,7 +3154,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
if (!heap_number_map.IsValid()) {
// If we have a valid value register, use the same type of register to store
// the map so we can use STP to store both in one instruction.
if (value.IsValid() && value.IsVRegister()) {
if (value.IsValid() && value.IsFPRegister()) {
heap_number_map = temps.AcquireD();
} else {
heap_number_map = scratch1;
@ -3324,7 +3163,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
}
if (emit_debug_code()) {
Register map;
if (heap_number_map.IsVRegister()) {
if (heap_number_map.IsFPRegister()) {
map = scratch1;
Fmov(map, DoubleRegister(heap_number_map));
} else {
@ -3786,14 +3625,14 @@ void MacroAssembler::PushSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
PushCPURegList(CPURegList(
CPURegister::kVRegister, kDRegSizeInBits,
CPURegister::kFPRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
PopCPURegList(CPURegList(
CPURegister::kVRegister, kDRegSizeInBits,
CPURegister::kFPRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
PopSafepointRegisters();
}
@ -4345,7 +4184,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
static const CPURegList kPCSVarargs =
CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
static const CPURegList kPCSVarargsFP =
CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, arg_count - 1);
CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
// We can use caller-saved registers as scratch values, except for the
// arguments and the PCS registers where they might need to go.
@ -4354,7 +4193,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
tmp_list.Remove(kPCSVarargs);
tmp_list.Remove(arg0, arg1, arg2, arg3);
CPURegList fp_tmp_list = kCallerSavedV;
CPURegList fp_tmp_list = kCallerSavedFP;
fp_tmp_list.Remove(kPCSVarargsFP);
fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
@ -4379,7 +4218,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
// We might only need a W register here. We need to know the size of the
// argument so we can properly encode it for the simulator call.
if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
} else if (args[i].IsVRegister()) {
} else if (args[i].IsFPRegister()) {
// In C, floats are always cast to doubles for varargs calls.
pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
} else {
@ -4401,8 +4240,8 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Mov(new_arg, old_arg);
args[i] = new_arg;
} else {
VRegister old_arg = VRegister(args[i]);
VRegister new_arg = temps.AcquireSameSizeAs(old_arg);
FPRegister old_arg = FPRegister(args[i]);
FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
Fmov(new_arg, old_arg);
args[i] = new_arg;
}
@ -4416,11 +4255,11 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
if (pcs[i].IsRegister()) {
Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
} else {
DCHECK(pcs[i].IsVRegister());
DCHECK(pcs[i].IsFPRegister());
if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
Fmov(VRegister(pcs[i]), VRegister(args[i]));
Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
} else {
Fcvt(VRegister(pcs[i]), VRegister(args[i]));
Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
}
}
}
@ -4508,11 +4347,11 @@ void MacroAssembler::Printf(const char * format,
// If csp is the stack pointer, PushCPURegList asserts that the size of each
// list is a multiple of 16 bytes.
PushCPURegList(kCallerSaved);
PushCPURegList(kCallerSavedV);
PushCPURegList(kCallerSavedFP);
// We can use caller-saved registers as scratch values (except for argN).
CPURegList tmp_list = kCallerSaved;
CPURegList fp_tmp_list = kCallerSavedV;
CPURegList fp_tmp_list = kCallerSavedFP;
tmp_list.Remove(arg0, arg1, arg2, arg3);
fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
TmpList()->set_list(tmp_list.list());
@ -4531,7 +4370,7 @@ void MacroAssembler::Printf(const char * format,
// to PrintfNoPreserve as an argument.
Register arg_sp = temps.AcquireX();
Add(arg_sp, StackPointer(),
kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
@ -4555,7 +4394,7 @@ void MacroAssembler::Printf(const char * format,
}
}
PopCPURegList(kCallerSavedV);
PopCPURegList(kCallerSavedFP);
PopCPURegList(kCallerSaved);
TmpList()->set_list(old_tmp_list);
@ -4669,9 +4508,10 @@ Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
return Register::Create(code, reg.SizeInBits());
}
VRegister UseScratchRegisterScope::AcquireSameSizeAs(const VRegister& reg) {
FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
int code = AcquireNextAvailable(availablefp_).code();
return VRegister::Create(code, reg.SizeInBits());
return FPRegister::Create(code, reg.SizeInBits());
}


@ -396,85 +396,88 @@ class MacroAssembler : public Assembler {
const Register& rn,
const Register& rm,
unsigned lsb);
inline void Fabs(const VRegister& fd, const VRegister& fn);
inline void Fadd(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
inline void Fabs(const FPRegister& fd, const FPRegister& fn);
inline void Fadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm);
inline void Fccmp(const FPRegister& fn,
const FPRegister& fm,
StatusFlags nzcv,
Condition cond);
inline void Fcmp(const VRegister& fn, const VRegister& fm);
inline void Fcmp(const VRegister& fn, double value);
inline void Fcsel(const VRegister& fd, const VRegister& fn,
const VRegister& fm, Condition cond);
inline void Fcvt(const VRegister& fd, const VRegister& fn);
void Fcvtl(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions_);
fcvtl(vd, vn);
}
void Fcvtl2(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions_);
fcvtl2(vd, vn);
}
void Fcvtn(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions_);
fcvtn(vd, vn);
}
void Fcvtn2(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions_);
fcvtn2(vd, vn);
}
void Fcvtxn(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions_);
fcvtxn(vd, vn);
}
void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions_);
fcvtxn2(vd, vn);
}
inline void Fcvtas(const Register& rd, const VRegister& fn);
inline void Fcvtau(const Register& rd, const VRegister& fn);
inline void Fcvtms(const Register& rd, const VRegister& fn);
inline void Fcvtmu(const Register& rd, const VRegister& fn);
inline void Fcvtns(const Register& rd, const VRegister& fn);
inline void Fcvtnu(const Register& rd, const VRegister& fn);
inline void Fcvtzs(const Register& rd, const VRegister& fn);
inline void Fcvtzu(const Register& rd, const VRegister& fn);
inline void Fdiv(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Fmadd(const VRegister& fd, const VRegister& fn,
const VRegister& fm, const VRegister& fa);
inline void Fmax(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Fmin(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Fminnm(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, Register rn);
inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
inline void Fcmp(const FPRegister& fn, double value);
inline void Fcsel(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
Condition cond);
inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
inline void Fcvtas(const Register& rd, const FPRegister& fn);
inline void Fcvtau(const Register& rd, const FPRegister& fn);
inline void Fcvtms(const Register& rd, const FPRegister& fn);
inline void Fcvtmu(const Register& rd, const FPRegister& fn);
inline void Fcvtns(const Register& rd, const FPRegister& fn);
inline void Fcvtnu(const Register& rd, const FPRegister& fn);
inline void Fcvtzs(const Register& rd, const FPRegister& fn);
inline void Fcvtzu(const Register& rd, const FPRegister& fn);
inline void Fdiv(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm);
inline void Fmadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa);
inline void Fmax(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm);
inline void Fmaxnm(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm);
inline void Fmin(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm);
inline void Fminnm(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm);
inline void Fmov(FPRegister fd, FPRegister fn);
inline void Fmov(FPRegister fd, Register rn);
// Provide explicit double and float interfaces for FP immediate moves, rather
// than relying on implicit C++ casts. This allows signalling NaNs to be
// preserved when the immediate matches the format of fd. Most systems convert
// signalling NaNs to quiet NaNs when converting between float and double.
inline void Fmov(VRegister fd, double imm);
inline void Fmov(VRegister fd, float imm);
inline void Fmov(FPRegister fd, double imm);
inline void Fmov(FPRegister fd, float imm);
// Provide a template to allow other types to be converted automatically.
template <typename T>
void Fmov(VRegister fd, T imm) {
template<typename T>
void Fmov(FPRegister fd, T imm) {
DCHECK(allow_macro_instructions_);
Fmov(fd, static_cast<double>(imm));
}
inline void Fmov(Register rd, VRegister fn);
inline void Fmsub(const VRegister& fd, const VRegister& fn,
const VRegister& fm, const VRegister& fa);
inline void Fmul(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Fnmadd(const VRegister& fd, const VRegister& fn,
const VRegister& fm, const VRegister& fa);
inline void Fnmsub(const VRegister& fd, const VRegister& fn,
const VRegister& fm, const VRegister& fa);
inline void Fsub(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Fmov(Register rd, FPRegister fn);
inline void Fmsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa);
inline void Fmul(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm);
inline void Fneg(const FPRegister& fd, const FPRegister& fn);
inline void Fnmadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa);
inline void Fnmsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa);
inline void Frinta(const FPRegister& fd, const FPRegister& fn);
inline void Frintm(const FPRegister& fd, const FPRegister& fn);
inline void Frintn(const FPRegister& fd, const FPRegister& fn);
inline void Frintp(const FPRegister& fd, const FPRegister& fn);
inline void Frintz(const FPRegister& fd, const FPRegister& fn);
inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
inline void Fsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm);
inline void Hint(SystemHint code);
inline void Hlt(int code);
inline void Isb();
@ -504,76 +507,6 @@ class MacroAssembler : public Assembler {
const Register& ra);
inline void Mul(const Register& rd, const Register& rn, const Register& rm);
inline void Nop() { nop(); }
void Dup(const VRegister& vd, const VRegister& vn, int index) {
DCHECK(allow_macro_instructions_);
dup(vd, vn, index);
}
void Dup(const VRegister& vd, const Register& rn) {
DCHECK(allow_macro_instructions_);
dup(vd, rn);
}
void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
int vn_index) {
DCHECK(allow_macro_instructions_);
ins(vd, vd_index, vn, vn_index);
}
void Ins(const VRegister& vd, int vd_index, const Register& rn) {
DCHECK(allow_macro_instructions_);
ins(vd, vd_index, rn);
}
void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
int vn_index) {
DCHECK(allow_macro_instructions_);
mov(vd, vd_index, vn, vn_index);
}
void Mov(const VRegister& vd, const VRegister& vn, int index) {
DCHECK(allow_macro_instructions_);
mov(vd, vn, index);
}
void Mov(const VRegister& vd, int vd_index, const Register& rn) {
DCHECK(allow_macro_instructions_);
mov(vd, vd_index, rn);
}
void Mov(const Register& rd, const VRegister& vn, int vn_index) {
DCHECK(allow_macro_instructions_);
mov(rd, vn, vn_index);
}
void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
int shift_amount = 0);
void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
const int shift_amount = 0) {
DCHECK(allow_macro_instructions_);
mvni(vd, imm8, shift, shift_amount);
}
void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
DCHECK(allow_macro_instructions_);
orr(vd, imm8, left_shift);
}
void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
DCHECK(allow_macro_instructions_);
scvtf(vd, vn, fbits);
}
void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
DCHECK(allow_macro_instructions_);
ucvtf(vd, vn, fbits);
}
void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
DCHECK(allow_macro_instructions_);
fcvtzs(vd, vn, fbits);
}
void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
DCHECK(allow_macro_instructions_);
fcvtzu(vd, vn, fbits);
}
void Smov(const Register& rd, const VRegister& vn, int vn_index) {
DCHECK(allow_macro_instructions_);
smov(rd, vn, vn_index);
}
void Umov(const Register& rd, const VRegister& vn, int vn_index) {
DCHECK(allow_macro_instructions_);
umov(rd, vn, vn_index);
}
inline void Rbit(const Register& rd, const Register& rn);
inline void Ret(const Register& xn = lr);
inline void Rev(const Register& rd, const Register& rn);
@ -589,7 +522,8 @@ class MacroAssembler : public Assembler {
const Register& rn,
unsigned lsb,
unsigned width);
inline void Scvtf(const VRegister& fd, const Register& rn,
inline void Scvtf(const FPRegister& fd,
const Register& rn,
unsigned fbits = 0);
inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
inline void Smaddl(const Register& rd,
@ -623,7 +557,8 @@ class MacroAssembler : public Assembler {
const Register& rn,
unsigned lsb,
unsigned width);
inline void Ucvtf(const VRegister& fd, const Register& rn,
inline void Ucvtf(const FPRegister& fd,
const Register& rn,
unsigned fbits = 0);
inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
inline void Umaddl(const Register& rd,
@ -638,516 +573,6 @@ class MacroAssembler : public Assembler {
inline void Uxth(const Register& rd, const Register& rn);
inline void Uxtw(const Register& rd, const Register& rn);
// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
V(add, Add) \
V(addhn, Addhn) \
V(addhn2, Addhn2) \
V(addp, Addp) \
V(and_, And) \
V(bic, Bic) \
V(bif, Bif) \
V(bit, Bit) \
V(bsl, Bsl) \
V(cmeq, Cmeq) \
V(cmge, Cmge) \
V(cmgt, Cmgt) \
V(cmhi, Cmhi) \
V(cmhs, Cmhs) \
V(cmtst, Cmtst) \
V(eor, Eor) \
V(fabd, Fabd) \
V(facge, Facge) \
V(facgt, Facgt) \
V(faddp, Faddp) \
V(fcmeq, Fcmeq) \
V(fcmge, Fcmge) \
V(fcmgt, Fcmgt) \
V(fmaxnmp, Fmaxnmp) \
V(fmaxp, Fmaxp) \
V(fminnmp, Fminnmp) \
V(fminp, Fminp) \
V(fmla, Fmla) \
V(fmls, Fmls) \
V(fmulx, Fmulx) \
V(frecps, Frecps) \
V(frsqrts, Frsqrts) \
V(mla, Mla) \
V(mls, Mls) \
V(mul, Mul) \
V(orn, Orn) \
V(pmul, Pmul) \
V(pmull, Pmull) \
V(pmull2, Pmull2) \
V(raddhn, Raddhn) \
V(raddhn2, Raddhn2) \
V(rsubhn, Rsubhn) \
V(rsubhn2, Rsubhn2) \
V(sqadd, Sqadd) \
V(sqdmlal, Sqdmlal) \
V(sqdmlal2, Sqdmlal2) \
V(sqdmulh, Sqdmulh) \
V(sqdmull, Sqdmull) \
V(sqdmull2, Sqdmull2) \
V(sqrdmulh, Sqrdmulh) \
V(sqrshl, Sqrshl) \
V(sqshl, Sqshl) \
V(sqsub, Sqsub) \
V(srhadd, Srhadd) \
V(srshl, Srshl) \
V(sshl, Sshl) \
V(ssubl, Ssubl) \
V(ssubl2, Ssubl2) \
V(ssubw, Ssubw) \
V(ssubw2, Ssubw2) \
V(sub, Sub) \
V(subhn, Subhn) \
V(subhn2, Subhn2) \
V(trn1, Trn1) \
V(trn2, Trn2) \
V(orr, Orr) \
V(saba, Saba) \
V(sabal, Sabal) \
V(sabal2, Sabal2) \
V(sabd, Sabd) \
V(sabdl, Sabdl) \
V(sabdl2, Sabdl2) \
V(saddl, Saddl) \
V(saddl2, Saddl2) \
V(saddw, Saddw) \
V(saddw2, Saddw2) \
V(shadd, Shadd) \
V(shsub, Shsub) \
V(smax, Smax) \
V(smaxp, Smaxp) \
V(smin, Smin) \
V(sminp, Sminp) \
V(smlal, Smlal) \
V(smlal2, Smlal2) \
V(smlsl, Smlsl) \
V(smlsl2, Smlsl2) \
V(smull, Smull) \
V(smull2, Smull2) \
V(sqdmlsl, Sqdmlsl) \
V(sqdmlsl2, Sqdmlsl2) \
V(uaba, Uaba) \
V(uabal, Uabal) \
V(uabal2, Uabal2) \
V(uabd, Uabd) \
V(uabdl, Uabdl) \
V(uabdl2, Uabdl2) \
V(uaddl, Uaddl) \
V(uaddl2, Uaddl2) \
V(uaddw, Uaddw) \
V(uaddw2, Uaddw2) \
V(uhadd, Uhadd) \
V(uhsub, Uhsub) \
V(umax, Umax) \
V(umin, Umin) \
V(umlsl, Umlsl) \
V(umlsl2, Umlsl2) \
V(umull, Umull) \
V(umull2, Umull2) \
V(umaxp, Umaxp) \
V(uminp, Uminp) \
V(umlal, Umlal) \
V(umlal2, Umlal2) \
V(uqadd, Uqadd) \
V(uqrshl, Uqrshl) \
V(uqshl, Uqshl) \
V(uqsub, Uqsub) \
V(urhadd, Urhadd) \
V(urshl, Urshl) \
V(ushl, Ushl) \
V(usubl, Usubl) \
V(usubl2, Usubl2) \
V(usubw, Usubw) \
V(usubw2, Usubw2) \
V(uzp1, Uzp1) \
V(uzp2, Uzp2) \
V(zip1, Zip1) \
V(zip2, Zip2)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
DCHECK(allow_macro_instructions_); \
ASM(vd, vn, vm); \
}
NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
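As an illustration of the X-macro pattern above, the V(add, Add) entry expands to a wrapper like this (hypothetical expansion shown for clarity):
void Add(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
  DCHECK(allow_macro_instructions_);
  add(vd, vn, vm);
}
The same DEFINE_MACRO_ASM_FUNC/#undef shape is reused for the two-register, immediate, by-element and shift lists below.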
void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
int index) {
DCHECK(allow_macro_instructions_);
ext(vd, vn, vm, index);
}
// NEON 2 vector register instructions.
#define NEON_2VREG_MACRO_LIST(V) \
V(abs, Abs) \
V(addp, Addp) \
V(addv, Addv) \
V(cls, Cls) \
V(clz, Clz) \
V(cnt, Cnt) \
V(faddp, Faddp) \
V(fcvtas, Fcvtas) \
V(fcvtau, Fcvtau) \
V(fcvtms, Fcvtms) \
V(fcvtmu, Fcvtmu) \
V(fcvtns, Fcvtns) \
V(fcvtnu, Fcvtnu) \
V(fcvtps, Fcvtps) \
V(fcvtpu, Fcvtpu) \
V(fmaxnmp, Fmaxnmp) \
V(fmaxnmv, Fmaxnmv) \
V(fmaxv, Fmaxv) \
V(fminnmp, Fminnmp) \
V(fminnmv, Fminnmv) \
V(fminp, Fminp) \
V(fmaxp, Fmaxp) \
V(fminv, Fminv) \
V(fneg, Fneg) \
V(frecpe, Frecpe) \
V(frecpx, Frecpx) \
V(frinta, Frinta) \
V(frinti, Frinti) \
V(frintm, Frintm) \
V(frintn, Frintn) \
V(frintp, Frintp) \
V(frintx, Frintx) \
V(frintz, Frintz) \
V(frsqrte, Frsqrte) \
V(fsqrt, Fsqrt) \
V(mov, Mov) \
V(mvn, Mvn) \
V(neg, Neg) \
V(not_, Not) \
V(rbit, Rbit) \
V(rev16, Rev16) \
V(rev32, Rev32) \
V(rev64, Rev64) \
V(sadalp, Sadalp) \
V(saddlv, Saddlv) \
V(smaxv, Smaxv) \
V(sminv, Sminv) \
V(saddlp, Saddlp) \
V(sqabs, Sqabs) \
V(sqneg, Sqneg) \
V(sqxtn, Sqxtn) \
V(sqxtn2, Sqxtn2) \
V(sqxtun, Sqxtun) \
V(sqxtun2, Sqxtun2) \
V(suqadd, Suqadd) \
V(sxtl, Sxtl) \
V(sxtl2, Sxtl2) \
V(uadalp, Uadalp) \
V(uaddlp, Uaddlp) \
V(uaddlv, Uaddlv) \
V(umaxv, Umaxv) \
V(uminv, Uminv) \
V(uqxtn, Uqxtn) \
V(uqxtn2, Uqxtn2) \
V(urecpe, Urecpe) \
V(ursqrte, Ursqrte) \
V(usqadd, Usqadd) \
V(uxtl, Uxtl) \
V(uxtl2, Uxtl2) \
V(xtn, Xtn) \
V(xtn2, Xtn2)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn) { \
DCHECK(allow_macro_instructions_); \
ASM(vd, vn); \
}
NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
// NEON 2 vector register with immediate instructions.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
V(fcmeq, Fcmeq) \
V(fcmge, Fcmge) \
V(fcmgt, Fcmgt) \
V(fcmle, Fcmle) \
V(fcmlt, Fcmlt)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
DCHECK(allow_macro_instructions_); \
ASM(vd, vn, imm); \
}
NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
DCHECK(allow_macro_instructions_);
bic(vd, imm8, left_shift);
}
void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions_);
cmeq(vd, vn, imm);
}
void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions_);
cmge(vd, vn, imm);
}
void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions_);
cmgt(vd, vn, imm);
}
void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions_);
cmle(vd, vn, imm);
}
void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions_);
cmlt(vd, vn, imm);
}
// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
V(fmul, Fmul) \
V(fmla, Fmla) \
V(fmls, Fmls) \
V(fmulx, Fmulx) \
V(mul, Mul) \
V(mla, Mla) \
V(mls, Mls) \
V(sqdmulh, Sqdmulh) \
V(sqrdmulh, Sqrdmulh) \
V(sqdmull, Sqdmull) \
V(sqdmull2, Sqdmull2) \
V(sqdmlal, Sqdmlal) \
V(sqdmlal2, Sqdmlal2) \
V(sqdmlsl, Sqdmlsl) \
V(sqdmlsl2, Sqdmlsl2) \
V(smull, Smull) \
V(smull2, Smull2) \
V(smlal, Smlal) \
V(smlal2, Smlal2) \
V(smlsl, Smlsl) \
V(smlsl2, Smlsl2) \
V(umull, Umull) \
V(umull2, Umull2) \
V(umlal, Umlal) \
V(umlal2, Umlal2) \
V(umlsl, Umlsl) \
V(umlsl2, Umlsl2)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
int vm_index) { \
DCHECK(allow_macro_instructions_); \
ASM(vd, vn, vm, vm_index); \
}
NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
V(rshrn, Rshrn) \
V(rshrn2, Rshrn2) \
V(shl, Shl) \
V(shll, Shll) \
V(shll2, Shll2) \
V(shrn, Shrn) \
V(shrn2, Shrn2) \
V(sli, Sli) \
V(sqrshrn, Sqrshrn) \
V(sqrshrn2, Sqrshrn2) \
V(sqrshrun, Sqrshrun) \
V(sqrshrun2, Sqrshrun2) \
V(sqshl, Sqshl) \
V(sqshlu, Sqshlu) \
V(sqshrn, Sqshrn) \
V(sqshrn2, Sqshrn2) \
V(sqshrun, Sqshrun) \
V(sqshrun2, Sqshrun2) \
V(sri, Sri) \
V(srshr, Srshr) \
V(srsra, Srsra) \
V(sshll, Sshll) \
V(sshll2, Sshll2) \
V(sshr, Sshr) \
V(ssra, Ssra) \
V(uqrshrn, Uqrshrn) \
V(uqrshrn2, Uqrshrn2) \
V(uqshl, Uqshl) \
V(uqshrn, Uqshrn) \
V(uqshrn2, Uqshrn2) \
V(urshr, Urshr) \
V(ursra, Ursra) \
V(ushll, Ushll) \
V(ushll2, Ushll2) \
V(ushr, Ushr) \
V(usra, Usra)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
DCHECK(allow_macro_instructions_); \
ASM(vd, vn, shift); \
}
NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
void Ld1(const VRegister& vt, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld1(vt, src);
}
void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld1(vt, vt2, src);
}
void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld1(vt, vt2, vt3, src);
}
void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const VRegister& vt4, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld1(vt, vt2, vt3, vt4, src);
}
void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld1(vt, lane, src);
}
void Ld1r(const VRegister& vt, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld1r(vt, src);
}
void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld2(vt, vt2, src);
}
void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld2(vt, vt2, lane, src);
}
void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld2r(vt, vt2, src);
}
void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld3(vt, vt2, vt3, src);
}
void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
int lane, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld3(vt, vt2, vt3, lane, src);
}
void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld3r(vt, vt2, vt3, src);
}
void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const VRegister& vt4, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld4(vt, vt2, vt3, vt4, src);
}
void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const VRegister& vt4, int lane, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld4(vt, vt2, vt3, vt4, lane, src);
}
void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const VRegister& vt4, const MemOperand& src) {
DCHECK(allow_macro_instructions_);
ld4r(vt, vt2, vt3, vt4, src);
}
void St1(const VRegister& vt, const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st1(vt, dst);
}
void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st1(vt, vt2, dst);
}
void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st1(vt, vt2, vt3, dst);
}
void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const VRegister& vt4, const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st1(vt, vt2, vt3, vt4, dst);
}
void St1(const VRegister& vt, int lane, const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st1(vt, lane, dst);
}
void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st2(vt, vt2, dst);
}
void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st3(vt, vt2, vt3, dst);
}
void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const VRegister& vt4, const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st4(vt, vt2, vt3, vt4, dst);
}
void St2(const VRegister& vt, const VRegister& vt2, int lane,
const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st2(vt, vt2, lane, dst);
}
void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
int lane, const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st3(vt, vt2, vt3, lane, dst);
}
void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
const VRegister& vt4, int lane, const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
st4(vt, vt2, vt3, vt4, lane, dst);
}
void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
DCHECK(allow_macro_instructions_);
tbl(vd, vn, vm);
}
void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
const VRegister& vm) {
DCHECK(allow_macro_instructions_);
tbl(vd, vn, vn2, vm);
}
void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
const VRegister& vn3, const VRegister& vm) {
DCHECK(allow_macro_instructions_);
tbl(vd, vn, vn2, vn3, vm);
}
void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
DCHECK(allow_macro_instructions_);
tbl(vd, vn, vn2, vn3, vn4, vm);
}
void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
DCHECK(allow_macro_instructions_);
tbx(vd, vn, vm);
}
void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
const VRegister& vm) {
DCHECK(allow_macro_instructions_);
tbx(vd, vn, vn2, vm);
}
void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
const VRegister& vn3, const VRegister& vm) {
DCHECK(allow_macro_instructions_);
tbx(vd, vn, vn2, vn3, vm);
}
void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
DCHECK(allow_macro_instructions_);
tbx(vd, vn, vn2, vn3, vn4, vm);
}
// Pseudo-instructions ------------------------------------------------------
// Compute rd = abs(rm).
@ -1198,7 +623,7 @@ class MacroAssembler : public Assembler {
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5 = NoReg,
const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
void Push(const Register& src0, const VRegister& src1);
void Push(const Register& src0, const FPRegister& src1);
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
// specifies the registers that are to be pushed or popped. Higher-numbered
@ -1234,16 +659,16 @@ class MacroAssembler : public Assembler {
PopSizeRegList(regs, kWRegSizeInBits);
}
inline void PushDRegList(RegList regs) {
PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
}
inline void PopDRegList(RegList regs) {
PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
}
inline void PushSRegList(RegList regs) {
PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
}
inline void PopSRegList(RegList regs) {
PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
}
// Push the specified register 'count' times.
@ -1479,8 +904,10 @@ class MacroAssembler : public Assembler {
inline void InitializeRootRegister();
void AssertFPCRState(Register fpcr = NoReg);
void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
void CanonicalizeNaN(const FPRegister& reg) {
CanonicalizeNaN(reg, reg);
}
// Load an object from the root table.
void LoadRoot(CPURegister destination,
@ -1530,9 +957,11 @@ class MacroAssembler : public Assembler {
inline void SmiTag(Register smi);
inline void SmiUntag(Register dst, Register src);
inline void SmiUntag(Register smi);
inline void SmiUntagToDouble(VRegister dst, Register src,
inline void SmiUntagToDouble(FPRegister dst,
Register src,
UntagMode mode = kNotSpeculativeUntag);
inline void SmiUntagToFloat(VRegister dst, Register src,
inline void SmiUntagToFloat(FPRegister dst,
Register src,
UntagMode mode = kNotSpeculativeUntag);
// Tag and push in one step.
@ -1614,8 +1043,9 @@ class MacroAssembler : public Assembler {
// are represented as 0 and handled as a success.
//
// On output the Z flag is set if the operation was successful.
void TryRepresentDoubleAsInt32(Register as_int, VRegister value,
VRegister scratch_d,
void TryRepresentDoubleAsInt32(Register as_int,
FPRegister value,
FPRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL) {
DCHECK(as_int.Is32Bits());
@ -1628,8 +1058,9 @@ class MacroAssembler : public Assembler {
// are represented as 0 and handled as a success.
//
// On output the Z flag is set if the operation was successful.
void TryRepresentDoubleAsInt64(Register as_int, VRegister value,
VRegister scratch_d,
void TryRepresentDoubleAsInt64(Register as_int,
FPRegister value,
FPRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL) {
DCHECK(as_int.Is64Bits());
@ -1892,9 +1323,11 @@ class MacroAssembler : public Assembler {
// All registers are clobbered.
// If no heap_number_map register is provided, the function will take care of
// loading it.
void AllocateHeapNumber(Register result, Label* gc_required,
Register scratch1, Register scratch2,
CPURegister value = NoVReg,
void AllocateHeapNumber(Register result,
Label* gc_required,
Register scratch1,
Register scratch2,
CPURegister value = NoFPReg,
CPURegister heap_number_map = NoReg,
MutableMode mode = IMMUTABLE);
@ -2367,7 +1800,7 @@ class MacroAssembler : public Assembler {
// Like printf, but print at run-time from generated code.
//
// The caller must ensure that arguments for floating-point placeholders
// (such as %e, %f or %g) are VRegisters, and that arguments for integer
// (such as %e, %f or %g) are FPRegisters, and that arguments for integer
// placeholders are Registers.
//
// At the moment it is only possible to print the value of csp if it is the
@ -2461,10 +1894,6 @@ class MacroAssembler : public Assembler {
const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3);
void Movi16bitHelper(const VRegister& vd, uint64_t imm);
void Movi32bitHelper(const VRegister& vd, uint64_t imm);
void Movi64bitHelper(const VRegister& vd, uint64_t imm);
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
// arguments and stack (csp) must be prepared by the caller as for a normal
@ -2489,8 +1918,9 @@ class MacroAssembler : public Assembler {
// important it must be checked separately.
//
// On output the Z flag is set if the operation was successful.
void TryRepresentDoubleAsInt(Register as_int, VRegister value,
VRegister scratch_d,
void TryRepresentDoubleAsInt(Register as_int,
FPRegister value,
FPRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL);
@ -2610,8 +2040,8 @@ class UseScratchRegisterScope {
availablefp_(masm->FPTmpList()),
old_available_(available_->list()),
old_availablefp_(availablefp_->list()) {
DCHECK_EQ(available_->type(), CPURegister::kRegister);
DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
DCHECK(available_->type() == CPURegister::kRegister);
DCHECK(availablefp_->type() == CPURegister::kFPRegister);
}
~UseScratchRegisterScope();
@ -2620,15 +2050,15 @@ class UseScratchRegisterScope {
// automatically when the scope ends.
Register AcquireW() { return AcquireNextAvailable(available_).W(); }
Register AcquireX() { return AcquireNextAvailable(available_).X(); }
VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
Register UnsafeAcquire(const Register& reg) {
return Register(UnsafeAcquire(available_, reg));
}
Register AcquireSameSizeAs(const Register& reg);
VRegister AcquireSameSizeAs(const VRegister& reg);
FPRegister AcquireSameSizeAs(const FPRegister& reg);
private:
static CPURegister AcquireNextAvailable(CPURegList* available);
@ -2637,11 +2067,11 @@ class UseScratchRegisterScope {
// Available scratch registers.
CPURegList* available_; // kRegister
CPURegList* availablefp_; // kVRegister
CPURegList* availablefp_; // kFPRegister
// The state of the available lists at the start of this scope.
RegList old_available_; // kRegister
RegList old_availablefp_; // kVRegister
RegList old_availablefp_; // kFPRegister
};
MemOperand ContextMemOperand(Register context, int index = 0);

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -12,78 +12,23 @@ namespace internal {
#define __ assm->
uint32_t float_sign(float val) {
uint32_t bits = bit_cast<uint32_t>(val);
return unsigned_bitextract_32(31, 31, bits);
}
uint32_t float_exp(float val) {
uint32_t bits = bit_cast<uint32_t>(val);
return unsigned_bitextract_32(30, 23, bits);
}
uint32_t float_mantissa(float val) {
uint32_t bits = bit_cast<uint32_t>(val);
return unsigned_bitextract_32(22, 0, bits);
}
uint32_t double_sign(double val) {
uint64_t bits = bit_cast<uint64_t>(val);
return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, bits));
}
uint32_t double_exp(double val) {
uint64_t bits = bit_cast<uint64_t>(val);
return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, bits));
}
uint64_t double_mantissa(double val) {
uint64_t bits = bit_cast<uint64_t>(val);
return unsigned_bitextract_64(51, 0, bits);
}
float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
uint32_t bits = sign << kFloatExponentBits | exp;
return bit_cast<float>((bits << kFloatMantissaBits) | mantissa);
}
double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
uint64_t bits = sign << kDoubleExponentBits | exp;
return bit_cast<double>((bits << kDoubleMantissaBits) | mantissa);
}
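Worked examples for the two packing helpers (values chosen for illustration):
float_pack(0, 0x7f, 0);    // (0 << 8 | 0x7f) << 23 == 0x3f800000, i.e. 1.0f
double_pack(1, 0x3ff, 0);  // (1 << 11 | 0x3ff) << 52 == 0xbff0000000000000, i.e. -1.0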
int float16classify(float16 value) {
const uint16_t exponent_max = (1 << kFloat16ExponentBits) - 1;
const uint16_t exponent_mask = exponent_max << kFloat16MantissaBits;
const uint16_t mantissa_mask = (1 << kFloat16MantissaBits) - 1;
const uint16_t exponent = (value & exponent_mask) >> kFloat16MantissaBits;
const uint16_t mantissa = value & mantissa_mask;
if (exponent == 0) {
if (mantissa == 0) {
return FP_ZERO;
}
return FP_SUBNORMAL;
} else if (exponent == exponent_max) {
if (mantissa == 0) {
return FP_INFINITE;
}
return FP_NAN;
}
return FP_NORMAL;
}
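Worked examples for the classifier (5 exponent bits, 10 mantissa bits, so exponent_max is 0x1f):
float16classify(0x0000);  // FP_ZERO: exponent and mantissa both zero
float16classify(0x0001);  // FP_SUBNORMAL: zero exponent, non-zero mantissa
float16classify(0x3c00);  // FP_NORMAL: half-precision 1.0
float16classify(0x7c00);  // FP_INFINITE: exponent all ones, mantissa zero
float16classify(0x7e00);  // FP_NAN: exponent all ones, non-zero mantissa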
int CountLeadingZeros(uint64_t value, int width) {
DCHECK(base::bits::IsPowerOfTwo32(width) && (width <= 64));
if (value == 0) {
return width;
// TODO(jbramley): Optimize this for ARM64 hosts.
DCHECK((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
count++;
bit_test >>= 1;
}
return base::bits::CountLeadingZeros64(value << (64 - width));
return count;
}
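Both variants agree; for example, CountLeadingZeros(0x00f0, 32) returns 24: the loop walks down from bit 31 to the first set bit (bit 7), while the branch-free form shifts the value to the top of 64 bits (0x000000f000000000) before counting, with value == 0 special-cased because a shift by 64 would be undefined.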
int CountLeadingSignBits(int64_t value, int width) {
DCHECK(base::bits::IsPowerOfTwo32(width) && (width <= 64));
// TODO(jbramley): Optimize this for ARM64 hosts.
DCHECK((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
@ -93,32 +38,43 @@ int CountLeadingSignBits(int64_t value, int width) {
int CountTrailingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
DCHECK((width == 32) || (width == 64));
if (width == 64) {
return static_cast<int>(base::bits::CountTrailingZeros64(value));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
}
return static_cast<int>(base::bits::CountTrailingZeros32(
static_cast<uint32_t>(value & 0xffffffff)));
return count;
}
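For example, CountTrailingZeros(0x28, 32) probes bits 0, 1 and 2, stops at set bit 3, and returns 3; the branch-free variant reads the same answer directly from a hardware count-trailing-zeros instruction.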
int CountSetBits(uint64_t value, int width) {
// TODO(jbramley): Would it be useful to allow other widths? The
// implementation already supports them.
DCHECK((width == 32) || (width == 64));
if (width == 64) {
return static_cast<int>(base::bits::CountPopulation64(value));
}
return static_cast<int>(base::bits::CountPopulation32(
static_cast<uint32_t>(value & 0xffffffff)));
}
int LowestSetBitPosition(uint64_t value) {
DCHECK_NE(value, 0U);
return CountTrailingZeros(value, 64) + 1;
}
// Mask out unused bits to ensure that they are not counted.
value &= (0xffffffffffffffffUL >> (64-width));
int HighestSetBitPosition(uint64_t value) {
DCHECK_NE(value, 0U);
return 63 - CountLeadingZeros(value, 64);
// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,
// where the size of each bit field doubles each time.
// An example for an 8-bit value:
// Bits:   h  g  f  e  d  c  b  a
//          \ |   \ |   \ |   \ |
// value =  h+g   f+e   d+c   b+a
//             \   |       \   |
// value =  h+g+f+e      d+c+b+a
//                 \       |
// value =  h+g+f+e+d+c+b+a
value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
return static_cast<int>(value);
}
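A worked 8-bit instance of the accumulation for value = 0b10110110 (five bits set):
pairs:   01 10 01 01   // 1+0, 1+1, 0+1, 1+0
nibbles: 0011 0010     // 1+2, 1+1
byte:    00000101      // 3+2 == 5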
@ -128,7 +84,7 @@ uint64_t LargestPowerOf2Divisor(uint64_t value) {
int MaskToBit(uint64_t mask) {
DCHECK_EQ(CountSetBits(mask, 64), 1);
DCHECK(CountSetBits(mask, 64) == 1);
return CountTrailingZeros(mask, 64);
}


@ -8,7 +8,6 @@
#include <cmath>
#include "src/arm64/constants-arm64.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
@ -17,26 +16,40 @@ namespace internal {
STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
uint32_t float_sign(float val);
uint32_t float_exp(float val);
uint32_t float_mantissa(float val);
uint32_t double_sign(double val);
uint32_t double_exp(double val);
uint64_t double_mantissa(double val);
// Floating point representation.
static inline uint32_t float_to_rawbits(float value) {
uint32_t bits = 0;
memcpy(&bits, &value, 4);
return bits;
}
float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);
// An fpclassify() function for 16-bit half-precision floats.
int float16classify(float16 value);
static inline uint64_t double_to_rawbits(double value) {
uint64_t bits = 0;
memcpy(&bits, &value, 8);
return bits;
}
static inline float rawbits_to_float(uint32_t bits) {
float value = 0.0;
memcpy(&value, &bits, 4);
return value;
}
static inline double rawbits_to_double(uint64_t bits) {
double value = 0.0;
memcpy(&value, &bits, 8);
return value;
}
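memcpy is used here because it is the one strictly portable way to reinterpret an object representation in C++; a union or reinterpret_cast would rely on implementation-specific behaviour. A quick round trip:
double_to_rawbits(1.0);                 // 0x3ff0000000000000
rawbits_to_double(0x3ff0000000000000);  // 1.0 again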
// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
int LowestSetBitPosition(uint64_t value);
int HighestSetBitPosition(uint64_t value);
uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
@ -73,7 +86,7 @@ T ReverseBytes(T value, int block_bytes_log2) {
// NaN tests.
inline bool IsSignallingNaN(double num) {
uint64_t raw = bit_cast<uint64_t>(num);
uint64_t raw = double_to_rawbits(num);
if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) {
return true;
}
@ -82,17 +95,13 @@ inline bool IsSignallingNaN(double num) {
inline bool IsSignallingNaN(float num) {
uint32_t raw = bit_cast<uint32_t>(num);
uint32_t raw = float_to_rawbits(num);
if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
return true;
}
return false;
}
inline bool IsSignallingNaN(float16 num) {
const uint16_t kFP16QuietNaNMask = 0x0200;
return (float16classify(num) == FP_NAN) && ((num & kFP16QuietNaNMask) == 0);
}
template <typename T>
inline bool IsQuietNaN(T num) {
@ -103,14 +112,13 @@ inline bool IsQuietNaN(T num) {
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
DCHECK(std::isnan(num));
return bit_cast<double>(bit_cast<uint64_t>(num) | kDQuietNanMask);
return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
}
inline float ToQuietNaN(float num) {
DCHECK(std::isnan(num));
return bit_cast<float>(bit_cast<uint32_t>(num) |
static_cast<uint32_t>(kSQuietNanMask));
return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
}
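
Both the NaN tests and ToQuietNaN hinge on the quiet-NaN bit, the top mantissa bit of the format. A sketch of the bit manipulation, assuming kDQuietNanMask selects bit 51 of a binary64 value:

#include <cstdint>
#include <cstdio>

// Assumes kDQuietNanMask is bit 51, the top mantissa bit of a double
// (matching the constant used by the helpers above).
static const uint64_t kDQuietNanMask = 0x0008000000000000ULL;

int main() {
  uint64_t snan = 0x7ff0000000000001ULL;  // exponent all ones, quiet bit clear
  uint64_t qnan = snan | kDQuietNanMask;  // what ToQuietNaN does to the bits
  printf("snan signalling? %d\n", (snan & kDQuietNanMask) == 0);  // 1
  printf("qnan signalling? %d\n", (qnan & kDQuietNanMask) == 0);  // 0
  return 0;
}
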

View File

@ -257,7 +257,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
// Convert FP-offsets to SP-offsets if it results in better code.
if (Assembler::IsImmLSUnscaled(from_sp) ||
Assembler::IsImmLSScaled(from_sp, 3)) {
Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
offset = FrameOffset::FromStackPointer(from_sp);
}
}
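
The rebasing above is plain arithmetic: an FP-relative slot offset becomes SP-relative by adding the frame's fixed SP-to-FP distance, and the result is kept only if the new immediate still encodes. With illustrative numbers:

#include <cstdio>

int main() {
  int from_fp = -16;                 // slot addressed as [fp - 16]
  int sp_to_fp = 48;                 // GetSPToFPOffset(): illustrative value
  int from_sp = from_fp + sp_to_fp;  // same slot addressed as [sp + 32]
  printf("[sp + %d]\n", from_sp);
  return 0;
}
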
@ -1938,11 +1938,11 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
frame->AllocateSavedCalleeRegisterSlots(saved_count *
(kDoubleSize / kPointerSize));
}
@ -2014,11 +2014,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
__ PushCPURegList(saves_fp);
}
// Save registers.
@ -2044,7 +2044,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore fp registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
if (saves_fp.Count() != 0) {
__ PopCPURegList(saves_fp);
@ -2144,7 +2144,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination).S();
FPRegister dst = g.ToDoubleRegister(destination).S();
__ Fmov(dst, src.ToFloat32());
} else {
DCHECK(destination->IsFPStackSlot());
@ -2152,7 +2152,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Str(wzr, g.ToMemOperand(destination, masm()));
} else {
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireS();
FPRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
__ Str(temp, g.ToMemOperand(destination, masm()));
}
@ -2160,7 +2160,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination);
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src.ToFloat64());
} else {
DCHECK(destination->IsFPStackSlot());
@ -2168,16 +2168,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Str(xzr, g.ToMemOperand(destination, masm()));
} else {
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireD();
FPRegister temp = scope.AcquireD();
__ Fmov(temp, src.ToFloat64());
__ Str(temp, g.ToMemOperand(destination, masm()));
}
}
}
} else if (source->IsFPRegister()) {
VRegister src = g.ToDoubleRegister(source);
FPRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination);
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
@ -2190,7 +2190,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Ldr(g.ToDoubleRegister(destination), src);
} else {
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireD();
FPRegister temp = scope.AcquireD();
__ Ldr(temp, src);
__ Str(temp, g.ToMemOperand(destination, masm()));
}
@ -2234,10 +2234,10 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Str(temp_1, src);
} else if (source->IsFPRegister()) {
UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireD();
VRegister src = g.ToDoubleRegister(source);
FPRegister temp = scope.AcquireD();
FPRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination);
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(temp, src);
__ Fmov(src, dst);
__ Fmov(dst, temp);

View File

@ -103,13 +103,13 @@ class Arm64OperandGenerator final : public OperandGenerator {
case kArithmeticImm:
return Assembler::IsImmAddSub(value);
case kLoadStoreImm8:
return IsLoadStoreImmediate(value, 0);
return IsLoadStoreImmediate(value, LSByte);
case kLoadStoreImm16:
return IsLoadStoreImmediate(value, 1);
return IsLoadStoreImmediate(value, LSHalfword);
case kLoadStoreImm32:
return IsLoadStoreImmediate(value, 2);
return IsLoadStoreImmediate(value, LSWord);
case kLoadStoreImm64:
return IsLoadStoreImmediate(value, 3);
return IsLoadStoreImmediate(value, LSDoubleWord);
case kNoImmediate:
return false;
case kShift32Imm: // Fall through.
@ -130,7 +130,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
}
private:
bool IsLoadStoreImmediate(int64_t value, unsigned size) {
bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
return Assembler::IsImmLSScaled(value, size) ||
Assembler::IsImmLSUnscaled(value);
}
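
For reference, the two AArch64 addressing forms this helper checks are a signed 9-bit unscaled offset (LDUR/STUR) and an unsigned 12-bit offset scaled by the access size, with LSByte..LSDoubleWord supplying the size log2 (0..3). A sketch of those encoding rules, not the assembler's actual code:

#include <cstdint>
#include <cstdio>

static bool IsImmLSUnscaled(int64_t offset) {
  return offset >= -256 && offset <= 255;  // signed 9-bit immediate
}

static bool IsImmLSScaled(int64_t offset, unsigned size_log2) {
  bool size_multiple = (offset & ((1 << size_log2) - 1)) == 0;
  // Unsigned 12-bit immediate after scaling by the access size.
  return size_multiple && offset >= 0 && (offset >> size_log2) < 4096;
}

int main() {
  printf("%d\n", IsImmLSScaled(32760, 3));  // 4095 * 8 bytes: encodes (1)
  printf("%d\n", IsImmLSScaled(-8, 3));     // negative: scaled form fails (0)
  printf("%d\n", IsImmLSUnscaled(-8));      // but the unscaled form works (1)
  return 0;
}
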

View File

@ -44,12 +44,14 @@ void DelayedMasm::Mov(const Register& rd,
__ Mov(rd, operand, discard_mode);
}
void DelayedMasm::Fmov(VRegister fd, VRegister fn) {
void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) {
EmitPending();
__ Fmov(fd, fn);
}
void DelayedMasm::Fmov(VRegister fd, double imm) {
void DelayedMasm::Fmov(FPRegister fd, double imm) {
EmitPending();
__ Fmov(fd, imm);
}

View File

@ -61,8 +61,8 @@ class DelayedMasm BASE_EMBEDDED {
inline void Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, double imm);
inline void Fmov(FPRegister fd, FPRegister fn);
inline void Fmov(FPRegister fd, double imm);
inline void LoadObject(Register result, Handle<Object> object);
// Instructions which try to merge with the pending instructions.

void StackSlotMove(LOperand* src, LOperand* dst);

View File

@ -179,9 +179,9 @@ class TestAndBranch : public BranchGenerator {
// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
public:
BranchIfNonZeroNumber(LCodeGen* codegen, const VRegister& value,
const VRegister& scratch)
: BranchGenerator(codegen), value_(value), scratch_(scratch) {}
BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
const FPRegister& scratch)
: BranchGenerator(codegen), value_(value), scratch_(scratch) { }
virtual void Emit(Label* label) const {
__ Fabs(scratch_, value_);
@ -198,8 +198,8 @@ class BranchIfNonZeroNumber : public BranchGenerator {
}
private:
const VRegister& value_;
const VRegister& scratch_;
const FPRegister& value_;
const FPRegister& scratch_;
};
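
The Fabs/Fcmp sequence makes one compare reject +0.0, -0.0 and NaN together: the absolute value folds the zeros into one, and "greater than zero" is false for NaN because NaN compares unordered. A scalar sketch of the predicate:

#include <cmath>
#include <cstdio>

static bool NonZeroNumber(double v) { return std::fabs(v) > 0.0; }

int main() {
  printf("%d %d %d %d\n",
         NonZeroNumber(1.5),   // 1: branch taken
         NonZeroNumber(0.0),   // 0
         NonZeroNumber(-0.0),  // 0: fabs folds -0.0 into +0.0
         NonZeroNumber(NAN));  // 0: unordered compare is false
  return 0;
}
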
@ -547,7 +547,7 @@ void LCodeGen::SaveCallerDoubles() {
while (!iterator.Done()) {
// TODO(all): Is this supposed to save just the callee-saved doubles? It
// looks like it's saving all of them.
VRegister value = VRegister::from_code(iterator.Current());
FPRegister value = FPRegister::from_code(iterator.Current());
__ Poke(value, count * kDoubleSize);
iterator.Advance();
count++;
@ -565,7 +565,7 @@ void LCodeGen::RestoreCallerDoubles() {
while (!iterator.Done()) {
// TODO(all): Is this supposed to restore just the callee-saved doubles? It
// looks like it's restoring all of them.
VRegister value = VRegister::from_code(iterator.Current());
FPRegister value = FPRegister::from_code(iterator.Current());
__ Peek(value, count * kDoubleSize);
iterator.Advance();
count++;
@ -1135,7 +1135,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
(pushed_arguments_ + GetTotalFrameSlotCount()) * kPointerSize -
StandardFrameConstants::kFixedFrameSizeAboveFp;
int jssp_offset = fp_offset + jssp_offset_to_fp;
if (masm()->IsImmLSScaled(jssp_offset, kPointerSizeLog2)) {
if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
return MemOperand(masm()->StackPointer(), jssp_offset);
}
}
@ -1274,10 +1274,11 @@ void LCodeGen::EmitTestAndBranch(InstrType instr,
EmitBranchGeneric(instr, branch);
}
template <class InstrType>
template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
const VRegister& value,
const VRegister& scratch) {
const FPRegister& value,
const FPRegister& scratch) {
BranchIfNonZeroNumber branch(this, value, scratch);
EmitBranchGeneric(instr, branch);
}
@ -2278,7 +2279,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
DCHECK(instr->hydrogen()->representation().IsDouble());
VRegister object = ToDoubleRegister(instr->object());
FPRegister object = ToDoubleRegister(instr->object());
Register temp = ToRegister(instr->temp());
// If we don't have a NaN, we don't have the hole, so branch now to avoid the
@ -3275,7 +3276,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (instr->hydrogen()->representation().IsDouble()) {
DCHECK(access.IsInobject());
VRegister result = ToDoubleRegister(instr->result());
FPRegister result = ToDoubleRegister(instr->result());
__ Ldr(result, FieldMemOperand(object, offset));
return;
}
@ -3435,7 +3436,7 @@ void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
// The result is the magnitude (abs) of the smallest value a smi can
// represent, encoded as a double.
__ Mov(result_bits, bit_cast<uint64_t>(static_cast<double>(0x80000000)));
__ Mov(result_bits, double_to_rawbits(0x80000000));
__ B(deferred->allocation_entry());
__ Bind(deferred->exit());
@ -4977,7 +4978,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DCHECK(access.IsInobject());
DCHECK(!instr->hydrogen()->has_transition());
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
VRegister value = ToDoubleRegister(instr->value());
FPRegister value = ToDoubleRegister(instr->value());
__ Str(value, FieldMemOperand(object, offset));
return;
}
@ -5015,7 +5016,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
VRegister value = ToDoubleRegister(instr->value());
FPRegister value = ToDoubleRegister(instr->value());
__ Str(value, FieldMemOperand(object, offset));
} else if (representation.IsSmi() &&
instr->hydrogen()->value()->representation().IsInteger32()) {

View File

@ -159,9 +159,10 @@ class LCodeGen: public LCodeGenBase {
const Register& value,
uint64_t mask);
template <class InstrType>
void EmitBranchIfNonZeroNumber(InstrType instr, const VRegister& value,
const VRegister& scratch);
template<class InstrType>
void EmitBranchIfNonZeroNumber(InstrType instr,
const FPRegister& value,
const FPRegister& scratch);
template<class InstrType>
void EmitBranchIfHeapNumber(InstrType instr,

View File

@ -68,7 +68,7 @@ class LGapResolver BASE_EMBEDDED {
// These two methods switch from one mode to the other.
void AcquireSavedValueRegister() { masm_.AcquireScratchRegister(); }
void ReleaseSavedValueRegister() { masm_.ReleaseScratchRegister(); }
const VRegister& SavedFPValueRegister() {
const FPRegister& SavedFPValueRegister() {
// We use the Crankshaft floating-point scratch register to break a cycle
// involving double values as the MacroAssembler will not need it for the
// operations performed by the gap resolver.

View File

@ -1468,7 +1468,6 @@
'arm64/macro-assembler-arm64-inl.h',
'arm64/simulator-arm64.cc',
'arm64/simulator-arm64.h',
'arm64/simulator-logic-arm64.cc',
'arm64/utils-arm64.cc',
'arm64/utils-arm64.h',
'arm64/eh-frame-arm64.cc',

View File

@ -227,9 +227,6 @@ v8_executable("cctest") {
"test-js-arm64-variables.cc",
"test-run-wasm-relocation-arm64.cc",
"test-simulator-arm64.cc",
"test-simulator-neon-arm64.cc",
"test-simulator-neon-inputs-arm64.h",
"test-simulator-neon-traces-arm64.h",
"test-utils-arm64.cc",
"test-utils-arm64.h",
]

View File

@ -257,9 +257,6 @@
'test-utils-arm64.cc',
'test-utils-arm64.h',
'test-assembler-arm64.cc',
'test-simulator-neon-arm64.cc',
'test-simulator-neon-inputs-arm64.h',
'test-simulator-neon-traces-arm64.h',
'test-code-stubs.cc',
'test-code-stubs.h',
'test-code-stubs-arm64.cc',

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,952 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file holds inputs for the instructions tested by test-simulator-arm64.
//
#include <stdint.h>
// This header should only be used by test-simulator-arm64.cc, so it
// doesn't need the usual header guard.
#ifdef V8_TEST_SIMULATOR_INPUTS_ARM64_H_
#error This header should be included only once.
#endif
#define V8_TEST_SIMULATOR_INPUTS_ARM64_H_
// clang-format off
// Double values, stored as uint64_t representations. This ensures exact bit
// representation, and avoids the loss of NaNs and suchlike through C++ casts.
#define INPUT_DOUBLE_BASIC \
/* Simple values. */ \
/* 0.0 */ \
0x0000000000000000, \
/* The smallest normal value. */ \
0x0010000000000000, \
/* The value just below 0.5. */ \
0x3fdfffffffffffff, \
/* 0.5 */ \
0x3fe0000000000000, \
/* The value just above 0.5. */ \
0x3fe0000000000001, \
/* The value just below 1.0. */ \
0x3fefffffffffffff, \
/* 1.0 */ \
0x3ff0000000000000, \
/* The value just above 1.0. */ \
0x3ff0000000000001, \
/* 1.5 */ \
0x3ff8000000000000, \
/* 10 */ \
0x4024000000000000, \
/* The largest finite value. */ \
0x7fefffffffffffff, \
\
/* Infinity. */ \
0x7ff0000000000000, \
\
/* NaNs. */ \
/* - Quiet NaNs */ \
0x7ff923456789abcd, \
0x7ff8000000000000, \
/* - Signalling NaNs */ \
0x7ff123456789abcd, \
0x7ff0000000000000, \
\
/* Subnormals. */ \
/* - A recognisable bit pattern. */ \
0x000123456789abcd, \
/* - The largest subnormal value. */ \
0x000fffffffffffff, \
/* - The smallest subnormal value. */ \
0x0000000000000001, \
\
/* The same values again, but negated. */ \
0x8000000000000000, \
0x8010000000000000, \
0xbfdfffffffffffff, \
0xbfe0000000000000, \
0xbfe0000000000001, \
0xbfefffffffffffff, \
0xbff0000000000000, \
0xbff0000000000001, \
0xbff8000000000000, \
0xc024000000000000, \
0xffefffffffffffff, \
0xfff0000000000000, \
0xfff923456789abcd, \
0xfff8000000000000, \
0xfff123456789abcd, \
0xfff0000000000000, \
0x800123456789abcd, \
0x800fffffffffffff, \
0x8000000000000001,
// Extra inputs. Passing these to 3- or 2-op instructions makes the trace file
// very large, so these should only be used with 1-op instructions.
#define INPUT_DOUBLE_CONVERSIONS \
/* Values relevant for conversions to single-precision floats. */ \
0x47efffff00000000, \
/* - The smallest normalized float. */ \
0x3810000000000000, \
/* - Normal floats that need (ties-to-even) rounding. */ \
/* For normalized numbers, bit 29 (0x0000000020000000) is the */ \
/* lowest-order bit which will fit in the float's mantissa. */ \
0x3ff0000000000000, \
0x3ff0000000000001, \
0x3ff0000010000000, \
0x3ff0000010000001, \
0x3ff0000020000000, \
0x3ff0000020000001, \
0x3ff0000030000000, \
0x3ff0000030000001, \
0x3ff0000040000000, \
0x3ff0000040000001, \
0x3ff0000050000000, \
0x3ff0000050000001, \
0x3ff0000060000000, \
/* - A mantissa that overflows into the exponent during rounding. */ \
0x3feffffff0000000, \
/* - The largest double that rounds to a normal float. */ \
0x47efffffefffffff, \
/* - The smallest exponent that's too big for a float. */ \
0x47f0000000000000, \
/* - This exponent is in range, but the value rounds to infinity. */ \
0x47effffff0000000, \
/* - The largest double which is too small for a subnormal float. */ \
0x3690000000000000, \
/* - The largest subnormal float. */ \
0x380fffffc0000000, \
/* - The smallest subnormal float. */ \
0x36a0000000000000, \
/* - Subnormal floats that need (ties-to-even) rounding. */ \
/* For these subnormals, bit 34 (0x0000000400000000) is the */ \
/* lowest-order bit which will fit in the float's mantissa. */ \
0x37c159e000000000, \
0x37c159e000000001, \
0x37c159e200000000, \
0x37c159e200000001, \
0x37c159e400000000, \
0x37c159e400000001, \
0x37c159e600000000, \
0x37c159e600000001, \
0x37c159e800000000, \
0x37c159e800000001, \
0x37c159ea00000000, \
0x37c159ea00000001, \
0x37c159ec00000000, \
/* - The smallest double which rounds up to become a subnormal float. */ \
0x3690000000000001, \
\
/* The same values again, but negated. */ \
0xc7efffff00000000, \
0xb810000000000000, \
0xbff0000000000000, \
0xbff0000000000001, \
0xbff0000010000000, \
0xbff0000010000001, \
0xbff0000020000000, \
0xbff0000020000001, \
0xbff0000030000000, \
0xbff0000030000001, \
0xbff0000040000000, \
0xbff0000040000001, \
0xbff0000050000000, \
0xbff0000050000001, \
0xbff0000060000000, \
0xbfeffffff0000000, \
0xc7efffffefffffff, \
0xc7f0000000000000, \
0xc7effffff0000000, \
0xb690000000000000, \
0xb80fffffc0000000, \
0xb6a0000000000000, \
0xb7c159e000000000, \
0xb7c159e000000001, \
0xb7c159e200000000, \
0xb7c159e200000001, \
0xb7c159e400000000, \
0xb7c159e400000001, \
0xb7c159e600000000, \
0xb7c159e600000001, \
0xb7c159e800000000, \
0xb7c159e800000001, \
0xb7c159ea00000000, \
0xb7c159ea00000001, \
0xb7c159ec00000000, \
0xb690000000000001, \
\
/* Values relevant for conversions to integers (frint). */ \
\
/* - The lowest-order mantissa bit has value 1. */ \
0x4330000000000000, \
0x4330000000000001, \
0x4330000000000002, \
0x4330000000000003, \
0x433fedcba9876543, \
0x433ffffffffffffc, \
0x433ffffffffffffd, \
0x433ffffffffffffe, \
0x433fffffffffffff, \
/* - The lowest-order mantissa bit has value 0.5. */ \
0x4320000000000000, \
0x4320000000000001, \
0x4320000000000002, \
0x4320000000000003, \
0x432fedcba9876543, \
0x432ffffffffffffc, \
0x432ffffffffffffd, \
0x432ffffffffffffe, \
0x432fffffffffffff, \
/* - The lowest-order mantissa bit has value 0.25. */ \
0x4310000000000000, \
0x4310000000000001, \
0x4310000000000002, \
0x4310000000000003, \
0x431fedcba9876543, \
0x431ffffffffffffc, \
0x431ffffffffffffd, \
0x431ffffffffffffe, \
0x431fffffffffffff, \
\
/* The same values again, but negated. */ \
0xc330000000000000, \
0xc330000000000001, \
0xc330000000000002, \
0xc330000000000003, \
0xc33fedcba9876543, \
0xc33ffffffffffffc, \
0xc33ffffffffffffd, \
0xc33ffffffffffffe, \
0xc33fffffffffffff, \
0xc320000000000000, \
0xc320000000000001, \
0xc320000000000002, \
0xc320000000000003, \
0xc32fedcba9876543, \
0xc32ffffffffffffc, \
0xc32ffffffffffffd, \
0xc32ffffffffffffe, \
0xc32fffffffffffff, \
0xc310000000000000, \
0xc310000000000001, \
0xc310000000000002, \
0xc310000000000003, \
0xc31fedcba9876543, \
0xc31ffffffffffffc, \
0xc31ffffffffffffd, \
0xc31ffffffffffffe, \
0xc31fffffffffffff, \
\
/* Values relevant for conversions to integers (fcvt). */ \
0xc3e0000000000001, /* The value just below INT64_MIN. */ \
0xc3e0000000000000, /* INT64_MIN */ \
0xc3dfffffffffffff, /* The value just above INT64_MIN. */ \
0x43dfffffffffffff, /* The value just below INT64_MAX. */ \
/* INT64_MAX is not representable. */ \
0x43e0000000000000, /* The value just above INT64_MAX. */ \
\
0x43efffffffffffff, /* The value just below UINT64_MAX. */ \
/* UINT64_MAX is not representable. */ \
0x43f0000000000000, /* The value just above UINT64_MAX. */ \
\
0xc1e0000000200001, /* The value just below INT32_MIN - 1.0. */ \
0xc1e0000000200000, /* INT32_MIN - 1.0 */ \
0xc1e00000001fffff, /* The value just above INT32_MIN - 1.0. */ \
0xc1e0000000100001, /* The value just below INT32_MIN - 0.5. */ \
0xc1e0000000100000, /* INT32_MIN - 0.5 */ \
0xc1e00000000fffff, /* The value just above INT32_MIN - 0.5. */ \
0xc1e0000000000001, /* The value just below INT32_MIN. */ \
0xc1e0000000000000, /* INT32_MIN */ \
0xc1dfffffffffffff, /* The value just above INT32_MIN. */ \
0xc1dfffffffe00001, /* The value just below INT32_MIN + 0.5. */ \
0xc1dfffffffe00000, /* INT32_MIN + 0.5 */ \
0xc1dfffffffdfffff, /* The value just above INT32_MIN + 0.5. */ \
\
0x41dfffffff7fffff, /* The value just below INT32_MAX - 1.0. */ \
0x41dfffffff800000, /* INT32_MAX - 1.0 */ \
0x41dfffffff800001, /* The value just above INT32_MAX - 1.0. */ \
0x41dfffffff9fffff, /* The value just below INT32_MAX - 0.5. */ \
0x41dfffffffa00000, /* INT32_MAX - 0.5 */ \
0x41dfffffffa00001, /* The value just above INT32_MAX - 0.5. */ \
0x41dfffffffbfffff, /* The value just below INT32_MAX. */ \
0x41dfffffffc00000, /* INT32_MAX */ \
0x41dfffffffc00001, /* The value just above INT32_MAX. */ \
0x41dfffffffdfffff, /* The value just below INT32_MAX + 0.5. */ \
0x41dfffffffe00000, /* INT32_MAX + 0.5 */ \
0x41dfffffffe00001, /* The value just above INT32_MAX + 0.5. */ \
\
0x41efffffffbfffff, /* The value just below UINT32_MAX - 1.0. */ \
0x41efffffffc00000, /* UINT32_MAX - 1.0 */ \
0x41efffffffc00001, /* The value just above UINT32_MAX - 1.0. */ \
0x41efffffffcfffff, /* The value just below UINT32_MAX - 0.5. */ \
0x41efffffffd00000, /* UINT32_MAX - 0.5 */ \
0x41efffffffd00001, /* The value just above UINT32_MAX - 0.5. */ \
0x41efffffffdfffff, /* The value just below UINT32_MAX. */ \
0x41efffffffe00000, /* UINT32_MAX */ \
0x41efffffffe00001, /* The value just above UINT32_MAX. */ \
0x41efffffffefffff, /* The value just below UINT32_MAX + 0.5. */ \
0x41effffffff00000, /* UINT32_MAX + 0.5 */ \
0x41effffffff00001, /* The value just above UINT32_MAX + 0.5. */
// Float values, stored as uint32_t representations. This ensures exact bit
// representation, and avoids the loss of NaNs and suchlike through C++ casts.
#define INPUT_FLOAT_BASIC \
/* Simple values. */ \
0x00000000, /* 0.0 */ \
0x00800000, /* The smallest normal value. */ \
0x3effffff, /* The value just below 0.5. */ \
0x3f000000, /* 0.5 */ \
0x3f000001, /* The value just above 0.5. */ \
0x3f7fffff, /* The value just below 1.0. */ \
0x3f800000, /* 1.0 */ \
0x3f800001, /* The value just above 1.0. */ \
0x3fc00000, /* 1.5 */ \
0x41200000, /* 10 */ \
0x7f8fffff, /* The largest finite value. */ \
\
/* Infinity. */ \
0x7f800000, \
\
/* NaNs. */ \
/* - Quiet NaNs */ \
0x7fd23456, \
0x7fc00000, \
/* - Signalling NaNs */ \
0x7f923456, \
0x7f800001, \
\
/* Subnormals. */ \
/* - A recognisable bit pattern. */ \
0x00123456, \
/* - The largest subnormal value. */ \
0x007fffff, \
/* - The smallest subnormal value. */ \
0x00000001, \
\
/* The same values again, but negated. */ \
0x80000000, \
0x80800000, \
0xbeffffff, \
0xbf000000, \
0xbf000001, \
0xbf7fffff, \
0xbf800000, \
0xbf800001, \
0xbfc00000, \
0xc1200000, \
0xff8fffff, \
0xff800000, \
0xffd23456, \
0xffc00000, \
0xff923456, \
0xff800001, \
0x80123456, \
0x807fffff, \
0x80000001,
// Extra inputs. Passing these to 3- or 2-op instructions makes the trace file
// very large, so these should only be used with 1-op instructions.
#define INPUT_FLOAT_CONVERSIONS \
/* Values relevant for conversions to integers (frint). */ \
/* - The lowest-order mantissa bit has value 1. */ \
0x4b000000, \
0x4b000001, \
0x4b000002, \
0x4b000003, \
0x4b765432, \
0x4b7ffffc, \
0x4b7ffffd, \
0x4b7ffffe, \
0x4b7fffff, \
/* - The lowest-order mantissa bit has value 0.5. */ \
0x4a800000, \
0x4a800001, \
0x4a800002, \
0x4a800003, \
0x4af65432, \
0x4afffffc, \
0x4afffffd, \
0x4afffffe, \
0x4affffff, \
/* - The lowest-order mantissa bit has value 0.25. */ \
0x4a000000, \
0x4a000001, \
0x4a000002, \
0x4a000003, \
0x4a765432, \
0x4a7ffffc, \
0x4a7ffffd, \
0x4a7ffffe, \
0x4a7fffff, \
\
/* The same values again, but negated. */ \
0xcb000000, \
0xcb000001, \
0xcb000002, \
0xcb000003, \
0xcb765432, \
0xcb7ffffc, \
0xcb7ffffd, \
0xcb7ffffe, \
0xcb7fffff, \
0xca800000, \
0xca800001, \
0xca800002, \
0xca800003, \
0xcaf65432, \
0xcafffffc, \
0xcafffffd, \
0xcafffffe, \
0xcaffffff, \
0xca000000, \
0xca000001, \
0xca000002, \
0xca000003, \
0xca765432, \
0xca7ffffc, \
0xca7ffffd, \
0xca7ffffe, \
0xca7fffff, \
\
/* Values relevant for conversions to integers (fcvt). */ \
0xdf000001, /* The value just below INT64_MIN. */ \
0xdf000000, /* INT64_MIN */ \
0xdeffffff, /* The value just above INT64_MIN. */ \
0x5effffff, /* The value just below INT64_MAX. */ \
/* INT64_MAX is not representable. */ \
0x5f000000, /* The value just above INT64_MAX. */ \
\
0x5f7fffff, /* The value just below UINT64_MAX. */ \
/* UINT64_MAX is not representable. */ \
0x5f800000, /* The value just above UINT64_MAX. */ \
\
0xcf000001, /* The value just below INT32_MIN. */ \
0xcf000000, /* INT32_MIN */ \
0xceffffff, /* The value just above INT32_MIN. */ \
0x4effffff, /* The value just below INT32_MAX. */ \
/* INT32_MAX is not representable. */ \
0x4f000000, /* The value just above INT32_MAX. */
#define INPUT_32BITS_FIXEDPOINT_CONVERSIONS \
0x00000000, \
0x00000001, \
0x00800000, \
0x00800001, \
0x00876543, \
0x01000000, \
0x01000001, \
0x01800000, \
0x01800001, \
0x02000000, \
0x02000001, \
0x02800000, \
0x02800001, \
0x03000000, \
0x40000000, \
0x7fffff80, \
0x7fffffc0, \
0x7fffffff, \
0x80000000, \
0x80000100, \
0xffffff00, \
0xffffff80, \
0xffffffff, \
0xffffffff
#define INPUT_64BITS_FIXEDPOINT_CONVERSIONS \
0x0000000000000000, \
0x0000000000000001, \
0x0000000040000000, \
0x0000000100000000, \
0x4000000000000000, \
0x4000000000000400, \
0x000000007fffffff, \
0x00000000ffffffff, \
0x0000000080000000, \
0x0000000080000001, \
0x7ffffffffffffc00, \
0x0123456789abcde0, \
0x0000000012345678, \
0xffffffffc0000000, \
0xffffffff00000000, \
0xc000000000000000, \
0x1000000000000000, \
0x1000000000000001, \
0x1000000000000080, \
0x1000000000000081, \
0x1000000000000100, \
0x1000000000000101, \
0x1000000000000180, \
0x1000000000000181, \
0x1000000000000200, \
0x1000000000000201, \
0x1000000000000280, \
0x1000000000000281, \
0x1000000000000300, \
0x8000000000000000, \
0x8000000000000001, \
0x8000000000000200, \
0x8000000000000201, \
0x8000000000000400, \
0x8000000000000401, \
0x8000000000000600, \
0x8000000000000601, \
0x8000000000000800, \
0x8000000000000801, \
0x8000000000000a00, \
0x8000000000000a01, \
0x8000000000000c00, \
0x7ffffffffffffe00, \
0x7fffffffffffffff, \
0xfffffffffffffc00, \
0xffffffffffffffff
// Float16 - Basic test values.
#define INPUT_FLOAT16_BASIC \
0x3c00, /* 1 0 01111 0000000000 */ \
0x3c01, /* Next smallest float after 1. 0 01111 0000000001 */ \
0xc000, /* -2 1 10000 0000000000 */ \
0x7bff, /* Maximum in half precision. 0 11110 1111111111 */ \
0x0400, /* Minimum positive normal. 0 00001 0000000000 */ \
0x03ff, /* Maximum subnormal. 0 00000 1111111111 */ \
0x0001, /* Minimum positive subnormal. 0 00000 0000000001 */ \
0x0000, /* 0 0 00000 0000000000 */ \
0x8000, /* -0 1 00000 0000000000 */ \
0x7c00, /* inf 0 11111 0000000000 */ \
0xfc00, /* -inf 1 11111 0000000000 */ \
0x3555, /* 1/3 0 01101 0101010101 */ \
0x3e00, /* 1.5 0 01111 1000000000 */ \
0x4900, /* 10 0 10010 0100000000 */ \
0xbe00, /* -1.5 1 01111 1000000000 */ \
0xc900, /* -10 1 10010 0100000000 */ \
// Float16 - Conversion test values.
// Note the second column in the comments shows what the value might
// look like if represented in single precision (32 bit) floating point format.
#define INPUT_FLOAT16_CONVERSIONS \
0x37ff, /* 0.4999999701976776 0x3effffff f16: 0 01101 1111111111 */ \
0x3800, /* 0.4999999701976776 0x3effffff f16: 0 01110 0000000000 */ \
0x3801, /* 0.5000000596046448 0x3f000001 f16: 0 01110 0000000001 */ \
0x3bff, /* 0.9999999403953552 0x3f7fffff f16: 0 01110 1111111111 */ \
0x7c7f, /* nan 0x7f8fffff f16: 0 11111 0001111111 */ \
0x7e91, /* nan 0x7fd23456 f16: 0 11111 1010010001 */ \
0x7e00, /* nan 0x7fc00000 f16: 0 11111 1000000000 */ \
0x7c91, /* nan 0x7f923456 f16: 0 11111 0010010001 */ \
0x8001, /* -1.175494350822288e-38 0x80800000 f16: 1 00000 0000000001 */ \
0xb7ff, /* -0.4999999701976776 0xbeffffff f16: 1 01101 1111111111 */ \
0xb800, /* -0.4999999701976776 0xbeffffff f16: 1 01110 0000000000 */ \
0xb801, /* -0.5000000596046448 0xbf000001 f16: 1 01110 0000000001 */ \
0xbbff, /* -0.9999999403953552 0xbf7fffff f16: 1 01110 1111111111 */ \
0xbc00, /* -0.9999999403953552 0xbf7fffff f16: 1 01111 0000000000 */ \
0xbc01, /* -1.00000011920929 0xbf800001 f16: 1 01111 0000000001 */ \
0xfc7f, /* -nan 0xff8fffff f16: 1 11111 0001111111 */ \
0xfe91, /* -nan 0xffd23456 f16: 1 11111 1010010001 */ \
0xfe00, /* -nan 0xffc00000 f16: 1 11111 1000000000 */ \
0xfc91, /* -nan 0xff923456 f16: 1 11111 0010010001 */ \
0xfbff, /* -8388608 0xcb000000 f16: 1 11110 1111111111 */ \
0x0002, /* 1.192092895507812e-07 0x00000002 f16: 0 00000 0000000010 */ \
0x8002, /* -1.192092895507812e-07 0x80000002 f16: 1 00000 0000000010 */ \
0x8fff, /* -0.0004880428314208984 0x8fffffff f16: 1 00011 1111111111 */ \
0xffff, /* -nan 0xffffffff f16: 1 11111 1111111111 */ \
// Some useful sets of values for testing vector SIMD operations.
#define INPUT_8BITS_IMM_LANECOUNT_FROMZERO \
0x00, \
0x01, \
0x02, \
0x03, \
0x04, \
0x05, \
0x06, \
0x07, \
0x08, \
0x09, \
0x0a, \
0x0b, \
0x0c, \
0x0d, \
0x0e, \
0x0f
#define INPUT_16BITS_IMM_LANECOUNT_FROMZERO \
0x00, \
0x01, \
0x02, \
0x03, \
0x04, \
0x05, \
0x06, \
0x07
#define INPUT_32BITS_IMM_LANECOUNT_FROMZERO \
0x00, \
0x01, \
0x02, \
0x03
#define INPUT_64BITS_IMM_LANECOUNT_FROMZERO \
0x00, \
0x01
#define INPUT_8BITS_IMM_TYPEWIDTH_BASE \
0x01, \
0x02, \
0x03, \
0x04, \
0x05, \
0x06, \
0x07
#define INPUT_16BITS_IMM_TYPEWIDTH_BASE \
INPUT_8BITS_IMM_TYPEWIDTH_BASE, \
0x08, \
0x09, \
0x0a, \
0x0b, \
0x0c, \
0x0d, \
0x0e, \
0x0f
#define INPUT_32BITS_IMM_TYPEWIDTH_BASE \
INPUT_16BITS_IMM_TYPEWIDTH_BASE, \
0x10, \
0x11, \
0x12, \
0x13, \
0x14, \
0x15, \
0x16, \
0x17, \
0x18, \
0x19, \
0x1a, \
0x1b, \
0x1c, \
0x1d, \
0x1e, \
0x1f
#define INPUT_64BITS_IMM_TYPEWIDTH_BASE \
INPUT_32BITS_IMM_TYPEWIDTH_BASE, \
0x20, \
0x21, \
0x22, \
0x23, \
0x24, \
0x25, \
0x26, \
0x27, \
0x28, \
0x29, \
0x2a, \
0x2b, \
0x2c, \
0x2d, \
0x2e, \
0x2f, \
0x30, \
0x31, \
0x32, \
0x33, \
0x34, \
0x35, \
0x36, \
0x37, \
0x38, \
0x39, \
0x3a, \
0x3b, \
0x3c, \
0x3d, \
0x3e, \
0x3f
#define INPUT_8BITS_IMM_TYPEWIDTH \
INPUT_8BITS_IMM_TYPEWIDTH_BASE, \
0x08
#define INPUT_16BITS_IMM_TYPEWIDTH \
INPUT_16BITS_IMM_TYPEWIDTH_BASE, \
0x10
#define INPUT_32BITS_IMM_TYPEWIDTH \
INPUT_32BITS_IMM_TYPEWIDTH_BASE, \
0x20
#define INPUT_64BITS_IMM_TYPEWIDTH \
INPUT_64BITS_IMM_TYPEWIDTH_BASE, \
0x40
#define INPUT_8BITS_IMM_TYPEWIDTH_FROMZERO \
0x00, \
INPUT_8BITS_IMM_TYPEWIDTH_BASE
#define INPUT_16BITS_IMM_TYPEWIDTH_FROMZERO \
0x00, \
INPUT_16BITS_IMM_TYPEWIDTH_BASE
#define INPUT_32BITS_IMM_TYPEWIDTH_FROMZERO \
0x00, \
INPUT_32BITS_IMM_TYPEWIDTH_BASE
#define INPUT_64BITS_IMM_TYPEWIDTH_FROMZERO \
0x00, \
INPUT_64BITS_IMM_TYPEWIDTH_BASE
#define INPUT_32BITS_IMM_TYPEWIDTH_FROMZERO_TOWIDTH \
0x00, \
INPUT_32BITS_IMM_TYPEWIDTH_BASE, \
0x20
#define INPUT_64BITS_IMM_TYPEWIDTH_FROMZERO_TOWIDTH \
0x00, \
INPUT_64BITS_IMM_TYPEWIDTH_BASE, \
0x40
#define INPUT_8BITS_BASIC \
0x00, \
0x01, \
0x02, \
0x08, \
0x33, \
0x55, \
0x7d, \
0x7e, \
0x7f, \
0x80, \
0x81, \
0x82, \
0x83, \
0xaa, \
0xcc, \
0xf8, \
0xfd, \
0xfe, \
0xff
// Basic values for vector SIMD operations of types 4H or 8H.
#define INPUT_16BITS_BASIC \
0x0000, \
0x0001, \
0x0002, \
0x0010, \
0x007d, \
0x007e, \
0x007f, \
0x3333, \
0x5555, \
0x7ffd, \
0x7ffe, \
0x7fff, \
0x8000, \
0x8001, \
0xaaaa, \
0xcccc, \
0xff80, \
0xff81, \
0xff82, \
0xff83, \
0xfff0, \
0xfffd, \
0xfffe, \
0xffff
// Basic values for vector SIMD operations of types 2S or 4S.
#define INPUT_32BITS_BASIC \
0x00000000, \
0x00000001, \
0x00000002, \
0x00000020, \
0x0000007d, \
0x0000007e, \
0x0000007f, \
0x00007ffd, \
0x00007ffe, \
0x00007fff, \
0x33333333, \
0x55555555, \
0x7ffffffd, \
0x7ffffffe, \
0x7fffffff, \
0x80000000, \
0x80000001, \
0xaaaaaaaa, \
0xcccccccc, \
0xffff8000, \
0xffff8001, \
0xffff8002, \
0xffff8003, \
0xffffff80, \
0xffffff81, \
0xffffff82, \
0xffffff83, \
0xffffffe0, \
0xfffffffd, \
0xfffffffe, \
0xffffffff
// Basic values for vector SIMD operations of type 2D
#define INPUT_64BITS_BASIC \
0x0000000000000000, \
0x0000000000000001, \
0x0000000000000002, \
0x0000000000000040, \
0x000000000000007d, \
0x000000000000007e, \
0x000000000000007f, \
0x0000000000007ffd, \
0x0000000000007ffe, \
0x0000000000007fff, \
0x000000007ffffffd, \
0x000000007ffffffe, \
0x000000007fffffff, \
0x3333333333333333, \
0x5555555555555555, \
0x7ffffffffffffffd, \
0x7ffffffffffffffe, \
0x7fffffffffffffff, \
0x8000000000000000, \
0x8000000000000001, \
0x8000000000000002, \
0x8000000000000003, \
0xaaaaaaaaaaaaaaaa, \
0xcccccccccccccccc, \
0xffffffff80000000, \
0xffffffff80000001, \
0xffffffff80000002, \
0xffffffff80000003, \
0xffffffffffff8000, \
0xffffffffffff8001, \
0xffffffffffff8002, \
0xffffffffffff8003, \
0xffffffffffffff80, \
0xffffffffffffff81, \
0xffffffffffffff82, \
0xffffffffffffff83, \
0xffffffffffffffc0, \
0xfffffffffffffffd, \
0xfffffffffffffffe, \
0xffffffffffffffff
// clang-format on
// For most 2- and 3-op instructions, use only basic inputs. Because every
// combination is tested, the length of the output trace is very sensitive to
// the length of this list.
static const uint64_t kInputDoubleBasic[] = {INPUT_DOUBLE_BASIC};
static const uint32_t kInputFloatBasic[] = {INPUT_FLOAT_BASIC};
#define INPUT_DOUBLE_ACC_DESTINATION INPUT_DOUBLE_BASIC
#define INPUT_FLOAT_ACC_DESTINATION INPUT_FLOAT_BASIC
static const uint64_t kInputDoubleAccDestination[] = {
INPUT_DOUBLE_ACC_DESTINATION};
static const uint32_t kInputFloatAccDestination[] = {
INPUT_FLOAT_ACC_DESTINATION};
// For conversions, include several extra inputs.
static const uint64_t kInputDoubleConversions[] = {
INPUT_DOUBLE_BASIC INPUT_DOUBLE_CONVERSIONS};
static const uint32_t kInputFloatConversions[] = {
INPUT_FLOAT_BASIC INPUT_FLOAT_CONVERSIONS};
static const uint64_t kInput64bitsFixedPointConversions[] = {
INPUT_64BITS_BASIC, INPUT_64BITS_FIXEDPOINT_CONVERSIONS};
static const uint32_t kInput32bitsFixedPointConversions[] = {
INPUT_32BITS_BASIC, INPUT_32BITS_FIXEDPOINT_CONVERSIONS};
static const uint16_t kInputFloat16Conversions[] = {
INPUT_FLOAT16_BASIC INPUT_FLOAT16_CONVERSIONS};
static const uint8_t kInput8bitsBasic[] = {INPUT_8BITS_BASIC};
static const uint16_t kInput16bitsBasic[] = {INPUT_16BITS_BASIC};
static const uint32_t kInput32bitsBasic[] = {INPUT_32BITS_BASIC};
static const uint64_t kInput64bitsBasic[] = {INPUT_64BITS_BASIC};
static const int kInput8bitsImmTypeWidth[] = {INPUT_8BITS_IMM_TYPEWIDTH};
static const int kInput16bitsImmTypeWidth[] = {INPUT_16BITS_IMM_TYPEWIDTH};
static const int kInput32bitsImmTypeWidth[] = {INPUT_32BITS_IMM_TYPEWIDTH};
static const int kInput64bitsImmTypeWidth[] = {INPUT_64BITS_IMM_TYPEWIDTH};
static const int kInput8bitsImmTypeWidthFromZero[] = {
INPUT_8BITS_IMM_TYPEWIDTH_FROMZERO};
static const int kInput16bitsImmTypeWidthFromZero[] = {
INPUT_16BITS_IMM_TYPEWIDTH_FROMZERO};
static const int kInput32bitsImmTypeWidthFromZero[] = {
INPUT_32BITS_IMM_TYPEWIDTH_FROMZERO};
static const int kInput64bitsImmTypeWidthFromZero[] = {
INPUT_64BITS_IMM_TYPEWIDTH_FROMZERO};
static const int kInput32bitsImmTypeWidthFromZeroToWidth[] = {
INPUT_32BITS_IMM_TYPEWIDTH_FROMZERO_TOWIDTH};
static const int kInput64bitsImmTypeWidthFromZeroToWidth[] = {
INPUT_64BITS_IMM_TYPEWIDTH_FROMZERO_TOWIDTH};
// These immediate values are used only in 'shll{2}' tests.
static const int kInput8bitsImmSHLL[] = {8};
static const int kInput16bitsImmSHLL[] = {16};
static const int kInput32bitsImmSHLL[] = {32};
static const double kInputDoubleImmZero[] = {0.0};
static const int kInput8bitsImmZero[] = {0};
static const int kInput16bitsImmZero[] = {0};
static const int kInput32bitsImmZero[] = {0};
static const int kInput64bitsImmZero[] = {0};
static const int kInput8bitsImmLaneCountFromZero[] = {
INPUT_8BITS_IMM_LANECOUNT_FROMZERO};
static const int kInput16bitsImmLaneCountFromZero[] = {
INPUT_16BITS_IMM_LANECOUNT_FROMZERO};
static const int kInput32bitsImmLaneCountFromZero[] = {
INPUT_32BITS_IMM_LANECOUNT_FROMZERO};
static const int kInput64bitsImmLaneCountFromZero[] = {
INPUT_64BITS_IMM_LANECOUNT_FROMZERO};
#define INPUT_8BITS_ACC_DESTINATION INPUT_8BITS_BASIC
#define INPUT_16BITS_ACC_DESTINATION INPUT_16BITS_BASIC
#define INPUT_32BITS_ACC_DESTINATION INPUT_32BITS_BASIC
#define INPUT_64BITS_ACC_DESTINATION INPUT_64BITS_BASIC
static const uint8_t kInput8bitsAccDestination[] = {
INPUT_8BITS_ACC_DESTINATION};
static const uint16_t kInput16bitsAccDestination[] = {
INPUT_16BITS_ACC_DESTINATION};
static const uint32_t kInput32bitsAccDestination[] = {
INPUT_32BITS_ACC_DESTINATION};
static const uint64_t kInput64bitsAccDestination[] = {
INPUT_64BITS_ACC_DESTINATION};
static const int kInputHIndices[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const int kInputSIndices[] = {0, 1, 2, 3};
static const int kInputDIndices[] = {0, 1};
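
Two design points in this deleted header deserve spelling out: inputs are raw integer bit patterns so NaN payloads and signed zeros survive exactly (routing them through C++ double literals could canonicalise them), and the basic lists stay short because a 2-op test pairs every input with every input. A hypothetical sketch of such a consumer; the names and the add operation are illustrative, not the actual harness:

#include <cstdint>
#include <cstdio>
#include <cstring>

static const uint64_t kInputs[] = {
    0x0000000000000000,  // 0.0
    0x3ff0000000000000,  // 1.0
    0x7ff123456789abcd,  // signalling NaN with a recognisable payload
};

static double FromRawbits(uint64_t bits) {
  double d;
  memcpy(&d, &bits, 8);
  return d;
}

int main() {
  const int n = sizeof(kInputs) / sizeof(kInputs[0]);
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      // One trace entry per (i, j) pair: n inputs cost n*n entries.
      volatile double r = FromRawbits(kInputs[i]) + FromRawbits(kInputs[j]);
      (void)r;
    }
  }
  printf("%d trace entries\n", n * n);
  return 0;
}
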

File diff suppressed because it is too large

View File

@ -59,30 +59,19 @@ bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
return expected == result;
}
bool Equal128(vec128_t expected, const RegisterDump*, vec128_t result) {
if ((result.h != expected.h) || (result.l != expected.l)) {
printf("Expected 0x%016" PRIx64 "%016" PRIx64
"\t "
"Found 0x%016" PRIx64 "%016" PRIx64 "\n",
expected.h, expected.l, result.h, result.l);
}
return ((expected.h == result.h) && (expected.l == result.l));
}
bool EqualFP32(float expected, const RegisterDump*, float result) {
if (bit_cast<uint32_t>(expected) == bit_cast<uint32_t>(result)) {
if (float_to_rawbits(expected) == float_to_rawbits(result)) {
return true;
} else {
if (std::isnan(expected) || (expected == 0.0)) {
printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
bit_cast<uint32_t>(expected), bit_cast<uint32_t>(result));
float_to_rawbits(expected), float_to_rawbits(result));
} else {
printf("Expected %.9f (0x%08" PRIx32
")\t "
printf("Expected %.9f (0x%08" PRIx32 ")\t "
"Found %.9f (0x%08" PRIx32 ")\n",
expected, bit_cast<uint32_t>(expected), result,
bit_cast<uint32_t>(result));
expected, float_to_rawbits(expected),
result, float_to_rawbits(result));
}
return false;
}
@ -90,19 +79,18 @@ bool EqualFP32(float expected, const RegisterDump*, float result) {
bool EqualFP64(double expected, const RegisterDump*, double result) {
if (bit_cast<uint64_t>(expected) == bit_cast<uint64_t>(result)) {
if (double_to_rawbits(expected) == double_to_rawbits(result)) {
return true;
}
if (std::isnan(expected) || (expected == 0.0)) {
printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
bit_cast<uint64_t>(expected), bit_cast<uint64_t>(result));
double_to_rawbits(expected), double_to_rawbits(result));
} else {
printf("Expected %.17f (0x%016" PRIx64
")\t "
printf("Expected %.17f (0x%016" PRIx64 ")\t "
"Found %.17f (0x%016" PRIx64 ")\n",
expected, bit_cast<uint64_t>(expected), result,
bit_cast<uint64_t>(result));
expected, double_to_rawbits(expected),
result, double_to_rawbits(result));
}
return false;
}
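
The raw-bits comparison in EqualFP32/EqualFP64 is deliberate: operator== treats +0.0 and -0.0 as equal and any NaN as unequal to itself, while bit equality distinguishes the zeros and accepts identical NaNs. A minimal demonstration:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t Raw(double d) {
  uint64_t b;
  memcpy(&b, &d, 8);
  return b;
}

int main() {
  printf("%d\n", 0.0 == -0.0);            // 1: == cannot tell the zeros apart
  printf("%d\n", Raw(0.0) == Raw(-0.0));  // 0: raw bits can
  double n = NAN;
  printf("%d\n", n == n);                 // 0: NaN never compares equal
  printf("%d\n", Raw(n) == Raw(n));       // 1: identical bits do
  return 0;
}
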
@ -131,31 +119,27 @@ bool Equal64(uint64_t expected,
return Equal64(expected, core, result);
}
bool Equal128(uint64_t expected_h, uint64_t expected_l,
const RegisterDump* core, const VRegister& vreg) {
CHECK(vreg.Is128Bits());
vec128_t expected = {expected_l, expected_h};
vec128_t result = core->qreg(vreg.code());
return Equal128(expected, core, result);
}
bool EqualFP32(float expected, const RegisterDump* core,
const VRegister& fpreg) {
bool EqualFP32(float expected,
const RegisterDump* core,
const FPRegister& fpreg) {
CHECK(fpreg.Is32Bits());
// Retrieve the corresponding D register so we can check that the upper part
// was properly cleared.
uint64_t result_64 = core->dreg_bits(fpreg.code());
if ((result_64 & 0xffffffff00000000L) != 0) {
printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
bit_cast<uint32_t>(expected), expected, result_64);
float_to_rawbits(expected), expected, result_64);
return false;
}
return EqualFP32(expected, core, core->sreg(fpreg.code()));
}
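
The read of the full D register encodes an architectural rule: on AArch64, writing an S register zeroes the upper 32 bits of the enclosing register, so the dump keeps the whole 64-bit value and the check rejects stale upper bits. A sketch of that check:

#include <cstdint>
#include <cstdio>

static bool UpperHalfClear(uint64_t dreg_bits) {
  return (dreg_bits & 0xffffffff00000000ULL) == 0;
}

int main() {
  printf("%d\n", UpperHalfClear(0x000000003f800000ULL));  // 1: clean write of 1.0f
  printf("%d\n", UpperHalfClear(0xdeadbeef3f800000ULL));  // 0: upper bits not cleared
  return 0;
}
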
bool EqualFP64(double expected, const RegisterDump* core,
const VRegister& fpreg) {
bool EqualFP64(double expected,
const RegisterDump* core,
const FPRegister& fpreg) {
CHECK(fpreg.Is64Bits());
return EqualFP64(expected, core, core->dreg(fpreg.code()));
}
@ -214,7 +198,7 @@ bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
}
}
for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
uint64_t a_bits = a->dreg_bits(i);
uint64_t b_bits = b->dreg_bits(i);
if (a_bits != b_bits) {
@ -254,28 +238,29 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
return list;
}
RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
int reg_size, int reg_count, RegList allowed) {
RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
int reg_size, int reg_count, RegList allowed) {
RegList list = 0;
int i = 0;
for (unsigned n = 0; (n < kNumberOfVRegisters) && (i < reg_count); n++) {
for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
if (((1UL << n) & allowed) != 0) {
// Only assigned allowed registers.
if (v) {
v[i] = VRegister::Create(n, reg_size);
v[i] = FPRegister::Create(n, reg_size);
}
if (d) {
d[i] = VRegister::Create(n, kDRegSizeInBits);
d[i] = FPRegister::Create(n, kDRegSizeInBits);
}
if (s) {
s[i] = VRegister::Create(n, kSRegSizeInBits);
s[i] = FPRegister::Create(n, kSRegSizeInBits);
}
list |= (1UL << n);
i++;
}
}
// Check that we got enough registers.
CHECK(CountSetBits(list, kNumberOfVRegisters) == reg_count);
CHECK(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
return list;
}
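
The allocation loop walks register codes from zero, takes each code whose bit is set in allowed until reg_count registers are found, and returns the chosen set as a bitmask. A standalone sketch of the selection, assuming 32 register codes as in kNumberOfFPRegisters:

#include <cstdint>
#include <cstdio>

typedef uint64_t RegList;

static RegList PickRegisters(RegList allowed, int reg_count, int* codes) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; n < 32 && i < reg_count; n++) {
    if ((1ULL << n) & allowed) {
      codes[i++] = n;     // hand out register code n
      list |= 1ULL << n;  // and record it in the result mask
    }
  }
  return list;
}

int main() {
  int codes[4];
  RegList list = PickRegisters(0xf0f0, 4, codes);  // allow d4-d7, d12-d15
  printf("list=0x%llx first=d%d last=d%d\n", (unsigned long long)list,
         codes[0], codes[3]);  // picks d4..d7 -> list=0xf0
  return 0;
}
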
@ -305,10 +290,10 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
VRegister first = NoVReg;
for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
FPRegister first = NoFPReg;
for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
if (reg_list & (1UL << i)) {
VRegister dn = VRegister::Create(i, kDRegSizeInBits);
FPRegister dn = FPRegister::Create(i, kDRegSizeInBits);
if (!first.IsValid()) {
// This is the first register we've hit, so construct the literal.
__ Fmov(dn, value);
@ -327,7 +312,7 @@ void Clobber(MacroAssembler* masm, CPURegList reg_list) {
if (reg_list.type() == CPURegister::kRegister) {
// This will always clobber X registers.
Clobber(masm, reg_list.list());
} else if (reg_list.type() == CPURegister::kVRegister) {
} else if (reg_list.type() == CPURegister::kFPRegister) {
// This will always clobber D registers.
ClobberFP(masm, reg_list.list());
} else {
@ -358,7 +343,6 @@ void RegisterDump::Dump(MacroAssembler* masm) {
const int w_offset = offsetof(dump_t, w_);
const int d_offset = offsetof(dump_t, d_);
const int s_offset = offsetof(dump_t, s_);
const int q_offset = offsetof(dump_t, q_);
const int sp_offset = offsetof(dump_t, sp_);
const int wsp_offset = offsetof(dump_t, wsp_);
const int flags_offset = offsetof(dump_t, flags_);
@ -393,25 +377,18 @@ void RegisterDump::Dump(MacroAssembler* masm) {
// Dump D registers.
__ Add(dump, dump_base, d_offset);
for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
__ Stp(VRegister::DRegFromCode(i), VRegister::DRegFromCode(i + 1),
for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
__ Stp(FPRegister::DRegFromCode(i), FPRegister::DRegFromCode(i + 1),
MemOperand(dump, i * kDRegSize));
}
// Dump S registers.
__ Add(dump, dump_base, s_offset);
for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
__ Stp(VRegister::SRegFromCode(i), VRegister::SRegFromCode(i + 1),
for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
__ Stp(FPRegister::SRegFromCode(i), FPRegister::SRegFromCode(i + 1),
MemOperand(dump, i * kSRegSize));
}
// Dump Q registers.
__ Add(dump, dump_base, q_offset);
for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
__ Stp(VRegister::QRegFromCode(i), VRegister::QRegFromCode(i + 1),
MemOperand(dump, i * kQRegSize));
}
// Dump the flags.
__ Mrs(tmp, NZCV);
__ Str(tmp, MemOperand(dump_base, flags_offset));

View File

@ -39,11 +39,6 @@
namespace v8 {
namespace internal {
// Structure representing Q registers in a RegisterDump.
struct vec128_t {
uint64_t l;
uint64_t h;
};
// RegisterDump: Object allowing integer, floating point and flags registers
// to be saved to itself for future reference.
@ -77,14 +72,14 @@ class RegisterDump {
return dump_.x_[code];
}
// VRegister accessors.
// FPRegister accessors.
inline uint32_t sreg_bits(unsigned code) const {
CHECK(FPRegAliasesMatch(code));
return dump_.s_[code];
}
inline float sreg(unsigned code) const {
return bit_cast<float>(sreg_bits(code));
return rawbits_to_float(sreg_bits(code));
}
inline uint64_t dreg_bits(unsigned code) const {
@ -93,11 +88,9 @@ class RegisterDump {
}
inline double dreg(unsigned code) const {
return bit_cast<double>(dreg_bits(code));
return rawbits_to_double(dreg_bits(code));
}
inline vec128_t qreg(unsigned code) const { return dump_.q_[code]; }
// Stack pointer accessors.
inline int64_t spreg() const {
CHECK(SPRegAliasesMatch());
@ -142,7 +135,7 @@ class RegisterDump {
// As RegAliasesMatch, but for floating-point registers.
bool FPRegAliasesMatch(unsigned code) const {
CHECK(IsComplete());
CHECK(code < kNumberOfVRegisters);
CHECK(code < kNumberOfFPRegisters);
return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
}
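
FPRegAliasesMatch checks the S/D aliasing invariant: Sn is the low half of Dn, so the dumped 32-bit value must equal the low 32 bits of the dumped 64-bit value. A sketch, assuming kSRegMask is the low-32-bit mask:

#include <cstdint>
#include <cstdio>

static const uint64_t kSRegMask = 0x00000000ffffffffULL;  // assumed value

static bool AliasesMatch(uint64_t d_bits, uint32_t s_bits) {
  return (d_bits & kSRegMask) == s_bits;
}

int main() {
  printf("%d\n", AliasesMatch(0x400921fb54442d18ULL, 0x54442d18));  // 1
  printf("%d\n", AliasesMatch(0x400921fb54442d18ULL, 0x00000000));  // 0
  return 0;
}
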
@ -154,11 +147,8 @@ class RegisterDump {
uint32_t w_[kNumberOfRegisters];
// Floating-point registers, as raw bits.
uint64_t d_[kNumberOfVRegisters];
uint32_t s_[kNumberOfVRegisters];
// Vector registers.
vec128_t q_[kNumberOfVRegisters];
uint64_t d_[kNumberOfFPRegisters];
uint32_t s_[kNumberOfFPRegisters];
// The stack pointer.
uint64_t sp_;
@ -173,18 +163,12 @@ class RegisterDump {
} dump_;
static dump_t for_sizeof();
static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
static_assert(sizeof(for_sizeof().q_[0]) == kQRegSize,
"Array elements must be size of Q register.");
static_assert(sizeof(for_sizeof().d_[0]) == kDRegSize,
"Array elements must be size of D register.");
static_assert(sizeof(for_sizeof().s_[0]) == kSRegSize,
"Array elements must be size of S register.");
static_assert(sizeof(for_sizeof().x_[0]) == kXRegSize,
"Array elements must be size of X register.");
static_assert(sizeof(for_sizeof().w_[0]) == kWRegSize,
"Array elements must be size of W register.");
STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kDRegSize);
STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kSRegSize);
STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kXRegSize);
STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kWRegSize);
STATIC_ASSERT(sizeof(for_sizeof().x_[0]) == kXRegSize);
STATIC_ASSERT(sizeof(for_sizeof().w_[0]) == kWRegSize);
};
// Some of these methods don't use the RegisterDump argument, but they have to
@ -199,14 +183,12 @@ bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg);
bool EqualFP32(float expected, const RegisterDump* core,
const VRegister& fpreg);
const FPRegister& fpreg);
bool EqualFP64(double expected, const RegisterDump* core,
const VRegister& fpreg);
const FPRegister& fpreg);
bool Equal64(const Register& reg0, const RegisterDump* core,
const Register& reg1);
bool Equal128(uint64_t expected_h, uint64_t expected_l,
const RegisterDump* core, const VRegister& reg);
bool EqualNzcv(uint32_t expected, uint32_t result);
@ -226,8 +208,8 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
int reg_size, int reg_count, RegList allowed);
// As PopulateRegisterArray, but for floating-point registers.
RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
int reg_size, int reg_count, RegList allowed);
RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
int reg_size, int reg_count, RegList allowed);
// Overwrite the contents of the specified registers. This enables tests to
// check that register contents are written in cases where it's likely that the