Remove ARM support for VFP2

R=yangguo@chromium.org

Review URL: https://codereview.chromium.org/13560007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14159 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
danno@chromium.org 2013-04-07 04:34:20 +00:00
parent bdb305aade
commit ffe7f6a1ba
21 changed files with 651 additions and 1705 deletions
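In broad strokes, the patch deletes every runtime `CpuFeatures::IsSupported(VFP2)` branch together with its soft-float fallback, leaving the VFP path unconditional. A minimal standalone sketch of the recurring before/after shape (not V8 source; the two constants are the values from the header diff below, everything else is a stand-in):

    // Sketch only: IsSupportedVFP2 stands in for CpuFeatures::IsSupported(VFP2).
    #include <cstdio>

    constexpr int kMaxNumAllocatableRegisters = 8;  // value from the header diff
    constexpr int kGPRsPerNonVFP2Double = 2;        // ditto

    bool IsSupportedVFP2() { return true; }

    // Before: every VFP user carried a runtime feature test plus a fallback.
    int NumAllocatableRegistersBefore() {
      if (IsSupportedVFP2()) {
        return kMaxNumAllocatableRegisters;
      } else {
        return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
      }
    }

    // After: VFP2 is part of the baseline, so the branch and fallback are gone.
    int NumAllocatableRegistersAfter() {
      return kMaxNumAllocatableRegisters;
    }

    int main() {
      std::printf("%d %d\n",
                  NumAllocatableRegistersBefore(),
                  NumAllocatableRegistersAfter());
    }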

View File

@ -48,29 +48,17 @@ namespace internal {
int Register::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return kMaxNumAllocatableRegisters;
} else {
return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
}
return kMaxNumAllocatableRegisters;
}
int DwVfpRegister::NumRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
} else {
return 1;
}
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}
int DwVfpRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return NumRegisters() - kNumReservedRegisters;
} else {
return 1;
}
return NumRegisters() - kNumReservedRegisters;
}

View File

@ -66,11 +66,8 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << ARMv7;
#endif // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
answer |= 1u << VFP3 | 1u << ARMv7;
#endif // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP2_INSTRUCTIONS
answer |= 1u << VFP2;
#endif // CAN_USE_VFP2_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
answer |= 1u << VFP32DREGS;
#endif // CAN_USE_VFP32DREGS
@ -81,7 +78,7 @@ static unsigned CpuFeaturesImpliedByCompiler() {
// point support implies VFPv3, see ARM DDI 0406B, page A1-6.
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
&& !defined(__SOFTFP__)
answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
answer |= 1u << VFP3 | 1u << ARMv7;
#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
// && !defined(__SOFTFP__)
#endif // __arm__
@ -94,18 +91,13 @@ static unsigned CpuFeaturesImpliedByCompiler() {
const char* DwVfpRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(VFP2)) {
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
kNumReservedRegisters - 1);
if (index >= kDoubleRegZero.code())
index += kNumReservedRegisters;
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
kNumReservedRegisters - 1);
if (index >= kDoubleRegZero.code())
index += kNumReservedRegisters;
return VFPRegisters::Name(index, true);
} else {
ASSERT(index == 0);
return "sfpd0";
}
return VFPRegisters::Name(index, true);
}
@ -133,8 +125,7 @@ void CpuFeatures::Probe() {
if (FLAG_enable_vfp3) {
supported_ |=
static_cast<uint64_t>(1) << VFP3 |
static_cast<uint64_t>(1) << ARMv7 |
static_cast<uint64_t>(1) << VFP2;
static_cast<uint64_t>(1) << ARMv7;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
@ -157,14 +148,11 @@ void CpuFeatures::Probe() {
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << VFP3 |
static_cast<uint64_t>(1) << ARMv7 |
static_cast<uint64_t>(1) << VFP2;
} else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP2;
static_cast<uint64_t>(1) << ARMv7;
}
if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
@ -193,8 +181,8 @@ void CpuFeatures::Probe() {
supported_ |= found_by_runtime_probing_only_;
#endif
// Assert that VFP3 implies VFP2 and ARMv7.
ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
// Assert that VFP3 implies ARMv7.
ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
}
@ -1763,7 +1751,6 @@ void Assembler::vldr(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-924.
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
// Vd(15-12) | 1011(11-8) | offset
ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1807,7 +1794,6 @@ void Assembler::vldr(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1851,7 +1837,6 @@ void Assembler::vstr(const DwVfpRegister src,
// Instruction details available in ARM DDI 0406C.b, A8-1082.
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
// Vd(15-12) | 1011(11-8) | (offset/4)
ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1895,7 +1880,6 @@ void Assembler::vstr(const SwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1938,7 +1922,6 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406C.b, A8-922.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -1960,7 +1943,6 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406C.b, A8-1080.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -1981,7 +1963,6 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -2002,7 +1983,6 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -2076,8 +2056,6 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
ASSERT(IsEnabled(VFP2));
uint32_t enc;
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
@ -2148,7 +2126,6 @@ void Assembler::vmov(const SwVfpRegister dst,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(IsEnabled(VFP2));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@ -2163,7 +2140,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-938.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@ -2181,7 +2157,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-940.
// cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
// Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
ASSERT(index.index == 0 || index.index == 1);
int vd, d;
dst.split_code(&vd, &d);
@ -2198,7 +2173,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
int vm, m;
dst.split_code(&vm, &m);
@ -2215,7 +2189,6 @@ void Assembler::vmov(const Register dst1,
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
int vm, m;
src.split_code(&vm, &m);
@ -2231,7 +2204,6 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@ -2246,7 +2218,6 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@ -2371,7 +2342,6 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@ -2380,7 +2350,6 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@ -2389,7 +2358,6 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@ -2398,7 +2366,6 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@ -2407,7 +2374,6 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@ -2416,7 +2382,6 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@ -2425,7 +2390,6 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@ -2436,7 +2400,6 @@ void Assembler::vneg(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-968.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@ -2453,7 +2416,6 @@ void Assembler::vabs(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-524.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@ -2472,7 +2434,6 @@ void Assembler::vadd(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-830.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@ -2493,7 +2454,6 @@ void Assembler::vsub(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-1086.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@ -2514,7 +2474,6 @@ void Assembler::vmul(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-960.
// cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@ -2571,7 +2530,6 @@ void Assembler::vdiv(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-882.
// cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@ -2590,7 +2548,6 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
src1.split_code(&vd, &d);
int vm, m;
@ -2607,7 +2564,6 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
ASSERT(src2 == 0.0);
int vd, d;
src1.split_code(&vd, &d);
@ -2619,7 +2575,6 @@ void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@ -2629,7 +2584,6 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@ -2641,7 +2595,6 @@ void Assembler::vsqrt(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-1058.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;

View File

@ -59,7 +59,6 @@ class CpuFeatures : public AllStatic {
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
if (f == VFP2 && !FLAG_enable_vfp2) return false;
if (f == SUDIV && !FLAG_enable_sudiv) return false;
if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
return false;
@ -117,7 +116,6 @@ struct Register {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters = 8;
static const int kSizeInBytes = 4;
static const int kGPRsPerNonVFP2Double = 2;
inline static int NumAllocatableRegisters();
@ -370,9 +368,6 @@ const DwVfpRegister d29 = { 29 };
const DwVfpRegister d30 = { 30 };
const DwVfpRegister d31 = { 31 };
const Register sfpd_lo = { kRegister_r6_Code };
const Register sfpd_hi = { kRegister_r7_Code };
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.

File diff suppressed because it is too large.

View File

@ -61,9 +61,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) {
ASSERT(CpuFeatures::IsSafeForSnapshot(VFP2) || save_fp == kDontSaveFPRegs);
}
: save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
@ -473,7 +471,6 @@ class RecordWriteStub: public PlatformCodeStub {
if (mode == kSaveFPRegs) {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
CpuFeatureScope scope(masm, VFP2);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
@ -491,7 +488,6 @@ class RecordWriteStub: public PlatformCodeStub {
if (mode == kSaveFPRegs) {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
CpuFeatureScope scope(masm, VFP2);
// Restore all VFP registers except d0.
// TODO(hans): We should probably restore d0 too. And maybe use vldm.
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {

View File

@ -62,7 +62,6 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(VFP2)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{
CpuFeatureScope use_vfp(&masm, VFP2);
DwVfpRegister input = d0;
DwVfpRegister result = d1;
DwVfpRegister double_scratch1 = d2;
@ -185,7 +183,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- r4 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationSiteInfo(r2, r4);
@ -248,7 +245,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged
if (!vfp2_supported) __ Push(r1, r0);
__ b(&entry);
@ -276,23 +272,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
if (vfp2_supported) {
CpuFeatureScope scope(masm, VFP2);
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
__ add(r7, r7, Operand(8));
} else {
FloatingPointHelper::ConvertIntToDouble(masm,
r9,
FloatingPointHelper::kCoreRegisters,
d0,
r0,
r1,
lr,
s0);
__ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
}
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
__ add(r7, r7, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
@ -310,7 +293,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ cmp(r7, r6);
__ b(lt, &loop);
if (!vfp2_supported) __ Pop(r1, r0);
__ pop(lr);
__ bind(&done);
}

View File

@ -594,23 +594,18 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Save all allocatable VFP registers before messing with them.
ASSERT(kDoubleRegZero.code() == 14);
ASSERT(kScratchDoubleReg.code() == 15);
// Save all allocatable VFP registers before messing with them.
ASSERT(kDoubleRegZero.code() == 14);
ASSERT(kScratchDoubleReg.code() == 15);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
// Push registers d0-d13, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d13);
} else {
__ sub(sp, sp, Operand(kDoubleRegsSize));
}
// Push registers d0-d13, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d13);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@ -669,17 +664,14 @@ void Deoptimizer::EntryGenerator::Generate() {
__ str(r2, MemOperand(r1, offset));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Remove the bailout id, eventually return address, and the saved registers
@ -749,21 +741,18 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(r4, r1);
__ b(lt, &outer_push_loop);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
int src_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
if (i == kDoubleRegZero.code()) continue;
if (i == kScratchDoubleReg.code()) continue;
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
int src_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
if (i == kDoubleRegZero.code()) continue;
if (i == kScratchDoubleReg.code()) continue;
const DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, r1, src_offset, i < 16 ? al : ne);
src_offset += kDoubleSize;
}
const DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, r1, src_offset, i < 16 ? al : ne);
src_offset += kDoubleSize;
}
// Push state, pc, and continuation from the last output frame.

View File

@ -3027,37 +3027,26 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(VFP2)) {
__ PrepareCallCFunction(1, r0);
__ ldr(r0,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
__ PrepareCallCFunction(1, r0);
__ ldr(r0,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatureScope scope(masm(), VFP2);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
__ orr(r1, r1, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
__ mov(r0, Operand::Zero());
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
__ sub(r0, r4, Operand(kHeapObjectTag));
__ vstr(d7, r0, HeapNumber::kValueOffset);
__ mov(r0, r4);
} else {
__ PrepareCallCFunction(2, r0);
__ ldr(r1,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ mov(r0, Operand(r4));
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
__ orr(r1, r1, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
__ mov(r0, Operand::Zero());
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
__ sub(r0, r4, Operand(kHeapObjectTag));
__ vstr(d7, r0, HeapNumber::kValueOffset);
__ mov(r0, r4);
context()->Plug(r0);
}
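The arithmetic in the comment above can be spot-checked with a few lines of standalone C++ (an illustration only, not V8 code; `RandomBitsToUnitInterval` is a hypothetical name):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Glue 32 random bits under the exponent of 1.0 x 2^20, then subtract
    // 1.0 x 2^20; what remains is 0.(32 random bits).
    double RandomBitsToUnitInterval(uint32_t random_bits) {
      uint64_t bits = (uint64_t(0x41300000) << 32) | random_bits;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - 1048576.0;  // 1048576.0 == 1.0 x 2^20
    }

    int main() {
      std::printf("%.10f\n", RandomBitsToUnitInterval(0x80000000u));  // 0.5
      std::printf("%.10f\n", RandomBitsToUnitInterval(0u));           // 0.0
    }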
@ -3194,12 +3183,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(VFP2)) {
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(r0);
}

View File

@ -2133,16 +2133,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
// float->double conversion on non-VFP2 requires an extra scratch
// register. For convenience, just mark the elements register as "UseTemp"
// so that it can be used as a temp during the float->double conversion
// after it's no longer needed after the float load.
bool needs_temp =
!CpuFeatures::IsSupported(VFP2) &&
(elements_kind == EXTERNAL_FLOAT_ELEMENTS);
LOperand* external_pointer = needs_temp
? UseTempRegister(instr->elements())
: UseRegister(instr->elements());
LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}

View File

@ -195,8 +195,7 @@ bool LCodeGen::GeneratePrologue() {
}
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
if (info()->saves_caller_doubles()) {
Comment(";;; Save clobbered callee double registers");
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
@ -1209,8 +1208,6 @@ void LCodeGen::DoModI(LModI* instr) {
Label vfp_modulo, both_positive, right_negative;
CpuFeatureScope scope(masm(), VFP2);
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
@ -1615,7 +1612,6 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
LOperand* left_argument,
LOperand* right_argument,
Token::Value op) {
CpuFeatureScope vfp_scope(masm(), VFP2);
Register left = ToRegister(left_argument);
Register right = ToRegister(right_argument);
@ -1901,7 +1897,6 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
CpuFeatureScope scope(masm(), VFP2);
double v = instr->value();
__ Vmov(result, v, scratch0());
}
@ -2072,7 +2067,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister left_reg = ToDoubleRegister(left);
DwVfpRegister right_reg = ToDoubleRegister(right);
DwVfpRegister result_reg = ToDoubleRegister(instr->result());
@ -2118,7 +2112,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister left = ToDoubleRegister(instr->left());
DwVfpRegister right = ToDoubleRegister(instr->right());
DwVfpRegister result = ToDoubleRegister(instr->result());
@ -2209,7 +2202,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, Operand::Zero());
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
@ -2301,7 +2293,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
CpuFeatureScope scope(masm(), VFP2);
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@ -2381,7 +2372,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
CpuFeatureScope scope(masm(), VFP2);
// Compare left and right operands as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@ -2936,8 +2926,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
if (info()->saves_caller_doubles()) {
ASSERT(NeedsEagerFrame());
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
@ -3319,58 +3308,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, kScratchDoubleReg.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
} else {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
Register value = external_pointer;
__ ldr(value, MemOperand(scratch0(), additional_offset));
__ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
__ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
__ and_(scratch0(), scratch0(),
Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(scratch0(), Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(scratch0(), Operand(0xff));
__ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(scratch0(),
scratch0(),
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(sfpd_hi, value, Operand(kBinary32SignMask));
__ orr(sfpd_hi, sfpd_hi,
Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(sfpd_hi, sfpd_hi,
Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
__ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
} else {
__ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
__ ldr(sfpd_hi, MemOperand(scratch0(),
additional_offset + kPointerSize));
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, kScratchDoubleReg.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
} else {
Register result = ToRegister(instr->result());
@ -3444,23 +3386,12 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (!key_is_constant) {
__ add(elements, elements, Operand(key, LSL, shift_size));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ add(elements, elements, Operand(base_offset));
__ vldr(result, elements, 0);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
} else {
__ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
__ ldr(sfpd_lo, MemOperand(elements, base_offset));
if (instr->hydrogen()->RequiresHoleCheck()) {
ASSERT(kPointerSize == sizeof(kHoleNanLower32));
__ cmp(sfpd_hi, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ add(elements, elements, Operand(base_offset));
__ vldr(result, elements, 0);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
}
@ -3902,7 +3833,6 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
@ -3939,7 +3869,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register input_high = scratch0();
@ -3962,7 +3891,6 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
@ -4002,7 +3930,6 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
@ -4010,7 +3937,6 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DwVfpRegister temp = ToDoubleRegister(instr->temp());
@ -4032,7 +3958,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
CpuFeatureScope scope(masm(), VFP2);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@ -4065,7 +3990,6 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
CpuFeatureScope scope(masm(), VFP2);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@ -4144,7 +4068,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
@ -4442,7 +4365,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
CpuFeatureScope scope(masm(), VFP2);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
@ -4513,7 +4435,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
@ -4814,7 +4735,6 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
CpuFeatureScope scope(masm(), VFP2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@ -4832,7 +4752,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
CpuFeatureScope scope(masm(), VFP2);
LOperand* input = instr->value();
LOperand* output = instr->result();
@ -4894,43 +4813,6 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register src.
// Resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register src,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
masm->mov(loword, Operand(src, LSL, mantissa_shift_for_lo_word));
masm->orr(hiword, scratch,
Operand(src, LSR, mantissa_shift_for_hi_word));
} else {
masm->mov(loword, Operand::Zero());
masm->orr(hiword, scratch,
Operand(src, LSL, -mantissa_shift_for_hi_word));
}
// If least significant bit of biased exponent was not 1 it was corrupted
// by most significant bit of mantissa so we should fix that.
if (!(biased_exponent & 1)) {
masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
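As a cross-check on the deleted helper: it hand-assembles the IEEE-754 bit pattern that a single `vcvt_f64_u32` now produces. A hypothetical standalone model (not V8 code), mirroring the shifts and the exponent fix-up above:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Bits of the double for a nonzero uint32 with the given leading-zero
    // count (like the deleted code, this does not handle 0).
    uint64_t UInt2DoubleBits(uint32_t src, int leading_zeroes) {
      const int kExponentBias = 1023;
      const int kExponentShift = 20;  // exponent position within the high word
      const int kMantissaBitsInTopWord = 20;
      const int meaningful_bits = 32 - leading_zeroes - 1;
      const int biased_exponent = kExponentBias + meaningful_bits;
      const int shift_for_hi = meaningful_bits - kMantissaBitsInTopWord;
      uint32_t hiword = uint32_t(biased_exponent) << kExponentShift;
      uint32_t loword;
      if (shift_for_hi > 0) {
        loword = src << (32 - shift_for_hi);
        hiword |= src >> shift_for_hi;  // implicit 1 lands on the exponent LSB
      } else {
        loword = 0;
        hiword |= src << -shift_for_hi;
      }
      if (!(biased_exponent & 1)) {
        hiword &= ~(1u << kExponentShift);  // clear the leaked implicit 1
      }
      return (uint64_t(hiword) << 32) | loword;
    }

    int main() {
      uint32_t v = 0x80000005u;           // no leading zeroes
      double d = static_cast<double>(v);  // what vcvt_f64_u32 computes
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      std::printf("%016llx vs %016llx\n",
                  static_cast<unsigned long long>(bits),
                  static_cast<unsigned long long>(UInt2DoubleBits(v, 0)));
    }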
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
@ -4952,35 +4834,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
} else {
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
sfpd_lo, sfpd_hi,
scratch0(), s0);
}
__ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
} else {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ vmov(flt_scratch, src);
__ vcvt_f64_u32(dbl_scratch, flt_scratch);
} else {
Label no_leading_zero, convert_done;
__ tst(src, Operand(0x80000000));
__ b(ne, &no_leading_zero);
// Integer has one leading zeros.
GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 1);
__ b(&convert_done);
__ bind(&no_leading_zero);
GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 0);
__ bind(&convert_done);
}
__ vmov(flt_scratch, src);
__ vcvt_f64_u32(dbl_scratch, flt_scratch);
}
if (FLAG_inline_new) {
@ -4996,30 +4854,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
if (!CpuFeatures::IsSupported(VFP2)) {
// Preserve sfpd_lo.
__ mov(r9, sfpd_lo);
}
__ mov(ip, Operand::Zero());
__ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, r0);
if (!CpuFeatures::IsSupported(VFP2)) {
// Restore sfpd_lo.
__ mov(sfpd_lo, r9);
}
__ sub(dst, dst, Operand(kHeapObjectTag));
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
} else {
__ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
__ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
}
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
__ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
@ -5052,45 +4896,19 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Label no_special_nan_handling;
Label done;
if (convert_hole) {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input_reg = ToDoubleRegister(instr->value());
__ VFPCompareAndSetFlags(input_reg, input_reg);
__ b(vc, &no_special_nan_handling);
__ vmov(reg, scratch0(), input_reg);
__ cmp(scratch0(), Operand(kHoleNanUpper32));
Label canonicalize;
__ b(ne, &canonicalize);
__ Move(reg, factory()->the_hole_value());
__ b(&done);
__ bind(&canonicalize);
__ Vmov(input_reg,
FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
no_reg);
} else {
Label not_hole;
__ cmp(sfpd_hi, Operand(kHoleNanUpper32));
__ b(ne, &not_hole);
__ Move(reg, factory()->the_hole_value());
__ b(&done);
__ bind(&not_hole);
__ and_(scratch, sfpd_hi, Operand(0x7ff00000));
__ cmp(scratch, Operand(0x7ff00000));
__ b(ne, &no_special_nan_handling);
Label special_nan_handling;
__ tst(sfpd_hi, Operand(0x000FFFFF));
__ b(ne, &special_nan_handling);
__ cmp(sfpd_lo, Operand(0));
__ b(eq, &no_special_nan_handling);
__ bind(&special_nan_handling);
double canonical_nan =
FixedDoubleArray::canonical_not_the_hole_nan_as_double();
uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
__ mov(sfpd_lo,
Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
__ mov(sfpd_hi,
Operand(static_cast<uint32_t>(casted_nan >> 32)));
}
DwVfpRegister input_reg = ToDoubleRegister(instr->value());
__ VFPCompareAndSetFlags(input_reg, input_reg);
__ b(vc, &no_special_nan_handling);
__ vmov(reg, scratch0(), input_reg);
__ cmp(scratch0(), Operand(kHoleNanUpper32));
Label canonicalize;
__ b(ne, &canonicalize);
__ Move(reg, factory()->the_hole_value());
__ b(&done);
__ bind(&canonicalize);
__ Vmov(input_reg,
FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
no_reg);
}
__ bind(&no_special_nan_handling);
@ -5104,13 +4922,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ vstr(input_reg, reg, HeapNumber::kValueOffset);
} else {
__ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
__ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
}
__ vstr(input_reg, reg, HeapNumber::kValueOffset);
// Now that we have finished with the object's real address tag it
__ add(reg, reg, Operand(kHeapObjectTag));
__ bind(&done);
@ -5160,7 +4972,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
CpuFeatureScope scope(masm(), VFP2);
Label load_smi, heap_number, done;
@ -5249,7 +5060,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
CpuFeatureScope scope(masm(), VFP2);
Register scratch3 = ToRegister(instr->temp2());
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
@ -5270,8 +5080,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ sub(scratch1, input_reg, Operand(kHeapObjectTag));
__ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
__ ECMAToInt32VFP(input_reg, double_scratch2, double_scratch,
scratch1, scratch2, scratch3);
__ ECMAToInt32(input_reg, double_scratch2, double_scratch,
scratch1, scratch2, scratch3);
} else {
CpuFeatureScope scope(masm(), VFP3);
@ -5369,8 +5179,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
__ ECMAToInt32VFP(result_reg, double_input, double_scratch,
scratch1, scratch2, scratch3);
__ ECMAToInt32(result_reg, double_input, double_scratch,
scratch1, scratch2, scratch3);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
@ -5486,7 +5296,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope vfp_scope(masm(), VFP2);
DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
@ -5495,7 +5304,6 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
CpuFeatureScope scope(masm(), VFP2);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@ -5503,7 +5311,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
CpuFeatureScope scope(masm(), VFP2);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());

View File

@ -171,10 +171,8 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@ -194,10 +192,8 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
@ -233,8 +229,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
// ip is overwritten while saving the value to the destination.
// ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kScratchDoubleReg.low(), source_operand);
@ -272,7 +267,6 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
@ -282,8 +276,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
MemOperand source_operand = cgen_->ToMemOperand(source);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {

View File

@ -291,8 +291,6 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatureScope scope(this, VFP2);
if (!dst.is(src)) {
vmov(dst, src);
}
@ -811,7 +809,6 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Register scratch) {
ASSERT(IsEnabled(VFP2));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@ -873,7 +870,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Optionally save all double registers.
if (save_doubles) {
CpuFeatureScope scope(this, VFP2);
// Check CPU flags for number of registers, setting the Z condition flag.
CheckFor32DRegs(ip);
@ -938,7 +934,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
CpuFeatureScope scope(this, VFP2);
// Calculate the stack location of the saved doubles and restore them.
const int offset = 2 * kPointerSize;
sub(r3, fp,
@ -975,7 +970,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@ -2046,11 +2040,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
destination = FloatingPointHelper::kVFPRegisters;
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
@ -2063,7 +2053,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatureScope scope(this, VFP2);
vstr(d0, scratch1, 0);
} else {
str(mantissa_reg, MemOperand(scratch1, 0));
@ -2423,9 +2412,6 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
DwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatureScope scope(this, VFP2);
vcvt_s32_f64(double_scratch.low(), double_input);
vcvt_f64_s32(double_scratch, double_scratch.low());
VFPCompareAndSetFlags(double_input, double_scratch);
@ -2436,9 +2422,6 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatureScope scope(this, VFP2);
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
@ -2454,8 +2437,6 @@ void MacroAssembler::TryInt32Floor(Register result,
Label* exact) {
ASSERT(!result.is(input_high));
ASSERT(!double_input.is(double_scratch));
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatureScope scope(this, VFP2);
Label negative, exception;
// Test for NaN and infinities.
@ -2500,26 +2481,18 @@ void MacroAssembler::ECMAConvertNumberToInt32(Register source,
Register scratch,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2) {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(this, VFP2);
vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
ECMAToInt32VFP(result, double_scratch1, double_scratch2,
scratch, input_high, input_low);
} else {
Ldrd(input_low, input_high,
FieldMemOperand(source, HeapNumber::kValueOffset));
ECMAToInt32NoVFP(result, scratch, input_high, input_low);
}
vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
ECMAToInt32(result, double_scratch1, double_scratch2,
scratch, input_high, input_low);
}
void MacroAssembler::ECMAToInt32VFP(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low) {
CpuFeatureScope scope(this, VFP2);
void MacroAssembler::ECMAToInt32(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low) {
ASSERT(!input_high.is(result));
ASSERT(!input_low.is(result));
ASSERT(!input_low.is(input_high));
@ -2559,58 +2532,6 @@ void MacroAssembler::ECMAToInt32VFP(Register result,
}
void MacroAssembler::ECMAToInt32NoVFP(Register result,
Register scratch,
Register input_high,
Register input_low) {
ASSERT(!result.is(scratch));
ASSERT(!result.is(input_high));
ASSERT(!result.is(input_low));
ASSERT(!scratch.is(input_high));
ASSERT(!scratch.is(input_low));
ASSERT(!input_high.is(input_low));
Label both, out_of_range, negate, done;
Ubfx(scratch, input_high,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Load scratch with exponent.
sub(scratch, scratch, Operand(HeapNumber::kExponentBias));
// If exponent is negative, 0 < input < 1, the result is 0.
// If exponent is greater than or equal to 84, the 32 least significant
// bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
// the result is 0.
// This test also catches NaN and infinities, which also return 0.
cmp(scratch, Operand(84));
// We do an unsigned comparison so negative numbers are treated as big
// positive numbers and the two tests above are done in one test.
b(hs, &out_of_range);
// Load scratch with 20 - exponent.
rsb(scratch, scratch, Operand(20), SetCC);
b(mi, &both);
// Test 0 and -0.
bic(result, input_high, Operand(HeapNumber::kSignMask));
orr(result, result, Operand(input_low), SetCC);
b(eq, &done);
// 0 <= exponent <= 20, shift only input_high.
// Scratch contains: 20 - exponent.
Ubfx(result, input_high,
0, HeapNumber::kMantissaBitsInTopWord);
// Set the implicit 1 before the mantissa part in input_high.
orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
mov(result, Operand(result, LSR, scratch));
b(&negate);
bind(&both);
// Restore scratch to exponent - 1 to be consistent with ECMAToInt32VFP.
rsb(scratch, scratch, Operand(19));
ECMAToInt32Tail(result, scratch, input_high, input_low,
&out_of_range, &negate, &done);
}
void MacroAssembler::ECMAToInt32Tail(Register result,
Register scratch,
Register input_high,
@ -2713,10 +2634,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
? kSaveFPRegs
: kDontSaveFPRegs;
CEntryStub stub(1, mode);
CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@ -3461,7 +3379,6 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
} else {
@ -3472,7 +3389,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
DwVfpRegister dreg2) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
ASSERT(!dreg1.is(d1));
@ -3491,7 +3407,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
Register reg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
Move(r0, reg);

View File

@ -969,20 +969,12 @@ class MacroAssembler: public Assembler {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer and all other registers clobbered.
void ECMAToInt32VFP(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void ECMAToInt32NoVFP(Register result,
Register scratch,
Register input_high,
Register input_low);
void ECMAToInt32(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low);
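For reference, the contract behind the renamed helper — ECMA-262 9.5 ToInt32: truncate toward zero, wrap modulo 2^32, reinterpret as signed — can be modeled in standalone C++ (an illustration, not V8 code; `EcmaToInt32` is a hypothetical name):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int32_t EcmaToInt32(double d) {
      if (!std::isfinite(d) || d == 0.0) return 0;  // NaN, +/-Inf, +/-0 -> 0
      double t = std::trunc(d);                     // truncate toward zero
      double m = std::fmod(t, 4294967296.0);        // wrap modulo 2^32 ...
      if (m < 0.0) m += 4294967296.0;
      // ... then reinterpret as signed (two's-complement wrap assumed, as on
      // every mainstream compiler).
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int main() {
      std::printf("%d\n", EcmaToInt32(3.9));           // 3
      std::printf("%d\n", EcmaToInt32(-3.9));          // -3
      std::printf("%d\n", EcmaToInt32(4294967298.0));  // 2
      std::printf("%d\n", EcmaToInt32(2147483648.0));  // -2147483648
    }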
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer

View File

@ -975,66 +975,11 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
Register fval,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm, VFP2);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
} else {
Label not_special, done;
// Move sign bit from source to destination. This works because the sign
// bit in the exponent word of the double has the same position and polarity
// as the 2's complement sign bit in a Smi.
ASSERT(kBinary32SignMask == 0x80000000u);
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
__ rsb(ival, ival, Operand::Zero(), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(ival, Operand(1));
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased).
static const uint32_t exponent_word_for_1 =
kBinary32ExponentBias << kBinary32ExponentShift;
__ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
__ b(&done);
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
__ CountLeadingZeros(zeros, ival, scratch1);
// Compute exponent and or it into the exponent register.
__ rsb(scratch1,
zeros,
Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
__ orr(fval,
fval,
Operand(scratch1, LSL, kBinary32ExponentShift));
// Shift up the source chopping the top bit off.
__ add(zeros, zeros, Operand(1));
// This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
__ mov(ival, Operand(ival, LSL, zeros));
// And the top (top 20 bits).
__ orr(fval,
fval,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
__ str(fval, MemOperand(dst, wordoffset, LSL, 2));
}
Register scratch1) {
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
}
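After the change, the helper is a plain int-to-float conversion followed by a word-indexed store. A scalar sketch, with assumed parameter names mirroring the registers above:

#include <cstdint>

// Equivalent of vmov + vcvt_f32_s32 + vstr at dst + wordoffset * 4.
void StoreIntAsFloatScalar(float* dst, int wordoffset, int32_t ival) {
  dst[wordoffset] = static_cast<float>(ival);
}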
@@ -2082,11 +2027,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
if (!CpuFeatures::IsSupported(VFP2)) {
return Handle<Code>::null();
}
CpuFeatureScope scope_vfp2(masm(), VFP2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -3126,36 +3066,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
static bool IsElementTypeSigned(ElementsKind elements_kind) {
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
return true;
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
return false;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
return false;
}
return false;
}
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
@@ -3163,29 +3073,23 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
DwVfpRegister double_scratch0,
DwVfpRegister double_scratch1,
Label* fail) {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm, VFP2);
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
// range.
__ JumpIfSmi(key, &key_ok);
__ CheckMap(key,
scratch0,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
__ b(ne, fail);
__ TrySmiTag(scratch0, fail, scratch1);
__ mov(key, scratch0);
__ bind(&key_ok);
} else {
// Check that the key is a smi.
__ JumpIfNotSmi(key, fail);
}
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
// range.
__ JumpIfSmi(key, &key_ok);
__ CheckMap(key,
scratch0,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
__ b(ne, fail);
__ TrySmiTag(scratch0, fail, scratch1);
__ mov(key, scratch0);
__ bind(&key_ok);
}
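A scalar sketch of the check that now runs unconditionally: a heap-number key is accepted only if it converts exactly to an integer that still fits the 31-bit smi payload used on 32-bit ARM (the names below are illustrative, not V8's):

#include <cstdint>

bool NormalizeDoubleKey(double key, int32_t* out) {
  if (!(key >= -2147483648.0 && key < 2147483648.0)) return false;  // int32 range
  int32_t i = static_cast<int32_t>(key);
  if (static_cast<double>(i) != key) return false;        // TryDoubleToInt32Exact
  if (i < -(1 << 30) || i > (1 << 30) - 1) return false;  // TrySmiTag would fail
  *out = i;
  return true;
}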
@@ -3255,28 +3159,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
__ SmiUntag(r4, key);
StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
StoreIntAsFloat(masm, r3, r4, r5, r7);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
destination = FloatingPointHelper::kVFPRegisters;
FloatingPointHelper::ConvertIntToDouble(
masm, r5, destination,
d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatureScope scope(masm, VFP2);
__ vstr(d0, r3, 0);
} else {
__ str(r6, MemOperand(r3, 0));
__ str(r7, MemOperand(r3, Register::kSizeInBytes));
}
__ vstr(d0, r3, 0);
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3306,201 +3200,59 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm, VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 1));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 2));
__ vstr(d0, r5, 0);
} else {
// Hoisted load. vldr requires offset to be a multiple of 4 so we can
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ ECMAToInt32VFP(r5, d0, d1, r6, r7, r9);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 1));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 2));
__ vstr(d0, r5, 0);
} else {
// VFP3 is not available, do manual conversions.
__ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
// Hoisted load. vldr requires offset to be a multiple of 4 so we can
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ ECMAToInt32(r5, d0, d1, r6, r7, r9);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaInLoWordShift =
kBitsPerInt - kMantissaInHiWordShift;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
__ teq(r9, Operand(r7));
__ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ add(r9,
r9,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
__ cmp(r9, Operand(kBinary32MaxExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
__ cmp(r9, Operand(kBinary32MinExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
__ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
__ bind(&done);
__ str(r5, MemOperand(r3, key, LSL, 1));
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
__ bind(&nan_or_infinity_or_zero);
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r9, r9, r7);
__ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
__ add(r7, r3, Operand(key, LSL, 2));
// r7: effective address of destination element.
__ str(r6, MemOperand(r7, 0));
__ str(r5, MemOperand(r7, Register::kSizeInBytes));
__ Ret();
} else {
bool is_signed_type = IsElementTypeSigned(elements_kind);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ mov(r5, Operand::Zero(), LeaveCC, eq);
__ b(eq, &done);
__ teq(r9, Operand(r7));
__ mov(r5, Operand::Zero(), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If exponent is negative then result is 0.
__ mov(r5, Operand::Zero(), LeaveCC, mi);
__ b(mi, &done);
// If exponent is too big then result is minimal value.
__ cmp(r9, Operand(meaningfull_bits - 1));
__ mov(r5, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
__ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
__ rsb(r9, r9, Operand::Zero());
__ mov(r5, Operand(r5, LSL, r9));
__ rsb(r9, r9, Operand(meaningfull_bits));
__ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
__ teq(r7, Operand::Zero());
__ rsb(r5, r5, Operand::Zero(), LeaveCC, ne);
__ bind(&done);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
}
// Slow case, key and receiver still in r0 and r1.
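For the integer element kinds the net behavior is: apply the ToInt32 rule (so NaN and +/-Infinity store as 0), then narrow by truncation to the element width. A hypothetical scalar model, with a locally declared kind enum standing in for ElementsKind:

#include <cstdint>

enum SketchKind { kByte, kShort, kInt };

void StoreNarrowed(SketchKind kind, void* addr, int32_t v) {
  switch (kind) {
    case kByte:  *static_cast<uint8_t*>(addr) = static_cast<uint8_t>(v);   break;  // strb
    case kShort: *static_cast<uint16_t*>(addr) = static_cast<uint16_t>(v); break;  // strh
    case kInt:   *static_cast<int32_t*>(addr) = v;                         break;  // str
  }
}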


@@ -191,11 +191,9 @@ CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
uint64_t mask = static_cast<uint64_t>(1) << f;
// TODO(svenpanne) This special case below doesn't belong here!
#if V8_TARGET_ARCH_ARM
// VFP2 and ARMv7 are implied by VFP3.
// ARMv7 is implied by VFP3.
if (f == VFP3) {
mask |=
static_cast<uint64_t>(1) << VFP2 |
static_cast<uint64_t>(1) << ARMv7;
mask |= static_cast<uint64_t>(1) << ARMv7;
}
#endif
assembler_->set_enabled_cpu_features(old_enabled_ | mask);
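The implication, pulled out as a standalone sketch (feature values match the renumbered enum later in this commit; VFPv3 only exists on ARMv7 cores, per ARM DDI 0406B, page A1-6):

#include <cstdint>

enum Feature { kVFP3 = 1, kARMv7 = 2 };

uint64_t FeatureMask(Feature f) {
  uint64_t mask = static_cast<uint64_t>(1) << f;
  if (f == kVFP3) mask |= static_cast<uint64_t>(1) << kARMv7;  // VFP3 implies ARMv7
  return mask;
}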


@@ -770,7 +770,7 @@ class BinaryOpStub: public PlatformCodeStub {
private:
Token::Value op_;
OverwriteMode mode_;
bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
bool platform_specific_bit_; // Indicates SSE3 on IA32.
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo left_type_;


@@ -309,10 +309,7 @@ DEFINE_bool(enable_rdtsc, true,
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available - this implies "
"enabling ARMv7 and VFP2 instructions (ARM only)")
DEFINE_bool(enable_vfp2, true,
"enable use of VFP2 instructions if available")
"enable use of VFP3 instructions if available")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,


@@ -2008,7 +2008,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
// float->double conversion on non-VFP2 requires an extra scratch
// float->double conversion on soft float requires an extra scratch
// register. For convenience, just mark the elements register as "UseTemp"
// so that it can be used as a temp during the float->double conversion
// after it's no longer needed after the float load.


@@ -146,9 +146,6 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
// facility is universally available on the ARM architectures,
// so it's up to individual OSes to provide such.
switch (feature) {
case VFP2:
search_string = "vfp";
break;
case VFP3:
search_string = "vfpv3";
break;
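A simplified sketch of the probe this switch feeds: scan /proc/cpuinfo for the feature tag. Real code has to be more careful, e.g. "vfpv3d16" also contains "vfpv3" as a substring:

#include <cstdio>
#include <cstring>

static bool CpuInfoHasFeature(const char* tag) {  // e.g. "vfpv3"
  std::FILE* f = std::fopen("/proc/cpuinfo", "r");
  if (f == NULL) return false;
  char line[512];
  bool found = false;
  while (!found && std::fgets(line, sizeof(line), f) != NULL) {
    found = std::strstr(line, tag) != NULL;
  }
  std::fclose(f);
  return found;
}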


@@ -433,11 +433,10 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
VFP2 = 3, // ARM
SUDIV = 4, // ARM
UNALIGNED_ACCESSES = 5, // ARM
MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
VFP32DREGS = 7, // ARM
SUDIV = 3, // ARM
UNALIGNED_ACCESSES = 4, // ARM
MOVW_MOVT_IMMEDIATE_LOADS = 5, // ARM
VFP32DREGS = 6, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS


@@ -654,81 +654,77 @@ TEST(8) {
// single precision values around in memory.
Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(&assm, VFP2);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ sub(fp, ip, Operand(4));
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ sub(fp, ip, Operand(4));
__ add(r4, r0, Operand(OFFSET_OF(D, a)));
__ vldm(ia_w, r4, d0, d3);
__ vldm(ia_w, r4, d4, d7);
__ add(r4, r0, Operand(OFFSET_OF(D, a)));
__ vldm(ia_w, r4, d0, d3);
__ vldm(ia_w, r4, d4, d7);
__ add(r4, r0, Operand(OFFSET_OF(D, a)));
__ vstm(ia_w, r4, d6, d7);
__ vstm(ia_w, r4, d0, d5);
__ add(r4, r0, Operand(OFFSET_OF(D, a)));
__ vstm(ia_w, r4, d6, d7);
__ vstm(ia_w, r4, d0, d5);
__ add(r4, r1, Operand(OFFSET_OF(F, a)));
__ vldm(ia_w, r4, s0, s3);
__ vldm(ia_w, r4, s4, s7);
__ add(r4, r1, Operand(OFFSET_OF(F, a)));
__ vldm(ia_w, r4, s0, s3);
__ vldm(ia_w, r4, s4, s7);
__ add(r4, r1, Operand(OFFSET_OF(F, a)));
__ vstm(ia_w, r4, s6, s7);
__ vstm(ia_w, r4, s0, s5);
__ add(r4, r1, Operand(OFFSET_OF(F, a)));
__ vstm(ia_w, r4, s6, s7);
__ vstm(ia_w, r4, s0, s5);
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
assm.GetCode(&desc);
Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
CodeDesc desc;
assm.GetCode(&desc);
Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
Code::cast(code)->Print();
#endif
F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
d.d = 4.4;
d.e = 5.5;
d.f = 6.6;
d.g = 7.7;
d.h = 8.8;
F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
d.d = 4.4;
d.e = 5.5;
d.f = 6.6;
d.g = 7.7;
d.h = 8.8;
f.a = 1.0;
f.b = 2.0;
f.c = 3.0;
f.d = 4.0;
f.e = 5.0;
f.f = 6.0;
f.g = 7.0;
f.h = 8.0;
f.a = 1.0;
f.b = 2.0;
f.c = 3.0;
f.d = 4.0;
f.e = 5.0;
f.f = 6.0;
f.g = 7.0;
f.h = 8.0;
Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
USE(dummy);
Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
CHECK_EQ(1.1, d.c);
CHECK_EQ(2.2, d.d);
CHECK_EQ(3.3, d.e);
CHECK_EQ(4.4, d.f);
CHECK_EQ(5.5, d.g);
CHECK_EQ(6.6, d.h);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
CHECK_EQ(1.1, d.c);
CHECK_EQ(2.2, d.d);
CHECK_EQ(3.3, d.e);
CHECK_EQ(4.4, d.f);
CHECK_EQ(5.5, d.g);
CHECK_EQ(6.6, d.h);
CHECK_EQ(7.0, f.a);
CHECK_EQ(8.0, f.b);
CHECK_EQ(1.0, f.c);
CHECK_EQ(2.0, f.d);
CHECK_EQ(3.0, f.e);
CHECK_EQ(4.0, f.f);
CHECK_EQ(5.0, f.g);
CHECK_EQ(6.0, f.h);
}
CHECK_EQ(7.0, f.a);
CHECK_EQ(8.0, f.b);
CHECK_EQ(1.0, f.c);
CHECK_EQ(2.0, f.d);
CHECK_EQ(3.0, f.e);
CHECK_EQ(4.0, f.f);
CHECK_EQ(5.0, f.g);
CHECK_EQ(6.0, f.h);
}
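In scalar form, the double-precision half of this test is a rotation: d6 and d7 are stored to the first two slots and d0..d5 after them, so every field lands two slots later, wrapping around (TEST(9) and TEST(10) below exercise the same pattern with the ia and db_w addressing modes). A sketch of the effect the CHECK_EQs verify:

void RotateRightByTwo(double a[8]) {
  double in[8];
  for (int i = 0; i < 8; ++i) in[i] = a[i];
  a[0] = in[6];                                  // vstm d6, d7 first
  a[1] = in[7];
  for (int i = 0; i < 6; ++i) a[i + 2] = in[i];  // then vstm d0, d5
}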
@@ -766,85 +762,81 @@ TEST(9) {
// single precision values around in memory.
Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(&assm, VFP2);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ sub(fp, ip, Operand(4));
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ sub(fp, ip, Operand(4));
__ add(r4, r0, Operand(OFFSET_OF(D, a)));
__ vldm(ia, r4, d0, d3);
__ add(r4, r4, Operand(4 * 8));
__ vldm(ia, r4, d4, d7);
__ add(r4, r0, Operand(OFFSET_OF(D, a)));
__ vldm(ia, r4, d0, d3);
__ add(r4, r4, Operand(4 * 8));
__ vldm(ia, r4, d4, d7);
__ add(r4, r0, Operand(OFFSET_OF(D, a)));
__ vstm(ia, r4, d6, d7);
__ add(r4, r4, Operand(2 * 8));
__ vstm(ia, r4, d0, d5);
__ add(r4, r0, Operand(OFFSET_OF(D, a)));
__ vstm(ia, r4, d6, d7);
__ add(r4, r4, Operand(2 * 8));
__ vstm(ia, r4, d0, d5);
__ add(r4, r1, Operand(OFFSET_OF(F, a)));
__ vldm(ia, r4, s0, s3);
__ add(r4, r4, Operand(4 * 4));
__ vldm(ia, r4, s4, s7);
__ add(r4, r1, Operand(OFFSET_OF(F, a)));
__ vldm(ia, r4, s0, s3);
__ add(r4, r4, Operand(4 * 4));
__ vldm(ia, r4, s4, s7);
__ add(r4, r1, Operand(OFFSET_OF(F, a)));
__ vstm(ia, r4, s6, s7);
__ add(r4, r4, Operand(2 * 4));
__ vstm(ia, r4, s0, s5);
__ add(r4, r1, Operand(OFFSET_OF(F, a)));
__ vstm(ia, r4, s6, s7);
__ add(r4, r4, Operand(2 * 4));
__ vstm(ia, r4, s0, s5);
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
assm.GetCode(&desc);
Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
CodeDesc desc;
assm.GetCode(&desc);
Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
Code::cast(code)->Print();
#endif
F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
d.d = 4.4;
d.e = 5.5;
d.f = 6.6;
d.g = 7.7;
d.h = 8.8;
F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
d.d = 4.4;
d.e = 5.5;
d.f = 6.6;
d.g = 7.7;
d.h = 8.8;
f.a = 1.0;
f.b = 2.0;
f.c = 3.0;
f.d = 4.0;
f.e = 5.0;
f.f = 6.0;
f.g = 7.0;
f.h = 8.0;
f.a = 1.0;
f.b = 2.0;
f.c = 3.0;
f.d = 4.0;
f.e = 5.0;
f.f = 6.0;
f.g = 7.0;
f.h = 8.0;
Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
USE(dummy);
Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
CHECK_EQ(1.1, d.c);
CHECK_EQ(2.2, d.d);
CHECK_EQ(3.3, d.e);
CHECK_EQ(4.4, d.f);
CHECK_EQ(5.5, d.g);
CHECK_EQ(6.6, d.h);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
CHECK_EQ(1.1, d.c);
CHECK_EQ(2.2, d.d);
CHECK_EQ(3.3, d.e);
CHECK_EQ(4.4, d.f);
CHECK_EQ(5.5, d.g);
CHECK_EQ(6.6, d.h);
CHECK_EQ(7.0, f.a);
CHECK_EQ(8.0, f.b);
CHECK_EQ(1.0, f.c);
CHECK_EQ(2.0, f.d);
CHECK_EQ(3.0, f.e);
CHECK_EQ(4.0, f.f);
CHECK_EQ(5.0, f.g);
CHECK_EQ(6.0, f.h);
}
CHECK_EQ(7.0, f.a);
CHECK_EQ(8.0, f.b);
CHECK_EQ(1.0, f.c);
CHECK_EQ(2.0, f.d);
CHECK_EQ(3.0, f.e);
CHECK_EQ(4.0, f.f);
CHECK_EQ(5.0, f.g);
CHECK_EQ(6.0, f.h);
}
@@ -882,81 +874,77 @@ TEST(10) {
// single precision values around in memory.
Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(&assm, VFP2);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ sub(fp, ip, Operand(4));
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ sub(fp, ip, Operand(4));
__ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
__ vldm(db_w, r4, d4, d7);
__ vldm(db_w, r4, d0, d3);
__ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
__ vldm(db_w, r4, d4, d7);
__ vldm(db_w, r4, d0, d3);
__ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
__ vstm(db_w, r4, d0, d5);
__ vstm(db_w, r4, d6, d7);
__ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
__ vstm(db_w, r4, d0, d5);
__ vstm(db_w, r4, d6, d7);
__ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
__ vldm(db_w, r4, s4, s7);
__ vldm(db_w, r4, s0, s3);
__ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
__ vldm(db_w, r4, s4, s7);
__ vldm(db_w, r4, s0, s3);
__ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
__ vstm(db_w, r4, s0, s5);
__ vstm(db_w, r4, s6, s7);
__ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
__ vstm(db_w, r4, s0, s5);
__ vstm(db_w, r4, s6, s7);
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
assm.GetCode(&desc);
Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
CodeDesc desc;
assm.GetCode(&desc);
Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
Code::cast(code)->Print();
#endif
F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
d.d = 4.4;
d.e = 5.5;
d.f = 6.6;
d.g = 7.7;
d.h = 8.8;
F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
d.d = 4.4;
d.e = 5.5;
d.f = 6.6;
d.g = 7.7;
d.h = 8.8;
f.a = 1.0;
f.b = 2.0;
f.c = 3.0;
f.d = 4.0;
f.e = 5.0;
f.f = 6.0;
f.g = 7.0;
f.h = 8.0;
f.a = 1.0;
f.b = 2.0;
f.c = 3.0;
f.d = 4.0;
f.e = 5.0;
f.f = 6.0;
f.g = 7.0;
f.h = 8.0;
Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
USE(dummy);
Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
CHECK_EQ(1.1, d.c);
CHECK_EQ(2.2, d.d);
CHECK_EQ(3.3, d.e);
CHECK_EQ(4.4, d.f);
CHECK_EQ(5.5, d.g);
CHECK_EQ(6.6, d.h);
CHECK_EQ(7.7, d.a);
CHECK_EQ(8.8, d.b);
CHECK_EQ(1.1, d.c);
CHECK_EQ(2.2, d.d);
CHECK_EQ(3.3, d.e);
CHECK_EQ(4.4, d.f);
CHECK_EQ(5.5, d.g);
CHECK_EQ(6.6, d.h);
CHECK_EQ(7.0, f.a);
CHECK_EQ(8.0, f.b);
CHECK_EQ(1.0, f.c);
CHECK_EQ(2.0, f.d);
CHECK_EQ(3.0, f.e);
CHECK_EQ(4.0, f.f);
CHECK_EQ(5.0, f.g);
CHECK_EQ(6.0, f.h);
}
CHECK_EQ(7.0, f.a);
CHECK_EQ(8.0, f.b);
CHECK_EQ(1.0, f.c);
CHECK_EQ(2.0, f.d);
CHECK_EQ(3.0, f.e);
CHECK_EQ(4.0, f.f);
CHECK_EQ(5.0, f.g);
CHECK_EQ(6.0, f.h);
}