[arm] Clean up use of IsSupported and IsEnabled.

CpuFeatures::IsSupported(feature) indicates that the feature is
available on the target. AssemblerBase::IsEnabled(feature) indicates
that we've checked for support (using CpuFeatureScope). The main benefit
is that we can test on (for example) ARMv8, but have some assurance that
we won't generate ARMv8 instructions on ARMv7 targets.

This patch simply cleans up the usage, which had become inconsistent.
The instruction emission functions now check not only that their
dependent features are supported, but also that support has been
verified using a CpuFeatureScope.

BUG=

Review-Url: https://codereview.chromium.org/2360243002
Cr-Commit-Position: refs/heads/master@{#39676}
jacob.bramley, 2016-09-23 08:29:11 -07:00, committed by Commit bot
commit 73518a9070 (parent 914519dda7)
14 changed files with 266 additions and 202 deletions


@@ -46,7 +46,7 @@
 namespace v8 {
 namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFPv3); }
 bool CpuFeatures::SupportsSimd128() { return false; }


@@ -313,10 +313,10 @@ void CpuFeatures::PrintTarget() {
 void CpuFeatures::PrintFeatures() {
 printf(
-"ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
+"ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
 "MOVW_MOVT_IMMEDIATE_LOADS=%d",
 CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
-CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
+CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS),
 CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
 CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
 #ifdef __arm__
@@ -598,6 +598,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
 first_const_pool_64_use_ = -1;
 last_bound_pos_ = 0;
 ClearRecordedAstId();
+if (CpuFeatures::IsSupported(VFP32DREGS)) {
+  // Register objects tend to be abstracted and survive between scopes, so
+  // it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
+  // its use consistent with other features, we always enable it if we can.
+  EnableCpuFeature(VFP32DREGS);
+}
 }
@@ -977,10 +983,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
 if (target16_1 == 0) {
 CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
 1, CodePatcher::DONT_FLUSH);
+CpuFeatureScope scope(patcher.masm(), ARMv7);
 patcher.masm()->movw(dst, target16_0);
 } else {
 CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
 2, CodePatcher::DONT_FLUSH);
+CpuFeatureScope scope(patcher.masm(), ARMv7);
 patcher.masm()->movw(dst, target16_0);
 patcher.masm()->movt(dst, target16_1);
 }
@@ -1257,6 +1265,7 @@ void Assembler::move_32_bit_immediate(Register rd,
 if (use_mov_immediate_load(x, this)) {
 Register target = rd.code() == pc.code() ? ip : rd;
 if (CpuFeatures::IsSupported(ARMv7)) {
+CpuFeatureScope scope(this, ARMv7);
 if (!FLAG_enable_embedded_constant_pool &&
 x.must_output_reloc_info(this)) {
 // Make sure the movw/movt doesn't get separated.
@@ -1283,6 +1292,7 @@ void Assembler::move_32_bit_immediate(Register rd,
 Register target = rd.code() == pc.code() ? ip : rd;
 // Emit instructions to load constant pool offset.
 if (CpuFeatures::IsSupported(ARMv7)) {
+CpuFeatureScope scope(this, ARMv7);
 movw(target, 0, cond);
 movt(target, 0, cond);
 } else {
@@ -1493,8 +1503,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
 }
-void Assembler::blx(int branch_offset) { // v5 and above
+void Assembler::blx(int branch_offset) {
 DCHECK((branch_offset & 1) == 0);
 int h = ((branch_offset & 2) >> 1)*B24;
 int imm24 = branch_offset >> 2;
@@ -1502,14 +1511,12 @@ void Assembler::blx(int branch_offset) { // v5 and above
 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
 }
-void Assembler::blx(Register target, Condition cond) { // v5 and above
+void Assembler::blx(Register target, Condition cond) {
 DCHECK(!target.is(pc));
 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
 }
-void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
+void Assembler::bx(Register target, Condition cond) {
 DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
 }
@@ -1665,13 +1672,13 @@ void Assembler::mov_label_offset(Register dst, Label* label) {
 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
-DCHECK(CpuFeatures::IsSupported(ARMv7));
+DCHECK(IsEnabled(ARMv7));
 emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
 }
 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
-DCHECK(CpuFeatures::IsSupported(ARMv7));
+DCHECK(IsEnabled(ARMv7));
 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
 }
@@ -1801,7 +1808,6 @@ void Assembler::umull(Register dstL,
 // Miscellaneous arithmetic instructions.
 void Assembler::clz(Register dst, Register src, Condition cond) {
-// v5 and above.
 DCHECK(!dst.is(pc) && !src.is(pc));
 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
 15*B8 | CLZ | src.code());
@@ -1841,8 +1847,7 @@ void Assembler::ubfx(Register dst,
 int lsb,
 int width,
 Condition cond) {
-// v7 and above.
-DCHECK(CpuFeatures::IsSupported(ARMv7));
+DCHECK(IsEnabled(ARMv7));
 DCHECK(!dst.is(pc) && !src.is(pc));
 DCHECK((lsb >= 0) && (lsb <= 31));
 DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1861,8 +1866,7 @@ void Assembler::sbfx(Register dst,
 int lsb,
 int width,
 Condition cond) {
-// v7 and above.
-DCHECK(CpuFeatures::IsSupported(ARMv7));
+DCHECK(IsEnabled(ARMv7));
 DCHECK(!dst.is(pc) && !src.is(pc));
 DCHECK((lsb >= 0) && (lsb <= 31));
 DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1876,8 +1880,7 @@ void Assembler::sbfx(Register dst,
 // to zero, preserving the value of the other bits.
 // bfc dst, #lsb, #width
 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
-// v7 and above.
-DCHECK(CpuFeatures::IsSupported(ARMv7));
+DCHECK(IsEnabled(ARMv7));
 DCHECK(!dst.is(pc));
 DCHECK((lsb >= 0) && (lsb <= 31));
 DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1895,8 +1898,7 @@ void Assembler::bfi(Register dst,
 int lsb,
 int width,
 Condition cond) {
-// v7 and above.
-DCHECK(CpuFeatures::IsSupported(ARMv7));
+DCHECK(IsEnabled(ARMv7));
 DCHECK(!dst.is(pc) && !src.is(pc));
 DCHECK((lsb >= 0) && (lsb <= 31));
 DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -2293,8 +2295,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 #endif // def __arm__
 }
-void Assembler::bkpt(uint32_t imm16) { // v5 and above
+void Assembler::bkpt(uint32_t imm16) {
 DCHECK(is_uint16(imm16));
 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
 }
@@ -2355,13 +2356,8 @@ void Assembler::cdp(Coprocessor coproc,
 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
 }
-void Assembler::cdp2(Coprocessor coproc,
-int opcode_1,
-CRegister crd,
-CRegister crn,
-CRegister crm,
-int opcode_2) { // v5 and above
+void Assembler::cdp2(Coprocessor coproc, int opcode_1, CRegister crd,
+CRegister crn, CRegister crm, int opcode_2) {
 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
 }
@@ -2378,13 +2374,8 @@ void Assembler::mcr(Coprocessor coproc,
 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
 }
-void Assembler::mcr2(Coprocessor coproc,
-int opcode_1,
-Register rd,
-CRegister crn,
-CRegister crm,
-int opcode_2) { // v5 and above
+void Assembler::mcr2(Coprocessor coproc, int opcode_1, Register rd,
+CRegister crn, CRegister crm, int opcode_2) {
 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
 }
@@ -2401,13 +2392,8 @@ void Assembler::mrc(Coprocessor coproc,
 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
 }
-void Assembler::mrc2(Coprocessor coproc,
-int opcode_1,
-Register rd,
-CRegister crn,
-CRegister crm,
-int opcode_2) { // v5 and above
+void Assembler::mrc2(Coprocessor coproc, int opcode_1, Register rd,
+CRegister crn, CRegister crm, int opcode_2) {
 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
 }
@@ -2433,20 +2419,13 @@ void Assembler::ldc(Coprocessor coproc,
 coproc*B8 | (option & 255));
 }
-void Assembler::ldc2(Coprocessor coproc,
-CRegister crd,
-const MemOperand& src,
-LFlag l) { // v5 and above
+void Assembler::ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+LFlag l) {
 ldc(coproc, crd, src, l, kSpecialCondition);
 }
-void Assembler::ldc2(Coprocessor coproc,
-CRegister crd,
-Register rn,
-int option,
-LFlag l) { // v5 and above
+void Assembler::ldc2(Coprocessor coproc, CRegister crd, Register rn, int option,
+LFlag l) {
 ldc(coproc, crd, rn, option, l, kSpecialCondition);
 }
@@ -2461,6 +2440,7 @@ void Assembler::vldr(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-924.
 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
 // Vd(15-12) | 1011(11-8) | offset
+DCHECK(VfpRegisterIsAvailable(dst));
 int u = 1;
 if (offset < 0) {
 CHECK(offset != kMinInt);
@@ -2491,6 +2471,7 @@
 void Assembler::vldr(const DwVfpRegister dst,
 const MemOperand& operand,
 const Condition cond) {
+DCHECK(VfpRegisterIsAvailable(dst));
 DCHECK(operand.am_ == Offset);
 if (operand.rm().is_valid()) {
 add(ip, operand.rn(),
@@ -2558,6 +2539,7 @@ void Assembler::vstr(const DwVfpRegister src,
 // Instruction details available in ARM DDI 0406C.b, A8-1082.
 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
 // Vd(15-12) | 1011(11-8) | (offset/4)
+DCHECK(VfpRegisterIsAvailable(src));
 int u = 1;
 if (offset < 0) {
 CHECK(offset != kMinInt);
@@ -2588,6 +2570,7 @@ void Assembler::vstr(const DwVfpRegister src,
 void Assembler::vstr(const DwVfpRegister src,
 const MemOperand& operand,
 const Condition cond) {
+DCHECK(VfpRegisterIsAvailable(src));
 DCHECK(operand.am_ == Offset);
 if (operand.rm().is_valid()) {
 add(ip, operand.rn(),
@@ -2646,16 +2629,13 @@ void Assembler::vstr(const SwVfpRegister src,
 }
 }
-void Assembler::vldm(BlockAddrMode am,
-Register base,
-DwVfpRegister first,
-DwVfpRegister last,
-Condition cond) {
+void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
+DwVfpRegister last, Condition cond) {
 // Instruction details available in ARM DDI 0406C.b, A8-922.
 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
 // first(15-12) | 1011(11-8) | (count * 2)
 DCHECK_LE(first.code(), last.code());
+DCHECK(VfpRegisterIsAvailable(last));
 DCHECK(am == ia || am == ia_w || am == db_w);
 DCHECK(!base.is(pc));
@@ -2667,16 +2647,13 @@ void Assembler::vldm(BlockAddrMode am,
 0xB*B8 | count*2);
 }
-void Assembler::vstm(BlockAddrMode am,
-Register base,
-DwVfpRegister first,
-DwVfpRegister last,
-Condition cond) {
+void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
+DwVfpRegister last, Condition cond) {
 // Instruction details available in ARM DDI 0406C.b, A8-1080.
 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
 // first(15-12) | 1011(11-8) | (count * 2)
 DCHECK_LE(first.code(), last.code());
+DCHECK(VfpRegisterIsAvailable(last));
 DCHECK(am == ia || am == ia_w || am == db_w);
 DCHECK(!base.is(pc));
@@ -2688,11 +2665,8 @@ void Assembler::vstm(BlockAddrMode am,
 0xB*B8 | count*2);
 }
-void Assembler::vldm(BlockAddrMode am,
-Register base,
-SwVfpRegister first,
-SwVfpRegister last,
-Condition cond) {
+void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
+SwVfpRegister last, Condition cond) {
 // Instruction details available in ARM DDI 0406A, A8-626.
 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
 // first(15-12) | 1010(11-8) | (count/2)
@@ -2707,12 +2681,8 @@ void Assembler::vldm(BlockAddrMode am,
 0xA*B8 | count);
 }
-void Assembler::vstm(BlockAddrMode am,
-Register base,
-SwVfpRegister first,
-SwVfpRegister last,
-Condition cond) {
+void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
+SwVfpRegister last, Condition cond) {
 // Instruction details available in ARM DDI 0406A, A8-784.
 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
 // first(15-12) | 1011(11-8) | (count/2)
@@ -2740,8 +2710,6 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
 // Only works for little endian floating point formats.
 // We don't support VFP on the mixed endian floating point platform.
 static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
-DCHECK(CpuFeatures::IsSupported(VFP3));
 // VMOV can accept an immediate of the form:
 //
 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
@@ -2790,7 +2758,8 @@ static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
 void Assembler::vmov(const SwVfpRegister dst, float imm) {
 uint32_t enc;
-if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
+CpuFeatureScope scope(this, VFPv3);
 // The float can be encoded in the instruction.
 //
 // Sd = immediate
@@ -2810,6 +2779,8 @@ void Assembler::vmov(const SwVfpRegister dst, float imm) {
 void Assembler::vmov(const DwVfpRegister dst,
 double imm,
 const Register scratch) {
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(!scratch.is(ip));
 uint32_t enc;
 // If the embedded constant pool is disabled, we can use the normal, inline
 // constant pool. If the embedded constant pool is enabled (via
@@ -2817,7 +2788,8 @@ void Assembler::vmov(const DwVfpRegister dst,
 // pointer (pp) is valid.
 bool can_use_pool =
 !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
-if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
+CpuFeatureScope scope(this, VFPv3);
 // The double can be encoded in the instruction.
 //
 // Dd = immediate
@@ -2827,7 +2799,9 @@ void Assembler::vmov(const DwVfpRegister dst,
 int vd, d;
 dst.split_code(&vd, &d);
 emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
-} else if (FLAG_enable_vldr_imm && can_use_pool) {
+} else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm &&
+can_use_pool) {
+CpuFeatureScope scope(this, ARMv7);
 // TODO(jfb) Temporarily turned off until we have constant blinding or
 // some equivalent mitigation: an attacker can otherwise control
 // generated data which also happens to be executable, a Very Bad
@@ -2870,6 +2844,7 @@ void Assembler::vmov(const DwVfpRegister dst,
 vmov(dst, VmovIndexLo, ip);
 if (((lo & 0xffff) == (hi & 0xffff)) &&
 CpuFeatures::IsSupported(ARMv7)) {
+CpuFeatureScope scope(this, ARMv7);
 movt(ip, hi >> 16);
 } else {
 mov(ip, Operand(hi));
@@ -2905,6 +2880,8 @@ void Assembler::vmov(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-938.
 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -2922,6 +2899,7 @@ void Assembler::vmov(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-940.
 // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
 // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
 DCHECK(index.index == 0 || index.index == 1);
 int vd, d;
 dst.split_code(&vd, &d);
@@ -2938,6 +2916,7 @@ void Assembler::vmov(const Register dst,
 // Instruction details available in ARM DDI 0406C.b, A8.8.342.
 // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
 // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+DCHECK(VfpRegisterIsAvailable(src));
 DCHECK(index.index == 0 || index.index == 1);
 int vn, n;
 src.split_code(&vn, &n);
@@ -2954,6 +2933,7 @@ void Assembler::vmov(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-948.
 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+DCHECK(VfpRegisterIsAvailable(dst));
 DCHECK(!src1.is(pc) && !src2.is(pc));
 int vm, m;
 dst.split_code(&vm, &m);
@@ -2970,6 +2950,7 @@ void Assembler::vmov(const Register dst1,
 // Instruction details available in ARM DDI 0406C.b, A8-948.
 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+DCHECK(VfpRegisterIsAvailable(src));
 DCHECK(!dst1.is(pc) && !dst2.is(pc));
 int vm, m;
 src.split_code(&vm, &m);
@@ -3123,6 +3104,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
 const SwVfpRegister src,
 VFPConversionMode mode,
 const Condition cond) {
+DCHECK(VfpRegisterIsAvailable(dst));
 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
 }
@@ -3139,6 +3121,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
 const SwVfpRegister src,
 VFPConversionMode mode,
 const Condition cond) {
+DCHECK(VfpRegisterIsAvailable(dst));
 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
 }
@@ -3165,6 +3148,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
 const DwVfpRegister src,
 VFPConversionMode mode,
 const Condition cond) {
+DCHECK(VfpRegisterIsAvailable(src));
 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
 }
@@ -3173,6 +3157,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
 const DwVfpRegister src,
 VFPConversionMode mode,
 const Condition cond) {
+DCHECK(VfpRegisterIsAvailable(src));
 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
 }
@@ -3181,6 +3166,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
 const SwVfpRegister src,
 VFPConversionMode mode,
 const Condition cond) {
+DCHECK(VfpRegisterIsAvailable(dst));
 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
 }
@@ -3189,6 +3175,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
 const DwVfpRegister src,
 VFPConversionMode mode,
 const Condition cond) {
+DCHECK(VfpRegisterIsAvailable(src));
 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
 }
@@ -3199,8 +3186,9 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-874.
 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
 // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
+DCHECK(IsEnabled(VFPv3));
+DCHECK(VfpRegisterIsAvailable(dst));
 DCHECK(fraction_bits > 0 && fraction_bits <= 32);
-DCHECK(CpuFeatures::IsSupported(VFP3));
 int vd, d;
 dst.split_code(&vd, &d);
 int imm5 = 32 - fraction_bits;
@@ -3217,6 +3205,8 @@ void Assembler::vneg(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-968.
 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3248,6 +3238,8 @@ void Assembler::vabs(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-524.
 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
 // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3280,6 +3272,9 @@ void Assembler::vadd(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-830.
 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src1));
+DCHECK(VfpRegisterIsAvailable(src2));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3318,6 +3313,9 @@ void Assembler::vsub(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-1086.
 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src1));
+DCHECK(VfpRegisterIsAvailable(src2));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3356,6 +3354,9 @@ void Assembler::vmul(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-960.
 // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src1));
+DCHECK(VfpRegisterIsAvailable(src2));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3392,6 +3393,9 @@ void Assembler::vmla(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-932.
 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src1));
+DCHECK(VfpRegisterIsAvailable(src2));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3426,6 +3430,9 @@ void Assembler::vmls(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-932.
 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src1));
+DCHECK(VfpRegisterIsAvailable(src2));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3462,6 +3469,9 @@ void Assembler::vdiv(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-882.
 // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src1));
+DCHECK(VfpRegisterIsAvailable(src2));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3498,6 +3508,8 @@ void Assembler::vcmp(const DwVfpRegister src1,
 // Instruction details available in ARM DDI 0406C.b, A8-864.
 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(src1));
+DCHECK(VfpRegisterIsAvailable(src2));
 int vd, d;
 src1.split_code(&vd, &d);
 int vm, m;
@@ -3529,6 +3541,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
 // Instruction details available in ARM DDI 0406C.b, A8-864.
 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
+DCHECK(VfpRegisterIsAvailable(src1));
 DCHECK(src2 == 0.0);
 int vd, d;
 src1.split_code(&vd, &d);
@@ -3553,7 +3566,7 @@ void Assembler::vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
 const DwVfpRegister src2) {
 // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3569,7 +3582,7 @@ void Assembler::vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
 const SwVfpRegister src2) {
 // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3585,7 +3598,7 @@ void Assembler::vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
 const DwVfpRegister src2) {
 // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3601,7 +3614,7 @@ void Assembler::vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
 const SwVfpRegister src2) {
 // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
 // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3618,7 +3631,7 @@ void Assembler::vsel(Condition cond, const DwVfpRegister dst,
 // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
 // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=1(8) | N(7) |
 // 0(6) | M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3650,7 +3663,7 @@ void Assembler::vsel(Condition cond, const SwVfpRegister dst,
 // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
 // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=0(8) | N(7) |
 // 0(6) | M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vn, n;
@@ -3683,6 +3696,8 @@ void Assembler::vsqrt(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-1058.
 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
+DCHECK(VfpRegisterIsAvailable(dst));
+DCHECK(VfpRegisterIsAvailable(src));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3726,7 +3741,7 @@ void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
 // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
 // M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3740,7 +3755,7 @@ void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
 // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
 // M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3754,7 +3769,7 @@ void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
 // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
 // M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3768,7 +3783,7 @@ void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
 // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
 // M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3782,7 +3797,7 @@ void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
 // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
 // M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3796,7 +3811,7 @@ void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
 // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
 // M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3810,7 +3825,7 @@ void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
 // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
 // M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3824,7 +3839,7 @@ void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
 // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
 // M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3838,7 +3853,7 @@ void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
 const Condition cond) {
 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
 // Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3852,7 +3867,7 @@ void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
 const Condition cond) {
 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
 // Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(ARMv8));
+DCHECK(IsEnabled(ARMv8));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3870,7 +3885,7 @@ void Assembler::vld1(NeonSize size,
 // Instruction details available in ARM DDI 0406C.b, A8.8.320.
 // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
-DCHECK(CpuFeatures::IsSupported(NEON));
+DCHECK(IsEnabled(NEON));
 int vd, d;
 dst.base().split_code(&vd, &d);
 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
@@ -3884,7 +3899,7 @@ void Assembler::vst1(NeonSize size,
 // Instruction details available in ARM DDI 0406C.b, A8.8.404.
 // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
-DCHECK(CpuFeatures::IsSupported(NEON));
+DCHECK(IsEnabled(NEON));
 int vd, d;
 src.base().split_code(&vd, &d);
 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
@@ -3896,7 +3911,7 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
 // Instruction details available in ARM DDI 0406C.b, A8.8.346.
 // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
 // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
-DCHECK(CpuFeatures::IsSupported(NEON));
+DCHECK(IsEnabled(NEON));
 int vd, d;
 dst.split_code(&vd, &d);
 int vm, m;
@@ -3906,6 +3921,8 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
 }
 void Assembler::vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+DCHECK(VfpRegisterIsAvailable(srcdst0));
+DCHECK(VfpRegisterIsAvailable(srcdst1));
 DCHECK(!srcdst0.is(kScratchDoubleReg));
 DCHECK(!srcdst1.is(kScratchDoubleReg));
@@ -4431,6 +4448,7 @@ void Assembler::PatchConstantPoolAccessInstruction(
 Instr instr = instr_at(pc);
 if (access == ConstantPoolEntry::OVERFLOWED) {
 if (CpuFeatures::IsSupported(ARMv7)) {
+CpuFeatureScope scope(this, ARMv7);
 // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
 Instr next_instr = instr_at(pc + kInstrSize);
 DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));


@@ -1603,6 +1603,12 @@ class Assembler : public AssemblerBase {
 (pc_offset() < no_const_pool_before_);
 }
+bool VfpRegisterIsAvailable(DwVfpRegister reg) {
+  DCHECK(reg.is_valid());
+  return IsEnabled(VFP32DREGS) ||
+         (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
+}
 private:
 int next_buffer_check_; // pc offset of next buffer check
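The helper added above encodes the availability rule the new DCHECKs rely on:
the low D-registers are always usable, while the upper bank requires VFP32DREGS
to be enabled. A rough standalone sketch of the same rule (DRegisterIsAvailable
and the plain integer parameters are placeholders, not names from this patch):

  // 'code' is the D-register number; d0-d15 exist on every VFP target,
  // d16-d31 only when the VFP32DREGS feature is enabled.
  bool DRegisterIsAvailable(int code, bool vfp32dregs_enabled) {
    return vfp32dregs_enabled || code < 16;
  }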


@@ -553,17 +553,14 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
 // 3) Fall through to both_loaded_as_doubles.
 // 4) Jump to lhs_not_nan.
 // In cases 3 and 4 we have found out we were dealing with a number-number
-// comparison. If VFP3 is supported the double values of the numbers have
-// been loaded into d7 and d6. Otherwise, the double values have been loaded
-// into r0, r1, r2, and r3.
+// comparison. The double values of the numbers have been loaded into d7 (lhs)
+// and d6 (rhs).
 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
 __ bind(&both_loaded_as_doubles);
-// The arguments have been converted to doubles and stored in d6 and d7, if
-// VFP3 is supported, or in r0, r1, r2, and r3.
+// The arguments have been converted to doubles and stored in d6 and d7.
 __ bind(&lhs_not_nan);
 Label no_nan;
-// ARMv7 VFP3 instructions to implement double precision comparison.
 __ VFPCompareAndSetFlags(d7, d6);
 Label nan;
 __ b(vs, &nan);


@@ -39,6 +39,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
 Label less_4;
 if (CpuFeatures::IsSupported(NEON)) {
+CpuFeatureScope scope(&masm, NEON);
 Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
 Label size_less_than_8;
 __ pld(MemOperand(src, 0));
@@ -193,6 +194,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
 Register src = r1;
 Register chars = r2;
 if (CpuFeatures::IsSupported(NEON)) {
+CpuFeatureScope scope(&masm, NEON);
 Register temp = r3;
 Label loop;


@@ -119,6 +119,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
 DCHECK(kDoubleRegZero.code() == 14);
 DCHECK(kScratchDoubleReg.code() == 15);
+{
+  // We use a run-time check for VFP32DREGS.
+  CpuFeatureScope scope(masm(), VFP32DREGS,
+                        CpuFeatureScope::kDontCheckSupported);
 // Check CPU flags for number of registers, setting the Z condition flag.
 __ CheckFor32DRegs(ip);
@@ -127,6 +132,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
 __ vstm(db_w, sp, d16, d31, ne);
 __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
 __ vstm(db_w, sp, d0, d15);
+}
 // Push all 16 registers (needed to populate FrameDescription::registers_).
 // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -259,9 +265,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
 __ cmp(r4, r1);
 __ b(lt, &outer_push_loop);
-// Check CPU flags for number of registers, setting the Z condition flag.
-__ CheckFor32DRegs(ip);
 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
 int code = config->GetAllocatableDoubleCode(i);


@@ -287,6 +287,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
 !src2.must_output_reloc_info(this) &&
 CpuFeatures::IsSupported(ARMv7) &&
 base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
+CpuFeatureScope scope(this, ARMv7);
 ubfx(dst, src1, 0,
 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
 } else {
@@ -305,6 +306,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
 }
 } else {
+CpuFeatureScope scope(this, ARMv7);
 ubfx(dst, src1, lsb, width, cond);
 }
 }
@@ -325,6 +327,7 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
 }
 } else {
+CpuFeatureScope scope(this, ARMv7);
 sbfx(dst, src1, lsb, width, cond);
 }
 }
@@ -348,6 +351,7 @@ void MacroAssembler::Bfi(Register dst,
 mov(scratch, Operand(scratch, LSL, lsb));
 orr(dst, dst, scratch);
 } else {
+CpuFeatureScope scope(this, ARMv7);
 bfi(dst, src, lsb, width, cond);
 }
 }
@@ -360,6 +364,7 @@ void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
 bic(dst, src, Operand(mask));
 } else {
+CpuFeatureScope scope(this, ARMv7);
 Move(dst, src, cond);
 bfc(dst, lsb, width, cond);
 }
@@ -409,6 +414,7 @@ void MacroAssembler::LoadRoot(Register destination,
 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
 isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
 !predictable_code_size()) {
+CpuFeatureScope scope(this, MOVW_MOVT_IMMEDIATE_LOADS);
 // The CPU supports fast immediate values, and this root will never
 // change. We will load it as a relocatable immediate value.
 Handle<Object> root = isolate()->heap()->root_handle(index);
@@ -2651,7 +2657,8 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
-if (CpuFeatures::IsSupported(VFP3)) {
+if (CpuFeatures::IsSupported(VFPv3)) {
+CpuFeatureScope scope(this, VFPv3);
 vmov(value.low(), smi);
 vcvt_f64_s32(value, 1);
 } else {
@@ -2808,6 +2815,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
 Register src,
 int num_least_bits) {
 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
+CpuFeatureScope scope(this, ARMv7);
 ubfx(dst, src, kSmiTagSize, num_least_bits);
 } else {
 SmiUntag(dst, src);
@@ -3417,6 +3425,7 @@ void MacroAssembler::CheckFor32DRegs(Register scratch) {
 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
+CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
 CheckFor32DRegs(scratch);
 vstm(db_w, location, d16, d31, ne);
 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
@@ -3425,6 +3434,7 @@ void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
+CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
 CheckFor32DRegs(scratch);
 vldm(ia_w, location, d0, d15);
 vldm(ia_w, location, d16, d31, ne);


@@ -234,22 +234,14 @@ PredictableCodeSizeScope::~PredictableCodeSizeScope() {
 // Implementation of CpuFeatureScope
 
 #ifdef DEBUG
-CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
+CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+                                 CheckPolicy check)
     : assembler_(assembler) {
-  DCHECK(CpuFeatures::IsSupported(f));
+  DCHECK_IMPLIES(check == kCheckSupported, CpuFeatures::IsSupported(f));
   old_enabled_ = assembler_->enabled_cpu_features();
-  uint64_t mask = static_cast<uint64_t>(1) << f;
-  // TODO(svenpanne) This special case below doesn't belong here!
-#if V8_TARGET_ARCH_ARM
-  // ARMv7 is implied by VFP3.
-  if (f == VFP3) {
-    mask |= static_cast<uint64_t>(1) << ARMv7;
-  }
-#endif
-  assembler_->set_enabled_cpu_features(old_enabled_ | mask);
+  assembler_->EnableCpuFeature(f);
 }
 
 CpuFeatureScope::~CpuFeatureScope() {
   assembler_->set_enabled_cpu_features(old_enabled_);
 }
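
A minimal usage sketch of the pattern the scope is meant to enforce (the helper name, registers and fallback below are invented for illustration; only the IsSupported()/CpuFeatureScope pairing is taken from the call sites in this patch):

// Illustration only: guard the emission site with IsSupported(), then open a
// CpuFeatureScope so the DEBUG-only IsEnabled() check inside the emitter
// passes. Without the scope, vrinta() would fail a DCHECK in debug builds
// even on hardware that does support ARMv8.
void EmitRoundTiesAway(MacroAssembler* masm, DwVfpRegister dst,
                       DwVfpRegister src) {
  if (CpuFeatures::IsSupported(ARMv8)) {
    CpuFeatureScope scope(masm, ARMv8);
    masm->vrinta(dst, src);
  } else {
    // A fallback (e.g. a runtime call) would go here on pre-ARMv8 targets.
    UNIMPLEMENTED();
  }
}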

View File

@@ -80,9 +80,14 @@ class AssemblerBase: public Malloced {
   void set_enabled_cpu_features(uint64_t features) {
     enabled_cpu_features_ = features;
   }
+  // Features are usually enabled by CpuFeatureScope, which also asserts that
+  // the features are supported before they are enabled.
   bool IsEnabled(CpuFeature f) {
     return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
   }
+  void EnableCpuFeature(CpuFeature f) {
+    enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
+  }
 
   bool is_constant_pool_available() const {
     if (FLAG_enable_embedded_constant_pool) {
@@ -184,15 +189,22 @@ class PredictableCodeSizeScope {
 // Enable a specified feature within a scope.
 class CpuFeatureScope BASE_EMBEDDED {
  public:
+  enum CheckPolicy {
+    kCheckSupported,
+    kDontCheckSupported,
+  };
+
 #ifdef DEBUG
-  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f);
+  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+                  CheckPolicy check = kCheckSupported);
   ~CpuFeatureScope();
 
  private:
   AssemblerBase* assembler_;
   uint64_t old_enabled_;
 #else
-  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) {}
+  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+                  CheckPolicy check = kCheckSupported) {}
 #endif
 };
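
A sketch of where the new kDontCheckSupported policy applies (the function name is invented; CheckFor32DRegs and the conditional stores mirror SaveFPRegs above): when the generated code probes for the feature at run time, the host assembling that code need not support the feature itself, so the support DCHECK is skipped on purpose.

// Illustration only: the emitted code branches on a runtime probe, so the
// scope must not insist that the assembling host supports VFP32DREGS.
void EmitSaveHighDRegsIfPresent(MacroAssembler* masm, Register location,
                                Register scratch) {
  CpuFeatureScope scope(masm, VFP32DREGS,
                        CpuFeatureScope::kDontCheckSupported);
  masm->CheckFor32DRegs(scratch);            // Sets eq if only d0-d15 exist.
  masm->vstm(db_w, location, d16, d31, ne);  // Save d16-d31 when present.
  masm->sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}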

View File

@@ -1227,33 +1227,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArmVnegF64:
       __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintmF32:
+    case kArmVrintmF32: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
-    case kArmVrintmF64:
+    }
+    case kArmVrintmF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintpF32:
+    }
+    case kArmVrintpF32: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
-    case kArmVrintpF64:
+    }
+    case kArmVrintpF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintzF32:
+    }
+    case kArmVrintzF32: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
-    case kArmVrintzF64:
+    }
+    case kArmVrintzF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintaF64:
+    }
+    case kArmVrintaF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintnF32:
+    }
+    case kArmVrintnF32: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
-    case kArmVrintnF64:
+    }
+    case kArmVrintnF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
+    }
     case kArmVcvtF32F64: {
       __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());

View File

@@ -1513,46 +1513,55 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintmF32, node);
 }
 
 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintmF64, node);
 }
 
 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintpF32, node);
 }
 
 void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintpF64, node);
 }
 
 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintzF32, node);
 }
 
 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintzF64, node);
 }
 
 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintaF64, node);
 }
 
 void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintnF32, node);
 }
 
 void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintnF64, node);
 }
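
These bare DCHECKs rely on the rounding visitors only being reached when the selector has advertised the corresponding operators, which only happens on ARMv8-capable targets. A rough sketch of that gating, assuming the usual SupportedMachineOperatorFlags() hook and MachineOperatorBuilder flag names (the exact list is illustrative, not taken from this patch):

// Sketch: advertise the vrint-based roundings only when ARMv8 is available,
// so the Visit* functions above can assume support.
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags;
  if (CpuFeatures::IsSupported(ARMv8)) {
    flags |= MachineOperatorBuilder::kFloat32RoundDown |
             MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat32RoundUp |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat32RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTiesAway |
             MachineOperatorBuilder::kFloat32RoundTiesEven |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }
  return flags;
}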

View File

@@ -793,7 +793,7 @@ enum CpuFeature {
   NUMBER_OF_CPU_FEATURES,
 
   // ARM feature aliases (based on the standard configurations above).
-  VFP3 = ARMv7,
+  VFPv3 = ARMv7,
   NEON = ARMv7,
   VFP32DREGS = ARMv7,
   SUDIV = ARMv7_SUDIV

View File

@@ -244,9 +244,8 @@ TEST(4) {
   Assembler assm(isolate, NULL, 0);
   Label L, C;
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatureScope scope(&assm, VFP3);
+  if (CpuFeatures::IsSupported(VFPv3)) {
+    CpuFeatureScope scope(&assm, VFPv3);
 
     __ mov(ip, Operand(sp));
     __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -450,9 +449,6 @@ static void TestRoundingMode(VCVTTypes types,
   Assembler assm(isolate, NULL, 0);
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatureScope scope(&assm, VFP3);
-
   Label wrong_exception;
 
   __ vmrs(r1);
@@ -480,8 +476,7 @@ static void TestRoundingMode(VCVTTypes types,
   __ vmrs(r2);
   __ tst(r2, Operand(kVFPExceptionMask));
   // Check that we behaved as expected.
-  __ b(&wrong_exception,
-       expected_exception ? eq : ne);
+  __ b(&wrong_exception, expected_exception ? eq : ne);
 
   // There was no exception. Retrieve the result and return.
   __ vmov(r0, s0);
   __ mov(pc, Operand(lr));
@@ -505,7 +500,6 @@ static void TestRoundingMode(VCVTTypes types,
       reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
   ::printf("res = %d\n", res);
   CHECK_EQ(expected, res);
-  }
 }
@@ -1051,9 +1045,8 @@ TEST(13) {
   Assembler assm(isolate, NULL, 0);
   Label L, C;
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatureScope scope(&assm, VFP3);
+  if (CpuFeatures::IsSupported(VFPv3)) {
+    CpuFeatureScope scope(&assm, VFPv3);
 
     __ stm(db_w, sp, r4.bit() | lr.bit());

View File

@@ -505,8 +505,8 @@ TEST(msr_mrs_disasm) {
 TEST(Vfp) {
   SET_UP();
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatureScope scope(&assm, VFP3);
+  if (CpuFeatures::IsSupported(VFPv3)) {
+    CpuFeatureScope scope(&assm, VFPv3);
     COMPARE(vmov(d0, r2, r3),
             "ec432b10 vmov d0, r2, r3");
     COMPARE(vmov(r2, r3, d0),
@@ -737,6 +737,7 @@ TEST(Vfp) {
             "eeba9bcf vcvt.f64.s32 d9, d9, #2");
 
     if (CpuFeatures::IsSupported(VFP32DREGS)) {
+      CpuFeatureScope scope(&assm, VFP32DREGS);
       COMPARE(vmov(d3, d27),
               "eeb03b6b vmov.f64 d3, d27");
       COMPARE(vmov(d18, d7),
@@ -840,6 +841,7 @@ TEST(ARMv8_vrintX_disasm) {
   SET_UP();
 
   if (CpuFeatures::IsSupported(ARMv8)) {
+    CpuFeatureScope scope(&assm, ARMv8);
     COMPARE(vrinta(d0, d0), "feb80b40 vrinta.f64.f64 d0, d0");
     COMPARE(vrinta(d2, d3), "feb82b43 vrinta.f64.f64 d2, d3");
@@ -864,6 +866,7 @@ TEST(ARMv8_vminmax_disasm) {
   SET_UP();
 
   if (CpuFeatures::IsSupported(ARMv8)) {
+    CpuFeatureScope scope(&assm, ARMv8);
     COMPARE(vmaxnm(d0, d1, d2), "fe810b02 vmaxnm.f64 d0, d1, d2");
     COMPARE(vminnm(d3, d4, d5), "fe843b45 vminnm.f64 d3, d4, d5");
     COMPARE(vmaxnm(s6, s7, s8), "fe833a84 vmaxnm.f32 s6, s7, s8");
@@ -878,6 +881,7 @@ TEST(ARMv8_vselX_disasm) {
   SET_UP();
 
   if (CpuFeatures::IsSupported(ARMv8)) {
+    CpuFeatureScope scope(&assm, ARMv8);
     // Native instructions.
     COMPARE(vsel(eq, d0, d1, d2),
             "fe010b02 vseleq.f64 d0, d1, d2");