VM initialization refactoring.

This patch introduces global, once-per-process initialization and moves
the OS and CPU setup there. This makes CPU features isolate-independent.

Review URL: http://codereview.chromium.org/6670119

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7462 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
vitalyr@chromium.org 2011-03-31 16:17:37 +00:00
parent fdd4773dfe
commit 179aef2b8f
42 changed files with 592 additions and 482 deletions

View File

@ -44,11 +44,12 @@
namespace v8 {
namespace internal {
CpuFeatures::CpuFeatures()
: supported_(0),
enabled_(0),
found_by_runtime_probing_(0) {
}
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
#ifdef __arm__
static uint64_t CpuFeaturesImpliedByCompiler() {
@ -70,7 +71,11 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
#endif // def __arm__
void CpuFeatures::Probe(bool portable) {
void CpuFeatures::Probe() {
ASSERT(!initialized_);
#ifdef DEBUG
initialized_ = true;
#endif
#ifndef __arm__
// For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
if (FLAG_enable_vfp3) {
@ -81,7 +86,7 @@ void CpuFeatures::Probe(bool portable) {
supported_ |= 1u << ARMv7;
}
#else // def __arm__
if (portable && Serializer::enabled()) {
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
supported_ |= CpuFeaturesImpliedByCompiler();
return; // No features if we might serialize.
@ -98,8 +103,6 @@ void CpuFeatures::Probe(bool portable) {
supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
if (!portable) found_by_runtime_probing_ = 0;
#endif
}
@ -268,8 +271,8 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
static const int kMinimalBufferSize = 4*KB;
Assembler::Assembler(void* buffer, int buffer_size)
: AssemblerBase(Isolate::Current()),
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
positions_recorder_(this),
allow_peephole_optimization_(false),
emit_debug_code_(FLAG_debug_code) {
@ -715,7 +718,7 @@ static bool fits_shifter(uint32_t imm32,
*instr ^= kMovMvnFlip;
return true;
} else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7)) {
if (imm32 < 0x10000) {
*instr ^= kMovwLeaveCCFlip;
*instr |= EncodeMovwImmediate(imm32);
@ -779,7 +782,7 @@ bool Operand::is_single_instruction(Instr instr) const {
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (must_use_constant_pool() ||
!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
!CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
} else {
@ -822,7 +825,7 @@ void Assembler::addrmod1(Instr instr,
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (x.must_use_constant_pool() ||
!isolate()->cpu_features()->IsSupported(ARMv7)) {
!CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
} else {
@ -1265,7 +1268,7 @@ void Assembler::usat(Register dst,
const Operand& src,
Condition cond) {
// v6 and above.
ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.rm_.is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@ -1293,7 +1296,7 @@ void Assembler::ubfx(Register dst,
int width,
Condition cond) {
// v7 and above.
ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
@ -1313,7 +1316,7 @@ void Assembler::sbfx(Register dst,
int width,
Condition cond) {
// v7 and above.
ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
@ -1328,7 +1331,7 @@ void Assembler::sbfx(Register dst,
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
// v7 and above.
ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
@ -1347,7 +1350,7 @@ void Assembler::bfi(Register dst,
int width,
Condition cond) {
// v7 and above.
ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
ASSERT(CpuFeatures::IsSupported(ARMv7));
ASSERT(!dst.is(pc) && !src.is(pc));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width >= 1) && (width <= (32 - lsb)));
@ -1619,7 +1622,7 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
ASSERT(CpuFeatures::IsEnabled(ARMv7));
ASSERT(src.rm().is(no_reg));
ASSERT(!dst1.is(lr)); // r14.
ASSERT_EQ(0, dst1.code() % 2);
@ -1634,7 +1637,7 @@ void Assembler::strd(Register src1, Register src2,
ASSERT(!src1.is(lr)); // r14.
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
ASSERT(CpuFeatures::IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
@ -1870,7 +1873,7 @@ void Assembler::vldr(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1011(11-8) | offset
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1912,7 +1915,7 @@ void Assembler::vldr(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1956,7 +1959,7 @@ void Assembler::vstr(const DwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
// Vsrc(15-12) | 1011(11-8) | (offset/4)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1997,7 +2000,7 @@ void Assembler::vstr(const SwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -2043,7 +2046,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
// VMOV can accept an immediate of the form:
//
@ -2096,7 +2099,7 @@ void Assembler::vmov(const DwVfpRegister dst,
const Condition cond) {
// Dd = immediate
// Instruction details available in ARM DDI 0406B, A8-640.
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
uint32_t enc;
if (FitsVMOVDoubleImmediate(imm, &enc)) {
@ -2133,7 +2136,7 @@ void Assembler::vmov(const SwVfpRegister dst,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@ -2146,7 +2149,7 @@ void Assembler::vmov(const DwVfpRegister dst,
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
@ -2160,7 +2163,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src1.is(pc) && !src2.is(pc));
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
src1.code()*B12 | 0xB*B8 | B4 | dst.code());
@ -2175,7 +2178,7 @@ void Assembler::vmov(const Register dst1,
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
dst1.code()*B12 | 0xB*B8 | B4 | src.code());
@ -2189,7 +2192,7 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@ -2204,7 +2207,7 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@ -2329,7 +2332,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@ -2338,7 +2341,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@ -2347,7 +2350,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@ -2356,7 +2359,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@ -2365,7 +2368,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@ -2374,7 +2377,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@ -2383,7 +2386,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@ -2413,7 +2416,7 @@ void Assembler::vadd(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@ -2428,7 +2431,7 @@ void Assembler::vsub(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@ -2443,7 +2446,7 @@ void Assembler::vmul(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@ -2458,7 +2461,7 @@ void Assembler::vdiv(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@ -2471,7 +2474,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@ -2484,7 +2487,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(src2 == 0.0);
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
src1.code()*B12 | 0x5*B9 | B8 | B6);
@ -2495,7 +2498,7 @@ void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@ -2505,7 +2508,7 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@ -2516,7 +2519,7 @@ void Assembler::vsqrt(const DwVfpRegister dst,
const Condition cond) {
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}

View File

@ -468,58 +468,97 @@ class MemOperand BASE_EMBEDDED {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
class CpuFeatures {
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
void Probe(bool portable);
static void Probe();
// Check whether a feature is supported by the target CPU.
bool IsSupported(CpuFeature f) const {
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
return (supported_ & (1u << f)) != 0;
}
#ifdef DEBUG
// Check whether a feature is currently enabled.
bool IsEnabled(CpuFeature f) const {
return (enabled_ & (1u << f)) != 0;
static bool IsEnabled(CpuFeature f) {
ASSERT(initialized_);
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL) {
// When no isolate is available, work as if we're running in
// release mode.
return IsSupported(f);
}
unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
return (enabled & (1u << f)) != 0;
}
#endif
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f)
: cpu_features_(Isolate::Current()->cpu_features()),
isolate_(Isolate::Current()) {
ASSERT(cpu_features_->IsSupported(f));
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
old_enabled_ = cpu_features_->enabled_;
cpu_features_->enabled_ |= 1u << f;
(CpuFeatures::found_by_runtime_probing_ & mask) == 0);
isolate_ = Isolate::UncheckedCurrent();
old_enabled_ = 0;
if (isolate_ != NULL) {
old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
isolate_->set_enabled_cpu_features(old_enabled_ | mask);
}
}
~Scope() {
ASSERT_EQ(Isolate::Current(), isolate_);
cpu_features_->enabled_ = old_enabled_;
ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
if (isolate_ != NULL) {
isolate_->set_enabled_cpu_features(old_enabled_);
}
}
private:
unsigned old_enabled_;
CpuFeatures* cpu_features_;
Isolate* isolate_;
unsigned old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
class TryForceFeatureScope BASE_EMBEDDED {
public:
explicit TryForceFeatureScope(CpuFeature f)
: old_supported_(CpuFeatures::supported_) {
if (CanForce()) {
CpuFeatures::supported_ |= (1u << f);
}
}
~TryForceFeatureScope() {
if (CanForce()) {
CpuFeatures::supported_ = old_supported_;
}
}
private:
static bool CanForce() {
// It's only safe to temporarily force support of CPU features
// when there's only a single isolate, which is guaranteed when
// the serializer is enabled.
return Serializer::enabled();
}
const unsigned old_supported_;
};
private:
CpuFeatures();
unsigned supported_;
unsigned enabled_;
unsigned found_by_runtime_probing_;
friend class Isolate;
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@ -564,7 +603,7 @@ class Assembler : public AssemblerBase {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(void* buffer, int buffer_size);
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.

View File

@ -1173,9 +1173,11 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Probe the CPU to set the supported features, because this builtin
// may be called before the initialization performs CPU setup.
masm->isolate()->cpu_features()->Probe(false);
CpuFeatures::TryForceFeatureScope scope(VFP3);
if (!CpuFeatures::IsSupported(VFP3)) {
__ Abort("Unreachable code: Cannot optimize without VFP3 support.");
return;
}
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.

View File

@ -502,7 +502,7 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
__ vmov(d7.high(), scratch1);
@ -570,7 +570,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
if (CpuFeatures::IsSupported(VFP3) &&
destination == kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
// Load the double from tagged HeapNumber to double register.
@ -585,7 +585,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
// Handle loading a double from a smi.
__ bind(&is_smi);
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi to double using VFP instructions.
__ SmiUntag(scratch1, object);
@ -676,7 +676,7 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(single_scratch, scratch1);
__ vcvt_f64_s32(double_dst, single_scratch);
@ -744,7 +744,7 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
@ -818,7 +818,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
SwVfpRegister single_scratch = double_scratch.low();
// Load the double value.
@ -1153,7 +1153,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Lhs is a smi, rhs is a number.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
// Convert lhs to a double in d7.
CpuFeatures::Scope scope(VFP3);
__ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
@ -1193,7 +1193,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Rhs is a smi, lhs is a heap number.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load the double from lhs, tagged HeapNumber r1, to d7.
__ sub(r7, lhs, Operand(kHeapObjectTag));
@ -1373,7 +1373,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
@ -1463,7 +1463,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
if (isolate->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ CheckMap(object,
scratch1,
@ -1597,7 +1597,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
Isolate* isolate = masm->isolate();
if (isolate->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
__ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
Label no_nan;
@ -1707,7 +1707,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses VFP3 instructions.
ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP3));
Label false_result;
Label not_heap_number;
@ -1794,7 +1794,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
const Builtins::JavaScript& builtin) {
Label slow, slow_reverse, do_the_call;
bool use_fp_registers =
Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
CpuFeatures::IsSupported(VFP3) &&
Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
@ -1811,7 +1811,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(rhs, ASR, kSmiTagSize));
__ vmov(s15, r7);
@ -1907,7 +1907,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r0 to double in d7.
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
@ -1964,7 +1964,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r1 to double in d6.
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
@ -2177,7 +2177,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
// The code below for writing into heap numbers isn't capable of writing
// the register as an unsigned int so we go to slow case if we hit this
// case.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, &slow);
@ -2225,7 +2225,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
// result.
__ mov(r0, Operand(r5));
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r2);
@ -3077,7 +3077,7 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
CpuFeatures::IsSupported(VFP3) &&
op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@ -3190,7 +3190,7 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, not_numbers);
@ -3229,7 +3229,7 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// result.
__ mov(r0, Operand(r5));
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP3);
@ -3358,7 +3358,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
FloatingPointHelper::Destination destination =
Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
CpuFeatures::IsSupported(VFP3) &&
op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@ -3545,7 +3545,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// to return a heap number if we can.
// The non vfp3 code does not support this special case, so jump to
// runtime if we don't support it.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi,
(result_type_ <= TRBinaryOpIC::INT32) ? &transition
: &return_heap_number);
@ -3571,7 +3571,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ Ret();
__ bind(&return_heap_number);
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
heap_number_result = r5;
GenerateHeapResultAllocation(masm,
@ -3806,7 +3806,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (tagged) {
// Argument is a number and is on stack and in r0.
@ -3894,7 +3894,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
}
__ Ret();
} // if (Isolate::Current()->cpu_features()->IsSupported(VFP3))
} // if (CpuFeatures::IsSupported(VFP3))
__ bind(&calculate);
if (tagged) {
@ -3903,7 +3903,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) UNREACHABLE();
if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
CpuFeatures::Scope scope(VFP3);
Label no_update;
@ -4102,7 +4102,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ mov(r0, Operand(r2));
}
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r1);
@ -4143,7 +4143,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime;
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label base_not_smi;
@ -6807,7 +6807,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load left and right operand

View File

@ -235,7 +235,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
use_vfp3_ = Isolate::Current()->cpu_features()->IsSupported(VFP3);
use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}

View File

@ -770,7 +770,7 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
true_target->Branch(eq);
// Slow case.
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Implements the slow case by using ToBooleanStub.
// The ToBooleanStub takes a single argument, and
@ -967,8 +967,7 @@ void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
ASSERT(Token::IsBitOp(op_));
if ((op_ == Token::SHR) &&
!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
// >>> requires an unsigned to double conversion and the non VFP code
// does not support this conversion.
__ b(cond, entry_label());
@ -1072,7 +1071,7 @@ void DeferredInlineSmiOperation::Generate() {
void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
Register heap_number,
Register scratch) {
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, answer);
if (op_ == Token::SHR) {
@ -1142,7 +1141,7 @@ void DeferredInlineSmiOperation::GenerateNonSmiInput() {
// SHR is special because it is required to produce a positive answer.
__ cmp(int32, Operand(0, RelocInfo::NONE));
}
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
// Non VFP code cannot convert from unsigned to double, so fall back
@ -4617,7 +4616,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
Load(args->at(0));
Load(args->at(1));
if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (!CpuFeatures::IsSupported(VFP3)) {
frame_->CallRuntime(Runtime::kMath_pow, 2);
frame_->EmitPush(r0);
} else {
@ -4771,7 +4770,7 @@ void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (!CpuFeatures::IsSupported(VFP3)) {
frame_->CallRuntime(Runtime::kMath_sqrt, 1);
frame_->EmitPush(r0);
} else {
@ -5360,7 +5359,7 @@ void CodeGenerator::GenerateRandomHeapNumber(
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
__ PrepareCallCFunction(1, r0);
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@ -5676,7 +5675,7 @@ void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
frame_->SpillAllButCopyTOSToR0();
@ -5691,7 +5690,7 @@ void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
frame_->SpillAllButCopyTOSToR0();
@ -5706,7 +5705,7 @@ void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
frame_->SpillAllButCopyTOSToR0();

View File

@ -42,11 +42,12 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
cpu_features->Probe(true);
if (!cpu_features->IsSupported(VFP3) || Serializer::enabled()) {
V8::DisableCrankshaft();
}
CpuFeatures::Probe();
}
bool CPU::SupportsCrankshaft() {
return CpuFeatures::IsSupported(VFP3);
}

View File

@ -562,7 +562,7 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
void FullCodeGenerator::DoTest(Label* if_true,
Label* if_false,
Label* fall_through) {
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Emit the inlined tests assumed by the stub.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@ -2802,7 +2802,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (isolate()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
__ PrepareCallCFunction(1, r0);
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);

View File

@ -40,10 +40,10 @@ namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
: Assembler(Isolate::Current(), buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
code_object_(HEAP->undefined_value()) {
code_object_(isolate()->heap()->undefined_value()) {
}
@ -292,7 +292,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
} else if (!src2.is_single_instruction() &&
!src2.must_use_constant_pool() &&
Isolate::Current()->cpu_features()->IsSupported(ARMv7) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
@ -305,7 +305,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
@ -320,7 +320,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
@ -348,7 +348,7 @@ void MacroAssembler::Bfi(Register dst,
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
@ -362,7 +362,7 @@ void MacroAssembler::Bfi(Register dst,
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
} else {
@ -373,7 +373,7 @@ void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7)) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
@ -619,7 +619,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
ASSERT_EQ(dst1.code() + 1, dst2.code());
// Generate two ldr instructions if ldrd is not available.
if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
@ -644,7 +644,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
ASSERT_EQ(src1.code() + 1, src2.code());
// Generate two str instructions if strd is not available.
if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
@ -1903,7 +1903,7 @@ void MacroAssembler::ConvertToInt32(Register source,
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
@ -1999,7 +1999,7 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Register scratch1,
Register scratch2,
CheckForInexactConversion check_inexact) {
ASSERT(Isolate::Current()->cpu_features()->IsSupported(VFP3));
ASSERT(CpuFeatures::IsSupported(VFP3));
CpuFeatures::Scope scope(VFP3);
Register prev_fpscr = scratch1;
Register scratch = scratch2;
@ -2157,7 +2157,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7)) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));

View File

@ -953,7 +953,7 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register fval,
Register scratch1,
Register scratch2) {
if (masm->isolate()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
@ -2048,7 +2048,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -- sp[argc * 4] : receiver
// -----------------------------------
if (!masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
if (!CpuFeatures::IsSupported(VFP3)) {
return heap()->undefined_value();
}
@ -3509,7 +3509,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case kExternalFloatArray:
if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
@ -3548,7 +3548,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
@ -3563,7 +3563,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
@ -3627,7 +3627,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
@ -3820,7 +3820,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (array_type == kExternalFloatArray) {

View File

@ -53,6 +53,8 @@ class CPU : public AllStatic {
// Initializes the cpu architecture support. Called once at VM startup.
static void Setup();
static bool SupportsCrankshaft();
// Flush instruction cache.
static void FlushICache(void* start, size_t size);

View File

@ -48,24 +48,37 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
CpuFeatures::CpuFeatures()
: supported_(0),
enabled_(0),
found_by_runtime_probing_(0) {
}
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe(bool portable) {
ASSERT(HEAP->HasBeenSetup());
void CpuFeatures::Probe() {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
if (portable && Serializer::enabled()) {
#ifdef DEBUG
initialized_ = true;
#endif
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
Assembler assm(NULL, 0);
const int kBufferSize = 4 * KB;
VirtualMemory* memory = new VirtualMemory(kBufferSize);
if (!memory->IsReserved()) {
delete memory;
return;
}
ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
delete memory;
return;
}
Assembler assm(NULL, memory->address(), kBufferSize);
Label cpuid, done;
#define __ assm.
// Save old esp, since we are going to modify the stack.
@ -119,27 +132,15 @@ void CpuFeatures::Probe(bool portable) {
__ ret(0);
#undef __
CodeDesc desc;
assm.GetCode(&desc);
Object* code;
{ MaybeObject* maybe_code =
assm.isolate()->heap()->CreateCode(desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>::null());
if (!maybe_code->ToObject(&code)) return;
}
if (!code->IsCode()) return;
PROFILE(ISOLATE,
CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
supported_ = probe();
found_by_runtime_probing_ = supported_;
uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
supported_ |= os_guarantees;
found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
found_by_runtime_probing_ &= ~os_guarantees;
delete memory;
}
@ -297,8 +298,8 @@ bool Operand::is_reg(Register reg) const {
static void InitCoverageLog();
#endif
Assembler::Assembler(void* buffer, int buffer_size)
: AssemblerBase(Isolate::Current()),
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
@ -386,7 +387,7 @@ void Assembler::CodeTargetAlign() {
void Assembler::cpuid() {
ASSERT(isolate()->cpu_features()->IsEnabled(CPUID));
ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@ -747,7 +748,7 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@ -758,7 +759,7 @@ void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@ -769,7 +770,7 @@ void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: 0f 40 + cc /r.
@ -1450,7 +1451,7 @@ void Assembler::nop() {
void Assembler::rdtsc() {
ASSERT(isolate()->cpu_features()->IsEnabled(RDTSC));
ASSERT(CpuFeatures::IsEnabled(RDTSC));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@ -1856,7 +1857,7 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDB);
@ -1865,7 +1866,7 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fisttp_d(const Operand& adr) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDD);
@ -2134,7 +2135,7 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@ -2145,7 +2146,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2156,7 +2157,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2167,7 +2168,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@ -2178,7 +2179,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2189,7 +2190,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2200,7 +2201,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2211,7 +2212,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2222,7 +2223,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2233,7 +2234,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2264,7 +2265,7 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2275,7 +2276,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
void Assembler::movmskpd(Register dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2286,7 +2287,7 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2298,7 +2299,7 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@ -2308,7 +2309,7 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2319,7 +2320,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2330,7 +2331,7 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@ -2341,7 +2342,7 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
void Assembler::movdqu(XMMRegister dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@ -2352,7 +2353,7 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2364,7 +2365,7 @@ void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
void Assembler::movntdq(const Operand& dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2400,7 +2401,7 @@ void Assembler::movdbl(const Operand& dst, XMMRegister src) {
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@ -2411,7 +2412,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@ -2422,7 +2423,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2433,7 +2434,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
void Assembler::movss(const Operand& dst, XMMRegister src ) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3); // float
@ -2444,7 +2445,7 @@ void Assembler::movss(const Operand& dst, XMMRegister src ) {
void Assembler::movss(XMMRegister dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3); // float
@ -2455,7 +2456,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
void Assembler::movss(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@ -2466,7 +2467,7 @@ void Assembler::movss(XMMRegister dst, XMMRegister src) {
void Assembler::movd(XMMRegister dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2477,7 +2478,7 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
void Assembler::movd(const Operand& dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2488,7 +2489,7 @@ void Assembler::movd(const Operand& dst, XMMRegister src) {
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2499,7 +2500,7 @@ void Assembler::pand(XMMRegister dst, XMMRegister src) {
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2510,7 +2511,7 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
void Assembler::por(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2521,7 +2522,7 @@ void Assembler::por(XMMRegister dst, XMMRegister src) {
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2533,7 +2534,7 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
void Assembler::psllq(XMMRegister reg, int8_t shift) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2545,7 +2546,7 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) {
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2556,7 +2557,7 @@ void Assembler::psllq(XMMRegister dst, XMMRegister src) {
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2568,7 +2569,7 @@ void Assembler::psrlq(XMMRegister reg, int8_t shift) {
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2579,7 +2580,7 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2591,7 +2592,7 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2604,7 +2605,7 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);

View File

@ -446,16 +446,15 @@ class Displacement BASE_EMBEDDED {
// } else {
// // Generate standard x87 floating point code.
// }
class CpuFeatures {
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. If the portable flag is set,
// the method sets safe defaults if the serializer is enabled
// (snapshots must be portable).
void Probe(bool portable);
void Clear() { supported_ = 0; }
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
bool IsSupported(CpuFeature f) const {
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
@ -463,46 +462,85 @@ class CpuFeatures {
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
#ifdef DEBUG
// Check whether a feature is currently enabled.
bool IsEnabled(CpuFeature f) const {
return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
static bool IsEnabled(CpuFeature f) {
ASSERT(initialized_);
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL) {
// When no isolate is available, work as if we're running in
// release mode.
return IsSupported(f);
}
uint64_t enabled = isolate->enabled_cpu_features();
return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
}
#endif
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f)
: cpu_features_(Isolate::Current()->cpu_features()),
isolate_(Isolate::Current()) {
explicit Scope(CpuFeature f) {
uint64_t mask = static_cast<uint64_t>(1) << f;
ASSERT(cpu_features_->IsSupported(f));
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(cpu_features_->found_by_runtime_probing_ & mask) == 0);
old_enabled_ = cpu_features_->enabled_;
cpu_features_->enabled_ |= mask;
(CpuFeatures::found_by_runtime_probing_ & mask) == 0);
isolate_ = Isolate::UncheckedCurrent();
old_enabled_ = 0;
if (isolate_ != NULL) {
old_enabled_ = isolate_->enabled_cpu_features();
isolate_->set_enabled_cpu_features(old_enabled_ | mask);
}
}
~Scope() {
ASSERT_EQ(Isolate::Current(), isolate_);
cpu_features_->enabled_ = old_enabled_;
ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
if (isolate_ != NULL) {
isolate_->set_enabled_cpu_features(old_enabled_);
}
}
private:
uint64_t old_enabled_;
CpuFeatures* cpu_features_;
Isolate* isolate_;
uint64_t old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
class TryForceFeatureScope BASE_EMBEDDED {
public:
explicit TryForceFeatureScope(CpuFeature f)
: old_supported_(CpuFeatures::supported_) {
if (CanForce()) {
CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
}
}
~TryForceFeatureScope() {
if (CanForce()) {
CpuFeatures::supported_ = old_supported_;
}
}
private:
static bool CanForce() {
// It's only safe to temporarily force support of CPU features
// when there's only a single isolate, which is guaranteed when
// the serializer is enabled.
return Serializer::enabled();
}
const uint64_t old_supported_;
};
private:
CpuFeatures();
uint64_t supported_;
uint64_t enabled_;
uint64_t found_by_runtime_probing_;
friend class Isolate;
#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
static uint64_t found_by_runtime_probing_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@ -535,7 +573,8 @@ class Assembler : public AssemblerBase {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(void* buffer, int buffer_size);
// TODO(vitalyr): the assembler does not need an isolate.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.

View File

@ -1523,12 +1523,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// We shouldn't be performing on-stack replacement in the first
// place if the CPU features we need for the optimized Crankshaft
// code aren't supported.
CpuFeatures* cpu_features = masm->isolate()->cpu_features();
cpu_features->Probe(false);
if (!cpu_features->IsSupported(SSE2)) {
CpuFeatures::TryForceFeatureScope scope(SSE2);
if (!CpuFeatures::IsSupported(SSE2)) {
__ Abort("Unreachable code: Cannot optimize without SSE2 support.");
return;
}

View File

@ -771,7 +771,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// number in eax.
__ AllocateHeapNumber(eax, ecx, ebx, slow);
// Store the result in the HeapNumber and return.
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(left));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@ -821,7 +821,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
}
if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
__ AllocateHeapNumber(ecx, ebx, no_reg, slow);
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
switch (op_) {
@ -926,7 +926,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
Label not_floats;
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
@ -1060,7 +1060,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@ -1660,7 +1660,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
// number in eax.
__ AllocateHeapNumber(eax, ecx, ebx, slow);
// Store the result in the HeapNumber and return.
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(left));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@ -1705,7 +1705,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
break;
}
__ AllocateHeapNumber(ecx, ebx, no_reg, slow);
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
switch (op_) {
@ -1837,7 +1837,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::DIV: {
Label not_floats;
Label not_int32;
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
@ -1958,7 +1958,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@ -2070,7 +2070,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV: {
Label not_floats;
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
@ -2173,7 +2173,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@ -2275,7 +2275,7 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV: {
Label not_floats;
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
@ -2373,7 +2373,7 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@ -2572,7 +2572,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&loaded);
} else { // UNTAGGED.
if (masm->isolate()->cpu_features()->IsSupported(SSE4_1)) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
__ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
@ -2826,8 +2826,7 @@ void IntegerConvert(MacroAssembler* masm,
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
if (type_info.IsInteger32() &&
masm->isolate()->cpu_features()->IsEnabled(SSE2)) {
if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
return;
@ -3375,7 +3374,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
IntegerConvert(masm,
eax,
TypeInfo::Unknown(),
masm->isolate()->cpu_features()->IsSupported(SSE3),
CpuFeatures::IsSupported(SSE3),
&slow);
// Do the bitwise operation and check if the result fits in a smi.
@ -3398,7 +3397,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ AllocateHeapNumber(ebx, edx, edi, &slow);
__ mov(eax, Operand(ebx));
}
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ecx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@ -4270,7 +4269,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
FixedArray::kHeaderSize));
__ test(probe, Immediate(kSmiTagMask));
__ j(zero, not_found);
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
@ -4509,7 +4508,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (include_number_compare_) {
Label non_number_comparison;
Label unordered;
if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
CpuFeatures::Scope use_cmov(CMOV);
@ -6455,8 +6454,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or SS2 or CMOV is unsupported.
CpuFeatures* cpu_features = masm->isolate()->cpu_features();
if (cpu_features->IsSupported(SSE2) && cpu_features->IsSupported(CMOV)) {
if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope scope1(SSE2);
CpuFeatures::Scope scope2(CMOV);

View File

@ -96,7 +96,7 @@ class GenericBinaryOpStub: public CodeStub {
if (static_operands_type_.IsSmi()) {
mode_ = NO_OVERWRITE;
}
use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@ -235,7 +235,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}

View File

@ -556,7 +556,7 @@ void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
__ sar(val, 1);
// If there was an overflow, bits 30 and 31 of the original number disagree.
__ xor_(val, 0x80000000u);
if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ cvtsi2sd(xmm0, Operand(val));
} else {
@ -574,7 +574,7 @@ void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
no_reg, &allocation_failed);
VirtualFrame* clone = new VirtualFrame(frame_);
scratch.Unuse();
if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
} else {
@ -587,7 +587,7 @@ void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
RegisterFile empty_regs;
SetFrame(clone, &empty_regs);
__ bind(&allocation_failed);
if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
if (!CpuFeatures::IsSupported(SSE2)) {
// Pop the value from the floating point stack.
__ fstp(0);
}
@ -614,7 +614,7 @@ void CodeGenerator::Load(Expression* expr) {
safe_int32_mode_enabled() &&
expr->side_effect_free() &&
expr->num_bit_ops() > 2 &&
masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::IsSupported(SSE2)) {
BreakTarget unsafe_bailout;
JumpTarget done;
unsafe_bailout.set_expected_height(frame_->height());
@ -995,7 +995,7 @@ class DeferredInlineBinaryOperation: public DeferredCode {
Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
if (Token::IsBitOp(op_) &&
masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::IsSupported(SSE2)) {
return &non_smi_input_;
} else {
return entry_label();
@ -1018,7 +1018,7 @@ void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
void DeferredInlineBinaryOperation::Generate() {
// Registers are not saved implicitly for this stub, so we should not
// tread on the registers that were not passed to us.
if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
if (CpuFeatures::IsSupported(SSE2) &&
((op_ == Token::ADD) ||
(op_ == Token::SUB) ||
(op_ == Token::MUL) ||
@ -1154,7 +1154,7 @@ void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
// The left_ and right_ registers have not been initialized yet.
__ mov(right_, Immediate(smi_value_));
__ mov(left_, Operand(dst_));
if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
if (!CpuFeatures::IsSupported(SSE2)) {
__ jmp(entry_label());
return;
} else {
@ -1267,7 +1267,7 @@ void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
// This trashes right_.
__ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
__ bind(&allocation_ok);
if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
if (CpuFeatures::IsSupported(SSE2) &&
op_ != Token::SHR) {
CpuFeatures::Scope use_sse2(SSE2);
ASSERT(Token::IsBitOp(op_));
@ -3032,7 +3032,7 @@ void CodeGenerator::ConstantSmiComparison(Condition cc,
// constant smi. If the non-smi is a heap number and this is not
// a loop condition, inline the floating point code.
if (!is_loop_condition &&
masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::IsSupported(SSE2)) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
CpuFeatures::Scope use_sse2(SSE2);
@ -3196,7 +3196,7 @@ void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
ASSERT(right_side->is_register());
JumpTarget not_numbers;
if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
// Load left and right operand into registers xmm0 and xmm1 and compare.
@ -7457,7 +7457,7 @@ void CodeGenerator::GenerateRandomHeapNumber(
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
// This is implemented on both SSE2 and FPU.
if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, Operand(ebx));
@ -7863,7 +7863,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
if (!CpuFeatures::IsSupported(SSE2)) {
Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
frame_->Push(&res);
} else {
@ -8080,7 +8080,7 @@ void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
if (!CpuFeatures::IsSupported(SSE2)) {
Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
frame()->Push(&result);
} else {
@ -10211,7 +10211,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ int3();
__ bind(&ok);
}
if (masm.isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope enable(SSE2);
__ push(edi);
__ push(esi);

View File

@ -42,12 +42,12 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
cpu_features->Clear();
cpu_features->Probe(true);
if (!cpu_features->IsSupported(SSE2) || Serializer::enabled()) {
V8::DisableCrankshaft();
}
CpuFeatures::Probe();
}
// Crankshaft code generation on ia32 requires SSE2; report support
// based on the (already probed) static CPU feature set.
bool CPU::SupportsCrankshaft() {
  return CpuFeatures::IsSupported(SSE2);
}

View File

@ -2726,7 +2726,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
// This is implemented on both SSE2 and FPU.
if (isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, Operand(ebx));
@ -2801,7 +2801,7 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
if (isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
MathPowStub stub;
__ CallStub(&stub);
} else {

View File

@ -1062,7 +1062,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
if (isolate()->cpu_features()->IsSupported(SSE4_1)) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
@ -3427,7 +3427,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ jmp(&done);
__ bind(&heap_number);
if (isolate()->cpu_features()->IsSupported(SSE3)) {
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
NearLabel convert;
// Use more powerful conversion when sse3 is available.
@ -3537,7 +3537,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
// the JS bitwise operations.
__ cvttsd2si(result_reg, Operand(input_reg));
__ cmp(result_reg, 0x80000000u);
if (isolate()->cpu_features()->IsSupported(SSE3)) {
if (CpuFeatures::IsSupported(SSE3)) {
// This will deoptimize if the exponent of the input in out of range.
CpuFeatures::Scope scope(SSE3);
NearLabel convert, done;

View File

@ -1633,9 +1633,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
LOperand* xmm_temp =
(instr->CanTruncateToInt32() && cpu_features->IsSupported(SSE3))
(instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@ -1656,7 +1655,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
bool needs_temp = instr->CanTruncateToInt32() &&
!Isolate::Current()->cpu_features()->IsSupported(SSE3);
!CpuFeatures::IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;

View File

@ -42,7 +42,7 @@ namespace internal {
// MacroAssembler implementation.
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
: Assembler(Isolate::Current(), buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
code_object_(isolate()->heap()->undefined_value()) {
@ -231,7 +231,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
void MacroAssembler::FCmp() {
if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
ffree(0);
fincstp();

View File

@ -1921,7 +1921,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
if (!isolate()->cpu_features()->IsSupported(SSE2)) {
if (!CpuFeatures::IsSupported(SSE2)) {
return isolate()->heap()->undefined_value();
}
@ -3292,7 +3292,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
__ mov(ebx, edi);
__ cmp(eax, arg_number);
if (isolate()->cpu_features()->IsSupported(CMOV)) {
if (CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope use_cmov(CMOV);
__ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
} else {
@ -3611,10 +3611,10 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// processors that don't support SSE2. The code in IntegerConvert
// (code-stubs-ia32.cc) is roughly what is needed here though the
// conversion failure case does not need to be handled.
if (isolate()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
if (array_type != kExternalIntArray &&
array_type != kExternalUnsignedIntArray) {
ASSERT(isolate()->cpu_features()->IsSupported(SSE2));
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
// ecx: untagged integer value
@ -3642,7 +3642,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
break;
}
} else {
if (isolate()->cpu_features()->IsSupported(SSE3)) {
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
// fisttp stores values as signed integers. To represent the
// entire range of int and unsigned int arrays, store as a
@ -3655,7 +3655,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
} else {
ASSERT(isolate()->cpu_features()->IsSupported(SSE2));
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
// We can easily implement the correct rounding behavior for the
// range [0, 2^31-1]. For the time being, to keep this code simple,

View File

@ -828,7 +828,7 @@ void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
cgen()->unsafe_bailout_->Branch(not_equal);
}
if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
if (!CpuFeatures::IsSupported(SSE2)) {
UNREACHABLE();
} else {
CpuFeatures::Scope use_sse2(SSE2);

View File

@ -414,7 +414,6 @@ Isolate::Isolate()
runtime_profiler_(NULL),
compilation_cache_(NULL),
counters_(new Counters()),
cpu_features_(NULL),
code_range_(NULL),
break_access_(OS::CreateMutex()),
logger_(new Logger()),
@ -593,8 +592,6 @@ Isolate::~Isolate() {
delete counters_;
counters_ = NULL;
delete cpu_features_;
cpu_features_ = NULL;
delete handle_scope_implementer_;
handle_scope_implementer_ = NULL;
@ -680,7 +677,6 @@ bool Isolate::PreInit() {
write_input_buffer_ = new StringInputBuffer();
global_handles_ = new GlobalHandles(this);
bootstrapper_ = new Bootstrapper();
cpu_features_ = new CpuFeatures();
handle_scope_implementer_ = new HandleScopeImplementer();
stub_cache_ = new StubCache(this);
ast_sentinels_ = new AstSentinels();
@ -725,9 +721,6 @@ bool Isolate::Init(Deserializer* des) {
CpuProfiler::Setup();
HeapProfiler::Setup();
// Setup the platform OS support.
OS::Setup();
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
@ -786,11 +779,6 @@ bool Isolate::Init(Deserializer* des) {
// stack guard.
heap_.SetStackLimits();
// Setup the CPU support. Must be done after heap setup and after
// any deserialization because we have to have the initial heap
// objects in place for creating the code object used for probing.
CPU::Setup();
deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
runtime_profiler_->Setup();

View File

@ -242,6 +242,7 @@ class HashMap;
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
V(uint64_t, enabled_cpu_features, 0) \
V(v8::Debug::EventCallback, debug_event_callback, NULL) \
V(DebuggerAgent*, debugger_agent_instance, NULL)
#else
@ -708,10 +709,6 @@ class Isolate {
Bootstrapper* bootstrapper() { return bootstrapper_; }
Counters* counters() { return counters_; }
// TODO(isolates): Having CPU features per isolate is probably too
// flexible. We only really need to have the set of currently
// enabled features for asserts in DEBUG builds.
CpuFeatures* cpu_features() { return cpu_features_; }
CodeRange* code_range() { return code_range_; }
RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
CompilationCache* compilation_cache() { return compilation_cache_; }
@ -1029,7 +1026,6 @@ class Isolate {
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
Counters* counters_;
CpuFeatures* cpu_features_;
CodeRange* code_range_;
Mutex* break_access_;
Heap heap_;

View File

@ -508,10 +508,10 @@ class Mutex {
// ----------------------------------------------------------------------------
// ScopedLock/ScopedUnlock
// ScopedLock
//
// Stack-allocated ScopedLocks/ScopedUnlocks provide block-scoped
// locking and unlocking of a mutex.
// Stack-allocated ScopedLocks provide block-scoped locking and
// unlocking of a mutex.
class ScopedLock {
public:
explicit ScopedLock(Mutex* mutex): mutex_(mutex) {

View File

@ -41,6 +41,9 @@
namespace v8 {
namespace internal {
static Mutex* init_once_mutex = OS::CreateMutex();
static bool init_once_called = false;
bool V8::is_running_ = false;
bool V8::has_been_setup_ = false;
bool V8::has_been_disposed_ = false;
@ -49,6 +52,8 @@ bool V8::use_crankshaft_ = true;
bool V8::Initialize(Deserializer* des) {
InitializeOncePerProcess();
// The current thread may not yet had entered an isolate to run.
// Note the Isolate::Current() may be non-null because for various
// initialization purposes an initializing thread may be assigned an isolate
@ -68,15 +73,6 @@ bool V8::Initialize(Deserializer* des) {
Isolate* isolate = Isolate::Current();
if (isolate->IsInitialized()) return true;
#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
use_crankshaft_ = false;
#else
use_crankshaft_ = FLAG_crankshaft;
#endif
// Peephole optimization might interfere with deoptimization.
FLAG_peephole_optimization = !use_crankshaft_;
is_running_ = true;
has_been_setup_ = true;
has_fatal_error_ = false;
@ -188,4 +184,32 @@ Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
return heap_number;
}
// Performs process-global, isolate-independent initialization exactly once:
// OS setup, CPU feature probing, and the decision whether Crankshaft may be
// used. Thread-safe via init_once_mutex; subsequent calls return immediately.
void V8::InitializeOncePerProcess() {
  ScopedLock lock(init_once_mutex);
  if (init_once_called) return;
  init_once_called = true;
  // Setup the platform OS support.
  OS::Setup();
#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
  // Crankshaft is not available on old-ABI ARM builds.
  use_crankshaft_ = false;
#else
  use_crankshaft_ = FLAG_crankshaft;
#endif
  // Snapshots must stay portable, so no optimizing compiler while serializing.
  if (Serializer::enabled()) {
    use_crankshaft_ = false;
  }
  // Probe CPU features (must happen after OS::Setup so platform-implied
  // features are available), then disable Crankshaft if the CPU lacks
  // what the target backend requires.
  CPU::Setup();
  if (!CPU::SupportsCrankshaft()) {
    use_crankshaft_ = false;
  }
  // Peephole optimization might interfere with deoptimization.
  FLAG_peephole_optimization = !use_crankshaft_;
}
} } // namespace v8::internal

View File

@ -84,7 +84,6 @@ class V8 : public AllStatic {
static void TearDown();
static bool IsRunning() { return is_running_; }
static bool UseCrankshaft() { return use_crankshaft_; }
static void DisableCrankshaft() { use_crankshaft_ = false; }
// To be dead you have to have lived
// TODO(isolates): move IsDead to Isolate.
static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
@ -108,6 +107,8 @@ class V8 : public AllStatic {
static bool IdleNotification();
private:
static void InitializeOncePerProcess();
// True if engine is currently running
static bool is_running_;
// True if V8 has ever been run

View File

@ -38,22 +38,38 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
CpuFeatures::CpuFeatures()
: supported_(kDefaultCpuFeatures),
enabled_(0),
found_by_runtime_probing_(0) {
}
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
void CpuFeatures::Probe(bool portable) {
ASSERT(HEAP->HasBeenSetup());
void CpuFeatures::Probe() {
ASSERT(!initialized_);
#ifdef DEBUG
initialized_ = true;
#endif
supported_ = kDefaultCpuFeatures;
if (portable && Serializer::enabled()) {
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
Assembler assm(NULL, 0);
const int kBufferSize = 4 * KB;
VirtualMemory* memory = new VirtualMemory(kBufferSize);
if (!memory->IsReserved()) {
delete memory;
return;
}
ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
delete memory;
return;
}
Assembler assm(NULL, memory->address(), kBufferSize);
Label cpuid, done;
#define __ assm.
// Save old rsp, since we are going to modify the stack.
@ -117,31 +133,20 @@ void CpuFeatures::Probe(bool portable) {
__ ret(0);
#undef __
CodeDesc desc;
assm.GetCode(&desc);
Isolate* isolate = Isolate::Current();
MaybeObject* maybe_code =
isolate->heap()->CreateCode(desc,
Code::ComputeFlags(Code::STUB),
Handle<Object>());
Object* code;
if (!maybe_code->ToObject(&code)) return;
if (!code->IsCode()) return;
PROFILE(isolate,
CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
supported_ = probe();
found_by_runtime_probing_ = supported_;
found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
supported_ |= os_guarantees;
found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
found_by_runtime_probing_ &= ~os_guarantees;
// SSE2 and CMOV must be available on an X64 CPU.
ASSERT(IsSupported(CPUID));
ASSERT(IsSupported(SSE2));
ASSERT(IsSupported(CMOV));
delete memory;
}
@ -339,8 +344,8 @@ bool Operand::AddressUsesRegister(Register reg) const {
static void InitCoverageLog();
#endif
Assembler::Assembler(void* buffer, int buffer_size)
: AssemblerBase(Isolate::Current()),
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
code_targets_(100),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
@ -1037,7 +1042,7 @@ void Assembler::cmpb_al(Immediate imm8) {
void Assembler::cpuid() {
ASSERT(isolate()->cpu_features()->IsEnabled(CPUID));
ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x0F);
@ -2388,7 +2393,7 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
@ -2398,7 +2403,7 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fisttp_d(const Operand& adr) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
@ -2716,7 +2721,7 @@ void Assembler::movq(Register dst, XMMRegister src) {
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
@ -2728,7 +2733,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);

View File

@ -434,14 +434,15 @@ class Operand BASE_EMBEDDED {
// } else {
// // Generate standard x87 or SSE2 floating point code.
// }
class CpuFeatures {
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
void Probe(bool portable);
static void Probe();
// Check whether a feature is supported by the target CPU.
bool IsSupported(CpuFeature f) const {
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@ -449,51 +450,65 @@ class CpuFeatures {
if (f == SAHF && !FLAG_enable_sahf) return false;
return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
#ifdef DEBUG
// Check whether a feature is currently enabled.
bool IsEnabled(CpuFeature f) const {
return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
static bool IsEnabled(CpuFeature f) {
ASSERT(initialized_);
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL) {
// When no isolate is available, work as if we're running in
// release mode.
return IsSupported(f);
}
uint64_t enabled = isolate->enabled_cpu_features();
return (enabled & (V8_UINT64_C(1) << f)) != 0;
}
#endif
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f)
: cpu_features_(Isolate::Current()->cpu_features()),
isolate_(Isolate::Current()) {
uint64_t mask = (V8_UINT64_C(1) << f);
ASSERT(cpu_features_->IsSupported(f));
explicit Scope(CpuFeature f) {
uint64_t mask = V8_UINT64_C(1) << f;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(cpu_features_->found_by_runtime_probing_ & mask) == 0);
old_enabled_ = cpu_features_->enabled_;
cpu_features_->enabled_ |= mask;
(CpuFeatures::found_by_runtime_probing_ & mask) == 0);
isolate_ = Isolate::UncheckedCurrent();
old_enabled_ = 0;
if (isolate_ != NULL) {
old_enabled_ = isolate_->enabled_cpu_features();
isolate_->set_enabled_cpu_features(old_enabled_ | mask);
}
}
~Scope() {
ASSERT_EQ(Isolate::Current(), isolate_);
cpu_features_->enabled_ = old_enabled_;
ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
if (isolate_ != NULL) {
isolate_->set_enabled_cpu_features(old_enabled_);
}
}
private:
uint64_t old_enabled_;
CpuFeatures* cpu_features_;
Isolate* isolate_;
uint64_t old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
private:
CpuFeatures();
private:
// Safe defaults include SSE2 and CMOV for X64. It is always available, if
// anyone checks, but they shouldn't need to check.
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
uint64_t supported_;
uint64_t enabled_;
uint64_t found_by_runtime_probing_;
friend class Isolate;
#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
static uint64_t found_by_runtime_probing_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@ -526,7 +541,7 @@ class Assembler : public AssemblerBase {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(void* buffer, int buffer_size);
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.

View File

@ -2961,7 +2961,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
times_1,
FixedArray::kHeaderSize));
__ JumpIfSmi(probe, not_found);
ASSERT(Isolate::Current()->cpu_features()->IsSupported(SSE2));
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
__ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));

View File

@ -42,10 +42,12 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
Isolate::Current()->cpu_features()->Probe(true);
if (Serializer::enabled()) {
V8::DisableCrankshaft();
}
CpuFeatures::Probe();
}
bool CPU::SupportsCrankshaft() {
return true; // Yay!
}

View File

@ -1614,8 +1614,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
LOperand* xmm_temp =
(instr->CanTruncateToInt32() &&
Isolate::Current()->cpu_features()->IsSupported(SSE3))
(instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);

View File

@ -41,7 +41,7 @@ namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
: Assembler(Isolate::Current(), buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
root_array_available_(true),

View File

@ -58,7 +58,7 @@ TEST(0) {
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
__ add(r0, r0, Operand(r1));
__ mov(pc, Operand(lr));
@ -84,7 +84,7 @@ TEST(1) {
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
__ mov(r1, Operand(r0));
@ -121,7 +121,7 @@ TEST(2) {
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
__ mov(r1, Operand(r0));
@ -174,7 +174,7 @@ TEST(3) {
} T;
T t;
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
__ mov(ip, Operand(sp));
@ -241,11 +241,11 @@ TEST(4) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ mov(ip, Operand(sp));
@ -357,9 +357,9 @@ TEST(5) {
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
// On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
__ ubfx(r0, r0, 1, 12); // 0b00..010101010101 = 0x555
@ -393,9 +393,9 @@ TEST(6) {
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
__ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
__ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
@ -436,9 +436,9 @@ static void TestRoundingMode(VCVTTypes types,
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label wrong_exception;

View File

@ -61,7 +61,7 @@ TEST(AssemblerIa320) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
__ mov(eax, Operand(esp, 4));
__ add(eax, Operand(esp, 8));
@ -89,7 +89,7 @@ TEST(AssemblerIa321) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
Label L, C;
__ mov(edx, Operand(esp, 4));
@ -127,7 +127,7 @@ TEST(AssemblerIa322) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
Label L, C;
__ mov(edx, Operand(esp, 4));
@ -167,15 +167,15 @@ TEST(AssemblerIa322) {
typedef int (*F3)(float x);
TEST(AssemblerIa323) {
if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
InitializeVM();
if (!CpuFeatures::IsSupported(SSE2)) return;
v8::HandleScope scope;
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CHECK(CpuFeatures::IsSupported(SSE2));
{ CpuFeatures::Scope fscope(SSE2);
__ cvttss2si(eax, Operand(esp, 4));
__ ret(0);
@ -202,15 +202,15 @@ TEST(AssemblerIa323) {
typedef int (*F4)(double x);
TEST(AssemblerIa324) {
if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
InitializeVM();
if (!CpuFeatures::IsSupported(SSE2)) return;
v8::HandleScope scope;
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CHECK(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
__ cvttsd2si(eax, Operand(esp, 4));
__ ret(0);
@ -239,7 +239,7 @@ TEST(AssemblerIa325) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
__ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE));
__ ret(0);
@ -259,14 +259,14 @@ TEST(AssemblerIa325) {
typedef double (*F5)(double x, double y);
TEST(AssemblerIa326) {
if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
InitializeVM();
if (!CpuFeatures::IsSupported(SSE2)) return;
v8::HandleScope scope;
CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CHECK(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
__ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
__ movdbl(xmm1, Operand(esp, 3 * kPointerSize));
@ -305,14 +305,14 @@ TEST(AssemblerIa326) {
typedef double (*F6)(int x);
TEST(AssemblerIa328) {
if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
InitializeVM();
if (!CpuFeatures::IsSupported(SSE2)) return;
v8::HandleScope scope;
CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
CHECK(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
v8::internal::byte buffer[256];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
__ mov(eax, Operand(esp, 4));
__ cvtsi2sd(xmm0, Operand(eax));
// Copy xmm0 to st(0) using eight bytes of stack.

View File

@ -35,30 +35,30 @@
#include "serialize.h"
#include "cctest.h"
using v8::internal::byte;
using v8::internal::OS;
using v8::internal::Assembler;
using v8::internal::Operand;
using v8::internal::Immediate;
using v8::internal::Label;
using v8::internal::rax;
using v8::internal::rsi;
using v8::internal::rdi;
using v8::internal::rcx;
using v8::internal::rdx;
using v8::internal::rbp;
using v8::internal::rsp;
using v8::internal::r8;
using v8::internal::r9;
using v8::internal::r13;
using v8::internal::r15;
using v8::internal::times_1;
using v8::internal::FUNCTION_CAST;
using v8::internal::CodeDesc;
using v8::internal::FUNCTION_CAST;
using v8::internal::Immediate;
using v8::internal::Isolate;
using v8::internal::Label;
using v8::internal::OS;
using v8::internal::Operand;
using v8::internal::byte;
using v8::internal::greater;
using v8::internal::less_equal;
using v8::internal::not_equal;
using v8::internal::greater;
using v8::internal::r13;
using v8::internal::r15;
using v8::internal::r8;
using v8::internal::r9;
using v8::internal::rax;
using v8::internal::rbp;
using v8::internal::rcx;
using v8::internal::rdi;
using v8::internal::rdx;
using v8::internal::rsi;
using v8::internal::rsp;
using v8::internal::times_1;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
@ -93,7 +93,7 @@ TEST(AssemblerX64ReturnOperation) {
&actual_size,
true));
CHECK(buffer);
Assembler assm(buffer, static_cast<int>(actual_size));
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
__ movq(rax, arg2);
@ -115,7 +115,7 @@ TEST(AssemblerX64StackOperations) {
&actual_size,
true));
CHECK(buffer);
Assembler assm(buffer, static_cast<int>(actual_size));
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
@ -147,7 +147,7 @@ TEST(AssemblerX64ArithmeticOperations) {
&actual_size,
true));
CHECK(buffer);
Assembler assm(buffer, static_cast<int>(actual_size));
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that adds arguments returning the sum.
__ movq(rax, arg2);
@ -169,7 +169,7 @@ TEST(AssemblerX64ImulOperation) {
&actual_size,
true));
CHECK(buffer);
Assembler assm(buffer, static_cast<int>(actual_size));
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that multiplies arguments returning the high
// word.
@ -197,7 +197,7 @@ TEST(AssemblerX64MemoryOperands) {
&actual_size,
true));
CHECK(buffer);
Assembler assm(buffer, static_cast<int>(actual_size));
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
__ push(rbp);
@ -231,7 +231,7 @@ TEST(AssemblerX64ControlFlow) {
&actual_size,
true));
CHECK(buffer);
Assembler assm(buffer, static_cast<int>(actual_size));
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 1 and returns it.
__ push(rbp);
@ -260,7 +260,7 @@ TEST(AssemblerX64LoopImmediates) {
&actual_size,
true));
CHECK(buffer);
Assembler assm(buffer, static_cast<int>(actual_size));
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble two loops using rax as counter, and verify the ending counts.
Label Fail;
__ movq(rax, Immediate(-3));

View File

@ -72,11 +72,11 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// Setup V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
#define SETUP() \
InitializeVM(); \
v8::HandleScope scope; \
#define SETUP() \
InitializeVM(); \
v8::HandleScope scope; \
byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
Assembler assm(buffer, 4*1024); \
Assembler assm(Isolate::Current(), buffer, 4*1024); \
bool failure = false;
@ -270,7 +270,7 @@ TEST(Type0) {
"13a06000 movne r6, #0");
// mov -> movw.
if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7)) {
COMPARE(mov(r5, Operand(0x01234), LeaveCC, ne),
"13015234 movwne r5, #4660");
// We only disassemble one instruction so the eor instruction is not here.
@ -360,7 +360,7 @@ TEST(Type1) {
TEST(Type3) {
SETUP();
if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7)) {
COMPARE(ubfx(r0, r1, 5, 10),
"e7e902d1 ubfx r0, r1, #5, #10");
COMPARE(ubfx(r1, r0, 5, 10),
@ -415,7 +415,7 @@ TEST(Type3) {
TEST(Vfp) {
SETUP();
if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
COMPARE(vmov(d0, d1),
"eeb00b41 vmov.f64 d0, d1");

View File

@ -58,7 +58,7 @@ TEST(DisasmIa320) {
InitializeVM();
v8::HandleScope scope;
v8::internal::byte buffer[2048];
Assembler assm(buffer, sizeof buffer);
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
DummyStaticFunction(NULL); // just bloody use it (DELETE; debugging)
// Short immediate instructions
@ -107,12 +107,12 @@ TEST(DisasmIa320) {
__ xor_(edx, 3);
__ nop();
{
CHECK(Isolate::Current()->cpu_features()->IsSupported(CPUID));
CHECK(CpuFeatures::IsSupported(CPUID));
CpuFeatures::Scope fscope(CPUID);
__ cpuid();
}
{
CHECK(Isolate::Current()->cpu_features()->IsSupported(RDTSC));
CHECK(CpuFeatures::IsSupported(RDTSC));
CpuFeatures::Scope fscope(RDTSC);
__ rdtsc();
}
@ -375,7 +375,7 @@ TEST(DisasmIa320) {
__ fwait();
__ nop();
{
if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
__ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
@ -397,7 +397,7 @@ TEST(DisasmIa320) {
// cmov.
{
if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
if (CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope use_cmov(CMOV);
__ cmov(overflow, eax, Operand(eax, 0));
__ cmov(no_overflow, eax, Operand(eax, 1));
@ -420,7 +420,7 @@ TEST(DisasmIa320) {
// andpd, cmpltsd, movaps, psllq, psrlq, por.
{
if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ andpd(xmm0, xmm1);
__ andpd(xmm1, xmm2);
@ -449,7 +449,7 @@ TEST(DisasmIa320) {
}
{
if (Isolate::Current()->cpu_features()->IsSupported(SSE4_1)) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
__ pextrd(Operand(eax), xmm0, 1);
__ pinsrd(xmm1, Operand(eax), 0);

View File

@ -70,7 +70,7 @@ static void CheckFindCodeObject() {
// Test FindCodeObject
#define __ assm.
Assembler assm(NULL, 0);
Assembler assm(Isolate::Current(), NULL, 0);
__ nop(); // supported on all architectures

View File

@ -103,6 +103,7 @@ void TestMemCopy(Vector<byte> src,
TEST(MemCopy) {
v8::V8::Initialize();
OS::Setup();
const int N = OS::kMinComplexMemCopy + 128;
Vector<byte> buffer1 = Vector<byte>::New(N);