Decouple CpuFeatures from serializer state.

Traditionally, we cross-compile a snapshot iff the serializer is enabled. This will change in the future.

Changes:
- CpuFeatures probing is done once per process, depending on whether we cross-compile.
- CpuFeatures are consolidated into the platform-independent assembler.h as much as possible.
- FLAG_enable_<feature> will only be checked at probing time (already the case for ARM).
- The serializer state is cached by the MacroAssembler.
- PlatformFeatureScope is no longer necessary.
- CpuFeature enum values no longer map to CPUID bit fields.

R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/285233010

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21347 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Parent: 2d1a75d608
Commit: fe243379f8
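Note (illustrative, not part of this patch): the sketch below shows the probing model this change introduces, using the CpuFeatures declaration the patch adds to assembler.h. The call site is hypothetical; only the Probe()/IsSupported() API comes from the diff.

  // Probing happens at most once per process. When we cross-compile (i.e.
  // build a snapshot), only statically known features are enabled; otherwise
  // the CPU is probed at runtime. A second call to Probe() returns
  // immediately because initialized_ is already set.
  CpuFeatures::Probe(cross_compile);

  // From then on, code generators simply ask the process-wide bit set; the
  // FLAG_enable_<feature> flags and the serializer state are no longer
  // consulted here.
  if (CpuFeatures::IsSupported(ARMv7)) {
    // Emit ARMv7-only instructions.
  }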
@@ -47,6 +47,9 @@ namespace v8 {
namespace internal {


bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }


int Register::NumAllocatableRegisters() {
  return kMaxNumAllocatableRegisters;
}
@@ -45,21 +45,6 @@
namespace v8 {
namespace internal {

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
unsigned CpuFeatures::cache_line_size_ = 64;


ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
@@ -67,24 +52,16 @@ ExternalReference ExternalReference::cpu_features() {
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) {
    answer |= 1u << ARMv7;
  }
  if (FLAG_enable_armv7) answer |= 1u << ARMv7;
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) {
    answer |= 1u << VFP3 | 1u << ARMv7;
  }
  if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) {
    answer |= 1u << VFP32DREGS;
  }
  if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
#endif  // CAN_USE_VFP32DREGS
#ifdef CAN_USE_NEON
  if (FLAG_enable_neon) {
    answer |= 1u << NEON;
  }
  if (FLAG_enable_neon) answer |= 1u << NEON;
#endif  // CAN_USE_VFP32DREGS
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
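Note (illustrative, not part of this patch): after this change a CpuFeature value is just a bit index into an unsigned mask, identical on every architecture, rather than mirroring CPUID bit fields. The snippet below restates the pattern used throughout the patch; the local variables are hypothetical.

  // At probe time, each enabled feature contributes one bit; the FLAG_*
  // checks happen only here.
  unsigned supported = 0;
  if (FLAG_enable_vfp3) supported |= 1u << VFP3;  // VFP3 is a small enum index.

  // Afterwards a feature test is a plain mask check, which is exactly what
  // the new CpuFeatures::IsSupported() does.
  bool has_vfp3 = (supported & (1u << VFP3)) != 0;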
@ -94,114 +71,57 @@ static unsigned CpuFeaturesImpliedByCompiler() {
|
||||
}
|
||||
|
||||
|
||||
const char* DwVfpRegister::AllocationIndexToString(int index) {
|
||||
ASSERT(index >= 0 && index < NumAllocatableRegisters());
|
||||
ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
|
||||
kNumReservedRegisters - 1);
|
||||
if (index >= kDoubleRegZero.code())
|
||||
index += kNumReservedRegisters;
|
||||
void CpuFeatures::ProbeImpl(bool cross_compile) {
|
||||
supported_ |= OS::CpuFeaturesImpliedByPlatform();
|
||||
supported_ |= CpuFeaturesImpliedByCompiler();
|
||||
cache_line_size_ = 64;
|
||||
|
||||
return VFPRegisters::Name(index, true);
|
||||
}
|
||||
|
||||
|
||||
void CpuFeatures::Probe(bool serializer_enabled) {
|
||||
uint64_t standard_features = static_cast<unsigned>(
|
||||
OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
|
||||
ASSERT(supported_ == 0 ||
|
||||
(supported_ & standard_features) == standard_features);
|
||||
#ifdef DEBUG
|
||||
initialized_ = true;
|
||||
#endif
|
||||
|
||||
// Get the features implied by the OS and the compiler settings. This is the
|
||||
// minimal set of features which is also alowed for generated code in the
|
||||
// snapshot.
|
||||
supported_ |= standard_features;
|
||||
|
||||
if (serializer_enabled) {
|
||||
// No probing for features if we might serialize (generate snapshot).
|
||||
return;
|
||||
}
|
||||
// Only use statically determined features for cross compile (snapshot).
|
||||
if (cross_compile) return;
|
||||
|
||||
#ifndef __arm__
|
||||
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
|
||||
// For the simulator build, use whatever the flags specify.
|
||||
if (FLAG_enable_armv7) {
|
||||
supported_ |= static_cast<uint64_t>(1) << ARMv7;
|
||||
if (FLAG_enable_vfp3) {
|
||||
supported_ |= static_cast<uint64_t>(1) << VFP3;
|
||||
}
|
||||
if (FLAG_enable_neon) {
|
||||
supported_ |= 1u << NEON;
|
||||
supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
|
||||
}
|
||||
if (FLAG_enable_sudiv) {
|
||||
supported_ |= static_cast<uint64_t>(1) << SUDIV;
|
||||
}
|
||||
if (FLAG_enable_movw_movt) {
|
||||
supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
|
||||
}
|
||||
if (FLAG_enable_32dregs) {
|
||||
supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
|
||||
}
|
||||
}
|
||||
if (FLAG_enable_unaligned_accesses) {
|
||||
supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
|
||||
supported_ |= 1u << ARMv7;
|
||||
if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
|
||||
if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
|
||||
if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
|
||||
if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
|
||||
if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
|
||||
}
|
||||
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
|
||||
|
||||
#else // __arm__
|
||||
// Probe for additional features not already known to be available.
|
||||
// Probe for additional features at runtime.
|
||||
CPU cpu;
|
||||
if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
|
||||
if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
|
||||
// This implementation also sets the VFP flags if runtime
|
||||
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
|
||||
// 0406B, page A1-6.
|
||||
found_by_runtime_probing_only_ |=
|
||||
static_cast<uint64_t>(1) << VFP3 |
|
||||
static_cast<uint64_t>(1) << ARMv7;
|
||||
supported_ |= 1u << VFP3 | 1u << ARMv7;
|
||||
}
|
||||
|
||||
if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
|
||||
found_by_runtime_probing_only_ |= 1u << NEON;
|
||||
}
|
||||
if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
|
||||
if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
|
||||
|
||||
if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
|
||||
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
|
||||
}
|
||||
|
||||
if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
|
||||
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
|
||||
}
|
||||
|
||||
if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
|
||||
&& cpu.architecture() >= 7) {
|
||||
found_by_runtime_probing_only_ |=
|
||||
static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
|
||||
}
|
||||
|
||||
// Use movw/movt for QUALCOMM ARMv7 cores.
|
||||
if (cpu.implementer() == CPU::QUALCOMM &&
|
||||
cpu.architecture() >= 7 &&
|
||||
FLAG_enable_movw_movt) {
|
||||
found_by_runtime_probing_only_ |=
|
||||
static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
|
||||
if (cpu.architecture() >= 7) {
|
||||
if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
|
||||
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
|
||||
// Use movw/movt for QUALCOMM ARMv7 cores.
|
||||
if (FLAG_enable_movw_movt && cpu.implementer() == CPU::QUALCOMM) {
|
||||
supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
|
||||
}
|
||||
}
|
||||
|
||||
// ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
|
||||
if (cpu.implementer() == CPU::ARM &&
|
||||
(cpu.part() == CPU::ARM_CORTEX_A5 ||
|
||||
cpu.part() == CPU::ARM_CORTEX_A9)) {
|
||||
if (cpu.implementer() == CPU::ARM && (cpu.part() == CPU::ARM_CORTEX_A5 ||
|
||||
cpu.part() == CPU::ARM_CORTEX_A9)) {
|
||||
cache_line_size_ = 32;
|
||||
}
|
||||
|
||||
if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
|
||||
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
|
||||
}
|
||||
|
||||
supported_ |= found_by_runtime_probing_only_;
|
||||
if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
|
||||
#endif
|
||||
|
||||
// Assert that VFP3 implies ARMv7.
|
||||
ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
|
||||
}
|
||||
|
||||
@ -283,6 +203,18 @@ void CpuFeatures::PrintFeatures() {
|
||||
}
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Implementation of DwVfpRegister
|
||||
|
||||
const char* DwVfpRegister::AllocationIndexToString(int index) {
|
||||
ASSERT(index >= 0 && index < NumAllocatableRegisters());
|
||||
ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
|
||||
kNumReservedRegisters - 1);
|
||||
if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
|
||||
return VFPRegisters::Name(index, true);
|
||||
}
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Implementation of RelocInfo
|
||||
|
||||
@ -1074,11 +1006,10 @@ static bool fits_shifter(uint32_t imm32,
|
||||
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
|
||||
// space. There is no guarantee that the relocated location can be similarly
|
||||
// encoded.
|
||||
bool Operand::must_output_reloc_info(Isolate* isolate,
|
||||
const Assembler* assembler) const {
|
||||
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
|
||||
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
|
||||
if (assembler != NULL && assembler->predictable_code_size()) return true;
|
||||
return Serializer::enabled(isolate);
|
||||
return assembler->serializer_enabled();
|
||||
} else if (RelocInfo::IsNone(rmode_)) {
|
||||
return false;
|
||||
}
|
||||
@ -1086,8 +1017,7 @@ bool Operand::must_output_reloc_info(Isolate* isolate,
|
||||
}
|
||||
|
||||
|
||||
static bool use_mov_immediate_load(Isolate* isolate,
|
||||
const Operand& x,
|
||||
static bool use_mov_immediate_load(const Operand& x,
|
||||
const Assembler* assembler) {
|
||||
if (assembler != NULL && !assembler->can_use_constant_pool()) {
|
||||
// If there is no constant pool available, we must use an mov immediate.
|
||||
@ -1098,7 +1028,7 @@ static bool use_mov_immediate_load(Isolate* isolate,
|
||||
(assembler == NULL || !assembler->predictable_code_size())) {
|
||||
// Prefer movw / movt to constant pool if it is more efficient on the CPU.
|
||||
return true;
|
||||
} else if (x.must_output_reloc_info(isolate, assembler)) {
|
||||
} else if (x.must_output_reloc_info(assembler)) {
|
||||
// Prefer constant pool if data is likely to be patched.
|
||||
return false;
|
||||
} else {
|
||||
@ -1108,18 +1038,17 @@ static bool use_mov_immediate_load(Isolate* isolate,
|
||||
}
|
||||
|
||||
|
||||
bool Operand::is_single_instruction(Isolate* isolate,
|
||||
const Assembler* assembler,
|
||||
bool Operand::is_single_instruction(const Assembler* assembler,
|
||||
Instr instr) const {
|
||||
if (rm_.is_valid()) return true;
|
||||
uint32_t dummy1, dummy2;
|
||||
if (must_output_reloc_info(isolate, assembler) ||
|
||||
if (must_output_reloc_info(assembler) ||
|
||||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
|
||||
// The immediate operand cannot be encoded as a shifter operand, or use of
|
||||
// constant pool is required. For a mov instruction not setting the
|
||||
// condition code additional instruction conventions can be used.
|
||||
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
|
||||
return !use_mov_immediate_load(isolate, *this, assembler);
|
||||
return !use_mov_immediate_load(*this, assembler);
|
||||
} else {
|
||||
// If this is not a mov or mvn instruction there will always an additional
|
||||
// instructions - either mov or ldr. The mov might actually be two
|
||||
@ -1139,16 +1068,16 @@ void Assembler::move_32_bit_immediate(Register rd,
|
||||
const Operand& x,
|
||||
Condition cond) {
|
||||
RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
|
||||
if (x.must_output_reloc_info(isolate(), this)) {
|
||||
if (x.must_output_reloc_info(this)) {
|
||||
RecordRelocInfo(rinfo);
|
||||
}
|
||||
|
||||
if (use_mov_immediate_load(isolate(), x, this)) {
|
||||
if (use_mov_immediate_load(x, this)) {
|
||||
Register target = rd.code() == pc.code() ? ip : rd;
|
||||
// TODO(rmcilroy): add ARMv6 support for immediate loads.
|
||||
ASSERT(CpuFeatures::IsSupported(ARMv7));
|
||||
if (!FLAG_enable_ool_constant_pool &&
|
||||
x.must_output_reloc_info(isolate(), this)) {
|
||||
x.must_output_reloc_info(this)) {
|
||||
// Make sure the movw/movt doesn't get separated.
|
||||
BlockConstPoolFor(2);
|
||||
}
|
||||
@ -1176,7 +1105,7 @@ void Assembler::addrmod1(Instr instr,
|
||||
// Immediate.
|
||||
uint32_t rotate_imm;
|
||||
uint32_t immed_8;
|
||||
if (x.must_output_reloc_info(isolate(), this) ||
|
||||
if (x.must_output_reloc_info(this) ||
|
||||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
|
||||
// The immediate operand cannot be encoded as a shifter operand, so load
|
||||
// it first to register ip and change the original instruction to use ip.
|
||||
@ -1858,7 +1787,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
|
||||
// Immediate.
|
||||
uint32_t rotate_imm;
|
||||
uint32_t immed_8;
|
||||
if (src.must_output_reloc_info(isolate(), this) ||
|
||||
if (src.must_output_reloc_info(this) ||
|
||||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
|
||||
// Immediate operand cannot be encoded, load it first to register ip.
|
||||
move_32_bit_immediate(ip, src);
|
||||
@ -3260,10 +3189,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|
||||
void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
|
||||
if (!RelocInfo::IsNone(rinfo.rmode())) {
|
||||
// Don't record external references unless the heap will be serialized.
|
||||
if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
|
||||
if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
|
||||
return;
|
||||
}
|
||||
if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
|
||||
!serializer_enabled() && !emit_debug_code()) {
|
||||
return;
|
||||
}
|
||||
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
|
||||
if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
|
||||
@ -3492,8 +3420,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
|
||||
// data
|
||||
|
||||
bool found = false;
|
||||
if (!Serializer::enabled(isolate()) &&
|
||||
(rinfo.rmode() >= RelocInfo::CELL)) {
|
||||
if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
|
||||
for (int j = 0; j < i; j++) {
|
||||
RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
|
||||
|
||||
@ -3597,7 +3524,7 @@ void ConstantPoolBuilder::AddEntry(Assembler* assm,
|
||||
// Try to merge entries which won't be patched.
|
||||
int merged_index = -1;
|
||||
if (RelocInfo::IsNone(rmode) ||
|
||||
(!Serializer::enabled(assm->isolate()) && (rmode >= RelocInfo::CELL))) {
|
||||
(!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
|
||||
size_t i;
|
||||
std::vector<RelocInfo>::const_iterator it;
|
||||
for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
|
||||
|
@ -50,71 +50,6 @@
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// CpuFeatures keeps track of which features are supported by the target CPU.
|
||||
// Supported features must be enabled by a CpuFeatureScope before use.
|
||||
class CpuFeatures : public AllStatic {
|
||||
public:
|
||||
// Detect features of the target CPU. Set safe defaults if the serializer
|
||||
// is enabled (snapshots must be portable).
|
||||
static void Probe(bool serializer_enabled);
|
||||
|
||||
// Display target use when compiling.
|
||||
static void PrintTarget();
|
||||
|
||||
// Display features.
|
||||
static void PrintFeatures();
|
||||
|
||||
// Check whether a feature is supported by the target CPU.
|
||||
static bool IsSupported(CpuFeature f) {
|
||||
ASSERT(initialized_);
|
||||
return Check(f, supported_);
|
||||
}
|
||||
|
||||
static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
|
||||
return Check(f, cross_compile_) ||
|
||||
(IsSupported(f) &&
|
||||
!(Serializer::enabled(isolate) &&
|
||||
Check(f, found_by_runtime_probing_only_)));
|
||||
}
|
||||
|
||||
static unsigned cache_line_size() { return cache_line_size_; }
|
||||
|
||||
static bool VerifyCrossCompiling() {
|
||||
return cross_compile_ == 0;
|
||||
}
|
||||
|
||||
static bool VerifyCrossCompiling(CpuFeature f) {
|
||||
unsigned mask = flag2set(f);
|
||||
return cross_compile_ == 0 ||
|
||||
(cross_compile_ & mask) == mask;
|
||||
}
|
||||
|
||||
static bool SupportsCrankshaft() { return CpuFeatures::IsSupported(VFP3); }
|
||||
|
||||
private:
|
||||
static bool Check(CpuFeature f, unsigned set) {
|
||||
return (set & flag2set(f)) != 0;
|
||||
}
|
||||
|
||||
static unsigned flag2set(CpuFeature f) {
|
||||
return 1u << f;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
static bool initialized_;
|
||||
#endif
|
||||
static unsigned supported_;
|
||||
static unsigned found_by_runtime_probing_only_;
|
||||
static unsigned cache_line_size_;
|
||||
|
||||
static unsigned cross_compile_;
|
||||
|
||||
friend class ExternalReference;
|
||||
friend class PlatformFeatureScope;
|
||||
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
|
||||
};
|
||||
|
||||
|
||||
// CPU Registers.
|
||||
//
|
||||
// 1) We would prefer to use an enum, but enum values are assignment-
|
||||
@ -588,11 +523,9 @@ class Operand BASE_EMBEDDED {
|
||||
// the instruction this operand is used for is a MOV or MVN instruction the
|
||||
// actual instruction to use is required for this calculation. For other
|
||||
// instructions instr is ignored.
|
||||
bool is_single_instruction(Isolate* isolate,
|
||||
const Assembler* assembler,
|
||||
bool is_single_instruction(const Assembler* assembler,
|
||||
Instr instr = 0) const;
|
||||
bool must_output_reloc_info(Isolate* isolate,
|
||||
const Assembler* assembler) const;
|
||||
bool must_output_reloc_info(const Assembler* assembler) const;
|
||||
|
||||
inline int32_t immediate() const {
|
||||
ASSERT(!rm_.is_valid());
|
||||
|
@ -1651,7 +1651,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
|
||||
__ mov(r0, Operand(Smi::FromInt(flags)));
|
||||
int properties_count = constant_properties->length() / 2;
|
||||
if (expr->may_store_doubles() || expr->depth() > 1 ||
|
||||
Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
|
||||
masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
|
||||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
|
||||
__ Push(r3, r2, r1, r0);
|
||||
__ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
|
||||
|
@ -77,9 +77,7 @@ int MacroAssembler::CallSize(
|
||||
int size = 2 * kInstrSize;
|
||||
Instr mov_instr = cond | MOV | LeaveCC;
|
||||
intptr_t immediate = reinterpret_cast<intptr_t>(target);
|
||||
if (!Operand(immediate, rmode).is_single_instruction(isolate(),
|
||||
this,
|
||||
mov_instr)) {
|
||||
if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
|
||||
size += kInstrSize;
|
||||
}
|
||||
return size;
|
||||
@ -99,9 +97,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
|
||||
int size = 2 * kInstrSize;
|
||||
Instr mov_instr = cond | MOV | LeaveCC;
|
||||
intptr_t immediate = reinterpret_cast<intptr_t>(target);
|
||||
if (!Operand(immediate, rmode).is_single_instruction(isolate,
|
||||
NULL,
|
||||
mov_instr)) {
|
||||
if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
|
||||
size += kInstrSize;
|
||||
}
|
||||
return size;
|
||||
@ -261,11 +257,11 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
|
||||
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
|
||||
Condition cond) {
|
||||
if (!src2.is_reg() &&
|
||||
!src2.must_output_reloc_info(isolate(), this) &&
|
||||
!src2.must_output_reloc_info(this) &&
|
||||
src2.immediate() == 0) {
|
||||
mov(dst, Operand::Zero(), LeaveCC, cond);
|
||||
} else if (!src2.is_single_instruction(isolate(), this) &&
|
||||
!src2.must_output_reloc_info(isolate(), this) &&
|
||||
} else if (!src2.is_single_instruction(this) &&
|
||||
!src2.must_output_reloc_info(this) &&
|
||||
CpuFeatures::IsSupported(ARMv7) &&
|
||||
IsPowerOf2(src2.immediate() + 1)) {
|
||||
ubfx(dst, src1, 0,
|
||||
@ -640,7 +636,7 @@ void MacroAssembler::PopSafepointRegisters() {
|
||||
|
||||
void MacroAssembler::PushSafepointRegistersAndDoubles() {
|
||||
// Number of d-regs not known at snapshot time.
|
||||
ASSERT(!Serializer::enabled(isolate()));
|
||||
ASSERT(!serializer_enabled());
|
||||
PushSafepointRegisters();
|
||||
// Only save allocatable registers.
|
||||
ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
|
||||
@ -654,7 +650,7 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
|
||||
|
||||
void MacroAssembler::PopSafepointRegistersAndDoubles() {
|
||||
// Number of d-regs not known at snapshot time.
|
||||
ASSERT(!Serializer::enabled(isolate()));
|
||||
ASSERT(!serializer_enabled());
|
||||
// Only save allocatable registers.
|
||||
ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
|
||||
ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
|
||||
@ -696,7 +692,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
|
||||
|
||||
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
|
||||
// Number of d-regs not known at snapshot time.
|
||||
ASSERT(!Serializer::enabled(isolate()));
|
||||
ASSERT(!serializer_enabled());
|
||||
// General purpose registers are pushed last on the stack.
|
||||
int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
|
||||
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
|
||||
@ -1756,7 +1752,7 @@ void MacroAssembler::Allocate(int object_size,
|
||||
object_size -= bits;
|
||||
shift += 8;
|
||||
Operand bits_operand(bits);
|
||||
ASSERT(bits_operand.is_single_instruction(isolate(), this));
|
||||
ASSERT(bits_operand.is_single_instruction(this));
|
||||
add(scratch2, source, bits_operand, SetCC, cond);
|
||||
source = scratch2;
|
||||
cond = cc;
|
||||
|
@ -14,6 +14,9 @@ namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
|
||||
bool CpuFeatures::SupportsCrankshaft() { return true; }
|
||||
|
||||
|
||||
void RelocInfo::apply(intptr_t delta) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
@@ -39,13 +39,27 @@ namespace internal {


// -----------------------------------------------------------------------------
// CpuFeatures utilities (for V8 compatibility).
// CpuFeatures implementation.

ExternalReference ExternalReference::cpu_features() {
  return ExternalReference(&CpuFeatures::supported_);
void CpuFeatures::ProbeImpl(bool cross_compile) {
  if (cross_compile) {
    // Always align csp in cross compiled code - this is safe and ensures that
    // csp will always be aligned if it is enabled by probing at runtime.
    if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
  } else {
    CPU cpu;
    if (FLAG_enable_always_align_csp && (cpu.implementer() == CPU::NVIDIA ||
                                         FLAG_debug_code)) {
      supported_ |= 1u << ALWAYS_ALIGN_CSP;
    }
  }
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


// -----------------------------------------------------------------------------
// CPURegList utilities.
||||
|
||||
@ -271,9 +285,9 @@ void Operand::initialize_handle(Handle<Object> handle) {
|
||||
}
|
||||
|
||||
|
||||
bool Operand::NeedsRelocation(Isolate* isolate) const {
|
||||
bool Operand::NeedsRelocation(const Assembler* assembler) const {
|
||||
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
|
||||
return Serializer::enabled(isolate);
|
||||
return assembler->serializer_enabled();
|
||||
}
|
||||
|
||||
return !RelocInfo::IsNone(rmode_);
|
||||
@ -1903,7 +1917,7 @@ void Assembler::AddSub(const Register& rd,
|
||||
FlagsUpdate S,
|
||||
AddSubOp op) {
|
||||
ASSERT(rd.SizeInBits() == rn.SizeInBits());
|
||||
ASSERT(!operand.NeedsRelocation(isolate()));
|
||||
ASSERT(!operand.NeedsRelocation(this));
|
||||
if (operand.IsImmediate()) {
|
||||
int64_t immediate = operand.immediate();
|
||||
ASSERT(IsImmAddSub(immediate));
|
||||
@ -1943,7 +1957,7 @@ void Assembler::AddSubWithCarry(const Register& rd,
|
||||
ASSERT(rd.SizeInBits() == rn.SizeInBits());
|
||||
ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
|
||||
ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
|
||||
ASSERT(!operand.NeedsRelocation(isolate()));
|
||||
ASSERT(!operand.NeedsRelocation(this));
|
||||
Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
|
||||
}
|
||||
|
||||
@ -1964,7 +1978,7 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
|
||||
#ifdef USE_SIMULATOR
|
||||
// Don't generate simulator specific code if we are building a snapshot, which
|
||||
// might be run on real hardware.
|
||||
if (!Serializer::enabled(isolate())) {
|
||||
if (!serializer_enabled()) {
|
||||
// The arguments to the debug marker need to be contiguous in memory, so
|
||||
// make sure we don't try to emit pools.
|
||||
BlockPoolsScope scope(this);
|
||||
@ -1999,7 +2013,7 @@ void Assembler::Logical(const Register& rd,
|
||||
const Operand& operand,
|
||||
LogicalOp op) {
|
||||
ASSERT(rd.SizeInBits() == rn.SizeInBits());
|
||||
ASSERT(!operand.NeedsRelocation(isolate()));
|
||||
ASSERT(!operand.NeedsRelocation(this));
|
||||
if (operand.IsImmediate()) {
|
||||
int64_t immediate = operand.immediate();
|
||||
unsigned reg_size = rd.SizeInBits();
|
||||
@ -2051,7 +2065,7 @@ void Assembler::ConditionalCompare(const Register& rn,
|
||||
Condition cond,
|
||||
ConditionalCompareOp op) {
|
||||
Instr ccmpop;
|
||||
ASSERT(!operand.NeedsRelocation(isolate()));
|
||||
ASSERT(!operand.NeedsRelocation(this));
|
||||
if (operand.IsImmediate()) {
|
||||
int64_t immediate = operand.immediate();
|
||||
ASSERT(IsImmConditionalCompare(immediate));
|
||||
@ -2166,7 +2180,7 @@ void Assembler::DataProcShiftedRegister(const Register& rd,
|
||||
Instr op) {
|
||||
ASSERT(operand.IsShiftedRegister());
|
||||
ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
|
||||
ASSERT(!operand.NeedsRelocation(isolate()));
|
||||
ASSERT(!operand.NeedsRelocation(this));
|
||||
Emit(SF(rd) | op | Flags(S) |
|
||||
ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
|
||||
Rm(operand.reg()) | Rn(rn) | Rd(rd));
|
||||
@ -2178,7 +2192,7 @@ void Assembler::DataProcExtendedRegister(const Register& rd,
|
||||
const Operand& operand,
|
||||
FlagsUpdate S,
|
||||
Instr op) {
|
||||
ASSERT(!operand.NeedsRelocation(isolate()));
|
||||
ASSERT(!operand.NeedsRelocation(this));
|
||||
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
|
||||
Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
|
||||
ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
|
||||
@ -2516,10 +2530,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|
||||
|
||||
if (!RelocInfo::IsNone(rmode)) {
|
||||
// Don't record external references unless the heap will be serialized.
|
||||
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
|
||||
if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
|
||||
return;
|
||||
}
|
||||
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
|
||||
!serializer_enabled() && !emit_debug_code()) {
|
||||
return;
|
||||
}
|
||||
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
|
||||
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
|
||||
|
@ -8,12 +8,12 @@
|
||||
#include <list>
|
||||
#include <map>
|
||||
|
||||
#include "cpu.h"
|
||||
#include "globals.h"
|
||||
#include "utils.h"
|
||||
#include "assembler.h"
|
||||
#include "serialize.h"
|
||||
#include "arm64/instructions-arm64.h"
|
||||
#include "arm64/cpu-arm64.h"
|
||||
|
||||
|
||||
namespace v8 {
|
||||
@ -655,7 +655,7 @@ class Operand {
|
||||
// Relocation information.
|
||||
RelocInfo::Mode rmode() const { return rmode_; }
|
||||
void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
|
||||
bool NeedsRelocation(Isolate* isolate) const;
|
||||
bool NeedsRelocation(const Assembler* assembler) const;
|
||||
|
||||
// Helpers
|
||||
inline static Operand UntagSmi(Register smi);
|
||||
|
@ -8,20 +8,12 @@
|
||||
|
||||
#if V8_TARGET_ARCH_ARM64
|
||||
|
||||
#include "arm64/cpu-arm64.h"
|
||||
#include "cpu.h"
|
||||
#include "arm64/utils-arm64.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
#ifdef DEBUG
|
||||
bool CpuFeatures::initialized_ = false;
|
||||
#endif
|
||||
unsigned CpuFeatures::supported_ = 0;
|
||||
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
|
||||
unsigned CpuFeatures::cross_compile_ = 0;
|
||||
|
||||
|
||||
class CacheLineSizes {
|
||||
public:
|
||||
CacheLineSizes() {
|
||||
@ -126,34 +118,6 @@ void CPU::FlushICache(void* address, size_t length) {
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void CpuFeatures::Probe(bool serializer_enabled) {
|
||||
ASSERT(supported_ == 0);
|
||||
|
||||
if (serializer_enabled && FLAG_enable_always_align_csp) {
|
||||
// Always align csp in snapshot code - this is safe and ensures that csp
|
||||
// will always be aligned if it is enabled by probing at runtime.
|
||||
supported_ |= static_cast<uint64_t>(1) << ALWAYS_ALIGN_CSP;
|
||||
}
|
||||
|
||||
if (!serializer_enabled) {
|
||||
CPU cpu;
|
||||
// Always align csp on Nvidia cores or when debug_code is enabled.
|
||||
if (FLAG_enable_always_align_csp &&
|
||||
(cpu.implementer() == CPU::NVIDIA || FLAG_debug_code)) {
|
||||
found_by_runtime_probing_only_ |=
|
||||
static_cast<uint64_t>(1) << ALWAYS_ALIGN_CSP;
|
||||
}
|
||||
|
||||
supported_ |= found_by_runtime_probing_only_;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
initialized_ = true;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
||||
#endif // V8_TARGET_ARCH_ARM64
|
||||
|
@ -1,74 +0,0 @@
|
||||
// Copyright 2013 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_ARM64_CPU_ARM64_H_
|
||||
#define V8_ARM64_CPU_ARM64_H_
|
||||
|
||||
#include <stdio.h>
|
||||
#include "serialize.h"
|
||||
#include "cpu.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
|
||||
// CpuFeatures keeps track of which features are supported by the target CPU.
|
||||
// Supported features must be enabled by a CpuFeatureScope before use.
|
||||
class CpuFeatures : public AllStatic {
|
||||
public:
|
||||
// Detect features of the target CPU. Set safe defaults if the serializer
|
||||
// is enabled (snapshots must be portable).
|
||||
static void Probe(bool serializer_enabled);
|
||||
|
||||
// Check whether a feature is supported by the target CPU.
|
||||
static bool IsSupported(CpuFeature f) {
|
||||
ASSERT(initialized_);
|
||||
return Check(f, supported_);
|
||||
}
|
||||
|
||||
static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
|
||||
return IsSupported(f);
|
||||
}
|
||||
|
||||
// I and D cache line size in bytes.
|
||||
static unsigned dcache_line_size();
|
||||
static unsigned icache_line_size();
|
||||
|
||||
static unsigned supported_;
|
||||
|
||||
static bool VerifyCrossCompiling() {
|
||||
return cross_compile_ == 0;
|
||||
}
|
||||
|
||||
static bool VerifyCrossCompiling(CpuFeature f) {
|
||||
unsigned mask = flag2set(f);
|
||||
return cross_compile_ == 0 ||
|
||||
(cross_compile_ & mask) == mask;
|
||||
}
|
||||
|
||||
static bool SupportsCrankshaft() { return true; }
|
||||
|
||||
private:
|
||||
#ifdef DEBUG
|
||||
static bool initialized_;
|
||||
#endif
|
||||
|
||||
static unsigned found_by_runtime_probing_only_;
|
||||
static unsigned cross_compile_;
|
||||
|
||||
static bool Check(CpuFeature f, unsigned set) {
|
||||
return (set & flag2set(f)) != 0;
|
||||
}
|
||||
|
||||
static unsigned flag2set(CpuFeature f) {
|
||||
return 1u << f;
|
||||
}
|
||||
|
||||
friend class PlatformFeatureScope;
|
||||
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
|
||||
};
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
||||
#endif // V8_ARM64_CPU_ARM64_H_
|
@ -1655,7 +1655,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
|
||||
const int max_cloned_properties =
|
||||
FastCloneShallowObjectStub::kMaximumClonedProperties;
|
||||
if (expr->may_store_doubles() || expr->depth() > 1 ||
|
||||
Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
|
||||
masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
|
||||
properties_count > max_cloned_properties) {
|
||||
__ Push(x3, x2, x1, x0);
|
||||
__ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
|
||||
|
@ -56,7 +56,7 @@ void MacroAssembler::LogicalMacro(const Register& rd,
|
||||
LogicalOp op) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
|
||||
if (operand.NeedsRelocation(isolate())) {
|
||||
if (operand.NeedsRelocation(this)) {
|
||||
Register temp = temps.AcquireX();
|
||||
LoadRelocated(temp, operand);
|
||||
Logical(rd, rn, temp, op);
|
||||
@ -249,7 +249,7 @@ void MacroAssembler::Mov(const Register& rd,
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
|
||||
|
||||
if (operand.NeedsRelocation(isolate())) {
|
||||
if (operand.NeedsRelocation(this)) {
|
||||
LoadRelocated(dst, operand);
|
||||
|
||||
} else if (operand.IsImmediate()) {
|
||||
@ -297,7 +297,7 @@ void MacroAssembler::Mov(const Register& rd,
|
||||
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
|
||||
ASSERT(allow_macro_instructions_);
|
||||
|
||||
if (operand.NeedsRelocation(isolate())) {
|
||||
if (operand.NeedsRelocation(this)) {
|
||||
LoadRelocated(rd, operand);
|
||||
mvn(rd, rd);
|
||||
|
||||
@ -352,7 +352,7 @@ void MacroAssembler::ConditionalCompareMacro(const Register& rn,
|
||||
Condition cond,
|
||||
ConditionalCompareOp op) {
|
||||
ASSERT((cond != al) && (cond != nv));
|
||||
if (operand.NeedsRelocation(isolate())) {
|
||||
if (operand.NeedsRelocation(this)) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register temp = temps.AcquireX();
|
||||
LoadRelocated(temp, operand);
|
||||
@ -418,12 +418,12 @@ void MacroAssembler::AddSubMacro(const Register& rd,
|
||||
FlagsUpdate S,
|
||||
AddSubOp op) {
|
||||
if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
|
||||
!operand.NeedsRelocation(isolate()) && (S == LeaveFlags)) {
|
||||
!operand.NeedsRelocation(this) && (S == LeaveFlags)) {
|
||||
// The instruction would be a nop. Avoid generating useless code.
|
||||
return;
|
||||
}
|
||||
|
||||
if (operand.NeedsRelocation(isolate())) {
|
||||
if (operand.NeedsRelocation(this)) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register temp = temps.AcquireX();
|
||||
LoadRelocated(temp, operand);
|
||||
@ -449,7 +449,7 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
|
||||
ASSERT(rd.SizeInBits() == rn.SizeInBits());
|
||||
UseScratchRegisterScope temps(this);
|
||||
|
||||
if (operand.NeedsRelocation(isolate())) {
|
||||
if (operand.NeedsRelocation(this)) {
|
||||
Register temp = temps.AcquireX();
|
||||
LoadRelocated(temp, operand);
|
||||
AddSubWithCarryMacro(rd, rn, temp, S, op);
|
||||
|
@ -123,7 +123,8 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
|
||||
jit_cookie_(0),
|
||||
enabled_cpu_features_(0),
|
||||
emit_debug_code_(FLAG_debug_code),
|
||||
predictable_code_size_(false) {
|
||||
predictable_code_size_(false),
|
||||
serializer_enabled_(Serializer::enabled(isolate)) {
|
||||
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
|
||||
jit_cookie_ = isolate->random_number_generator()->NextInt();
|
||||
}
|
||||
@ -191,7 +192,7 @@ PredictableCodeSizeScope::~PredictableCodeSizeScope() {
|
||||
#ifdef DEBUG
|
||||
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
|
||||
: assembler_(assembler) {
|
||||
ASSERT(CpuFeatures::IsSafeForSnapshot(assembler_->isolate(), f));
|
||||
ASSERT(CpuFeatures::IsSupported(f));
|
||||
old_enabled_ = assembler_->enabled_cpu_features();
|
||||
uint64_t mask = static_cast<uint64_t>(1) << f;
|
||||
// TODO(svenpanne) This special case below doesn't belong here!
|
||||
@ -211,23 +212,9 @@ CpuFeatureScope::~CpuFeatureScope() {
|
||||
#endif
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Implementation of PlatformFeatureScope
|
||||
|
||||
PlatformFeatureScope::PlatformFeatureScope(Isolate* isolate, CpuFeature f)
|
||||
: isolate_(isolate), old_cross_compile_(CpuFeatures::cross_compile_) {
|
||||
// CpuFeatures is a global singleton, therefore this is only safe in
|
||||
// single threaded code.
|
||||
ASSERT(Serializer::enabled(isolate));
|
||||
uint64_t mask = static_cast<uint64_t>(1) << f;
|
||||
CpuFeatures::cross_compile_ |= mask;
|
||||
USE(isolate_);
|
||||
}
|
||||
|
||||
|
||||
PlatformFeatureScope::~PlatformFeatureScope() {
|
||||
CpuFeatures::cross_compile_ = old_cross_compile_;
|
||||
}
|
||||
bool CpuFeatures::initialized_ = false;
|
||||
unsigned CpuFeatures::supported_ = 0;
|
||||
unsigned CpuFeatures::cache_line_size_ = 0;
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
@ -1438,6 +1425,12 @@ ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
|
||||
}
|
||||
|
||||
|
||||
ExternalReference ExternalReference::cpu_features() {
|
||||
ASSERT(CpuFeatures::initialized_);
|
||||
return ExternalReference(&CpuFeatures::supported_);
|
||||
}
|
||||
|
||||
|
||||
double power_helper(double x, double y) {
|
||||
int y_int = static_cast<int>(y);
|
||||
if (y == y_int) {
|
||||
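Note (illustrative, not part of this patch): AssemblerBase now samples Serializer::enabled(isolate) once in its constructor, so later queries go through the assembler rather than the isolate. The sketch below contrasts the two patterns; the masm variable is hypothetical.

  // Old pattern: every call site needed the isolate.
  //   if (Serializer::enabled(isolate)) { ... }
  //   x.must_output_reloc_info(isolate, assembler);

  // New pattern: the cached flag is read from the (Macro)Assembler.
  if (masm->serializer_enabled()) {
    // Take the snapshot-safe path.
  }

  // Operand helpers follow the same scheme and now take only the assembler:
  //   x.must_output_reloc_info(assembler);   // ARM
  //   operand.NeedsRelocation(assembler);    // ARM64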
|
@@ -65,6 +65,8 @@ class AssemblerBase: public Malloced {
  bool emit_debug_code() const { return emit_debug_code_; }
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  bool serializer_enabled() const { return serializer_enabled_; }

  bool predictable_code_size() const { return predictable_code_size_; }
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

@@ -104,6 +106,7 @@
  uint64_t enabled_cpu_features_;
  bool emit_debug_code_;
  bool predictable_code_size_;
  bool serializer_enabled_;
};


@@ -154,16 +157,47 @@ class CpuFeatureScope BASE_EMBEDDED {
};


// Enable a unsupported feature within a scope for cross-compiling for a
// different CPU.
class PlatformFeatureScope BASE_EMBEDDED {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
//   if (assembler->IsSupported(SSE3)) {
//     CpuFeatureScope fscope(assembler, SSE3);
//     // Generate code containing SSE3 instructions.
//   } else {
//     // Generate alternative code.
//   }
class CpuFeatures : public AllStatic {
 public:
  PlatformFeatureScope(Isolate* isolate, CpuFeature f);
  ~PlatformFeatureScope();
  static void Probe(bool cross_compile) {
    STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
    if (initialized_) return;
    initialized_ = true;
    ProbeImpl(cross_compile);
  }

  static bool IsSupported(CpuFeature f) {
    return (supported_ & (1u << f)) != 0;
  }

  static inline bool SupportsCrankshaft();

  static inline unsigned cache_line_size() {
    ASSERT(cache_line_size_ != 0);
    return cache_line_size_;
  }

  static void PrintTarget();
  static void PrintFeatures();

 private:
  Isolate* isolate_;
  uint64_t old_cross_compile_;
  // Platform-dependent implementation.
  static void ProbeImpl(bool cross_compile);

  static unsigned supported_;
  static unsigned cache_line_size_;
  static bool initialized_;
  friend class ExternalReference;
  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};


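Note (illustrative, not part of this patch): a minimal usage sketch of the consolidated CpuFeatures class declared above together with CpuFeatureScope. The wrapper function is hypothetical; the API calls mirror the class comment and the CpuFeatureScope constructor shown in this diff.

  void GenerateSomething(MacroAssembler* masm) {
    if (CpuFeatures::IsSupported(SSE3)) {
      // The scope asserts CpuFeatures::IsSupported(SSE3) and records the
      // feature in the assembler's enabled_cpu_features_ set.
      CpuFeatureScope fscope(masm, SSE3);
      // Generate code containing SSE3 instructions.
    } else {
      // Generate alternative code.
    }
  }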
@ -106,11 +106,6 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
|
||||
}
|
||||
|
||||
|
||||
void CodeStub::VerifyPlatformFeatures() {
|
||||
ASSERT(CpuFeatures::VerifyCrossCompiling());
|
||||
}
|
||||
|
||||
|
||||
Handle<Code> CodeStub::GetCode() {
|
||||
Heap* heap = isolate()->heap();
|
||||
Code* code;
|
||||
@ -121,10 +116,6 @@ Handle<Code> CodeStub::GetCode() {
|
||||
return Handle<Code>(code);
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
VerifyPlatformFeatures();
|
||||
#endif
|
||||
|
||||
{
|
||||
HandleScope scope(isolate());
|
||||
|
||||
|
@ -194,8 +194,6 @@ class CodeStub BASE_EMBEDDED {
|
||||
// Generates the assembler code for the stub.
|
||||
virtual Handle<Code> GenerateCode() = 0;
|
||||
|
||||
virtual void VerifyPlatformFeatures();
|
||||
|
||||
// Returns whether the code generated for this stub needs to be allocated as
|
||||
// a fixed (non-moveable) code object.
|
||||
virtual bool NeedsImmovableCode() { return false; }
|
||||
@ -1130,8 +1128,6 @@ class BinaryOpICStub : public HydrogenCodeStub {
|
||||
return state_.GetExtraICState();
|
||||
}
|
||||
|
||||
virtual void VerifyPlatformFeatures() V8_FINAL V8_OVERRIDE { }
|
||||
|
||||
virtual Handle<Code> GenerateCode() V8_OVERRIDE;
|
||||
|
||||
const BinaryOpIC::State& state() const { return state_; }
|
||||
@ -1185,8 +1181,6 @@ class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
|
||||
return state_.GetExtraICState();
|
||||
}
|
||||
|
||||
virtual void VerifyPlatformFeatures() V8_OVERRIDE { }
|
||||
|
||||
virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
|
||||
|
||||
virtual void PrintState(StringStream* stream) V8_OVERRIDE;
|
||||
@ -1266,8 +1260,6 @@ class StringAddStub V8_FINAL : public HydrogenCodeStub {
|
||||
return PretenureFlagBits::decode(bit_field_);
|
||||
}
|
||||
|
||||
virtual void VerifyPlatformFeatures() V8_OVERRIDE { }
|
||||
|
||||
virtual Handle<Code> GenerateCode() V8_OVERRIDE;
|
||||
|
||||
virtual void InitializeInterfaceDescriptor(
|
||||
@ -1477,9 +1469,6 @@ class CEntryStub : public PlatformCodeStub {
|
||||
// can generate both variants ahead of time.
|
||||
static void GenerateAheadOfTime(Isolate* isolate);
|
||||
|
||||
protected:
|
||||
virtual void VerifyPlatformFeatures() V8_OVERRIDE { }
|
||||
|
||||
private:
|
||||
// Number of pointers/values returned.
|
||||
const int result_size_;
|
||||
@ -1874,7 +1863,7 @@ class DoubleToIStub : public PlatformCodeStub {
|
||||
OffsetBits::encode(offset) |
|
||||
IsTruncatingBits::encode(is_truncating) |
|
||||
SkipFastPathBits::encode(skip_fastpath) |
|
||||
SSE3Bits::encode(CpuFeatures::IsSafeForSnapshot(isolate, SSE3) ? 1 : 0);
|
||||
SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
|
||||
}
|
||||
|
||||
Register source() {
|
||||
@ -1901,9 +1890,6 @@ class DoubleToIStub : public PlatformCodeStub {
|
||||
|
||||
virtual bool SometimesSetsUpAFrame() { return false; }
|
||||
|
||||
protected:
|
||||
virtual void VerifyPlatformFeatures() V8_OVERRIDE { }
|
||||
|
||||
private:
|
||||
static const int kBitsPerRegisterNumber = 6;
|
||||
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
|
||||
|
src/flags.cc (15 changed lines)
@@ -7,14 +7,11 @@

#include "v8.h"

#include "assembler.h"
#include "platform.h"
#include "smart-pointers.h"
#include "string-stream.h"

#if V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#endif

namespace v8 {
namespace internal {
@@ -337,15 +334,10 @@ static Flag* FindFlag(const char* name) {
}


bool FlagList::serializer_enabled_ = false;


// static
int FlagList::SetFlagsFromCommandLine(int* argc,
                                      char** argv,
                                      bool remove_flags,
                                      bool serializer_enabled) {
  serializer_enabled_ = serializer_enabled;
                                      bool remove_flags) {
  int return_code = 0;
  // parse arguments
  for (int i = 1; i < *argc;) {
@@ -525,11 +517,8 @@ void FlagList::ResetAllFlags() {

// static
void FlagList::PrintHelp() {
#if V8_TARGET_ARCH_ARM
  CpuFeatures::PrintTarget();
  CpuFeatures::Probe(serializer_enabled_);
  CpuFeatures::PrintFeatures();
#endif  // V8_TARGET_ARCH_ARM

  printf("Usage:\n");
  printf("  shell [options] -e string\n");
@@ -42,8 +42,7 @@ class FlagList {
  // -- (equivalent to --js_arguments, captures all remaining args)
  static int SetFlagsFromCommandLine(int* argc,
                                     char** argv,
                                     bool remove_flags,
                                     bool serializer_enabled = false);
                                     bool remove_flags);

  // Set the flag values by parsing the string str. Splits string into argc
  // substrings argv[], each of which consisting of non-white-space chars,
@@ -58,10 +57,6 @@ class FlagList {

  // Set flags as consequence of being implied by another flag.
  static void EnforceFlagImplications();

 private:
  // TODO(svenpanne) Remove this when Serializer/startup has been refactored.
  static bool serializer_enabled_;
};

} } // namespace v8::internal
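Note (illustrative, not part of this patch): FlagList no longer caches the serializer state, so the flag-parsing entry point loses its extra parameter and the CpuFeatures::Probe(serializer_enabled_) call in PrintHelp() goes away along with the cached flag. The embedder call site below is hypothetical.

  // Old signature (serializer_enabled defaulted to false):
  //   FlagList::SetFlagsFromCommandLine(&argc, argv, true, serializer_enabled);

  // New signature: the serializer no longer matters for flag parsing.
  int return_code =
      FlagList::SetFlagsFromCommandLine(&argc, argv, /*remove_flags=*/true);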
@ -402,7 +402,7 @@ void FullCodeGenerator::Initialize() {
|
||||
// we disable the production of debug code in the full compiler if we are
|
||||
// either generating a snapshot or we booted from a snapshot.
|
||||
generate_debug_code_ = FLAG_debug_code &&
|
||||
!Serializer::enabled(isolate()) &&
|
||||
!masm_->serializer_enabled() &&
|
||||
!Snapshot::HaveASnapshotToStartFrom();
|
||||
masm_->set_emit_debug_code(generate_debug_code_);
|
||||
masm_->set_predictable_code_size(true);
|
||||
|
@ -45,6 +45,8 @@
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
bool CpuFeatures::SupportsCrankshaft() { return true; }
|
||||
|
||||
|
||||
static const byte kCallOpcode = 0xE8;
|
||||
static const int kNoCodeAgeSequenceLength = 5;
|
||||
|
@ -48,53 +48,26 @@ namespace internal {
|
||||
// -----------------------------------------------------------------------------
|
||||
// Implementation of CpuFeatures
|
||||
|
||||
#ifdef DEBUG
|
||||
bool CpuFeatures::initialized_ = false;
|
||||
#endif
|
||||
uint64_t CpuFeatures::supported_ = 0;
|
||||
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
|
||||
uint64_t CpuFeatures::cross_compile_ = 0;
|
||||
|
||||
|
||||
ExternalReference ExternalReference::cpu_features() {
|
||||
ASSERT(CpuFeatures::initialized_);
|
||||
return ExternalReference(&CpuFeatures::supported_);
|
||||
}
|
||||
|
||||
|
||||
void CpuFeatures::Probe(bool serializer_enabled) {
|
||||
ASSERT(!initialized_);
|
||||
ASSERT(supported_ == 0);
|
||||
#ifdef DEBUG
|
||||
initialized_ = true;
|
||||
#endif
|
||||
if (serializer_enabled) {
|
||||
supported_ |= OS::CpuFeaturesImpliedByPlatform();
|
||||
return; // No features if we might serialize.
|
||||
}
|
||||
|
||||
uint64_t probed_features = 0;
|
||||
void CpuFeatures::ProbeImpl(bool cross_compile) {
|
||||
CPU cpu;
|
||||
if (cpu.has_sse41()) {
|
||||
probed_features |= static_cast<uint64_t>(1) << SSE4_1;
|
||||
}
|
||||
if (cpu.has_sse3()) {
|
||||
probed_features |= static_cast<uint64_t>(1) << SSE3;
|
||||
}
|
||||
|
||||
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
|
||||
CHECK(cpu.has_cmov()); // CMOV support is mandatory.
|
||||
CHECK(cpu.has_sahf()); // SAHF must be available in compat/legacy mode.
|
||||
supported_ |= 1u << SAHF;
|
||||
supported_ |= OS::CpuFeaturesImpliedByPlatform();
|
||||
|
||||
// SAHF must be available in compat/legacy mode.
|
||||
ASSERT(cpu.has_sahf());
|
||||
probed_features |= static_cast<uint64_t>(1) << SAHF;
|
||||
// Only use statically determined features for cross compile (snapshot).
|
||||
if (cross_compile) return;
|
||||
|
||||
uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
|
||||
supported_ = probed_features | platform_features;
|
||||
found_by_runtime_probing_only_ = probed_features & ~platform_features;
|
||||
if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
|
||||
if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
|
||||
}
|
||||
|
||||
|
||||
void CpuFeatures::PrintTarget() { }
|
||||
void CpuFeatures::PrintFeatures() { }
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Implementation of Displacement
|
||||
|
||||
@ -2613,10 +2586,9 @@ void Assembler::dd(uint32_t data) {
|
||||
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|
||||
ASSERT(!RelocInfo::IsNone(rmode));
|
||||
// Don't record external references unless the heap will be serialized.
|
||||
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
|
||||
if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
|
||||
return;
|
||||
}
|
||||
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
|
||||
!serializer_enabled() && !emit_debug_code()) {
|
||||
return;
|
||||
}
|
||||
RelocInfo rinfo(pc_, rmode, data, NULL);
|
||||
reloc_info_writer.Write(&rinfo);
|
||||
|
@ -452,73 +452,6 @@ class Displacement BASE_EMBEDDED {
|
||||
};
|
||||
|
||||
|
||||
|
||||
// CpuFeatures keeps track of which features are supported by the target CPU.
|
||||
// Supported features must be enabled by a CpuFeatureScope before use.
|
||||
// Example:
|
||||
// if (assembler->IsSupported(SSE3)) {
|
||||
// CpuFeatureScope fscope(assembler, SSE3);
|
||||
// // Generate code containing SSE3 instructions.
|
||||
// } else {
|
||||
// // Generate alternative code.
|
||||
// }
|
||||
class CpuFeatures : public AllStatic {
|
||||
public:
|
||||
// Detect features of the target CPU. Set safe defaults if the serializer
|
||||
// is enabled (snapshots must be portable).
|
||||
static void Probe(bool serializer_enabled);
|
||||
|
||||
// Check whether a feature is supported by the target CPU.
|
||||
static bool IsSupported(CpuFeature f) {
|
||||
ASSERT(initialized_);
|
||||
if (Check(f, cross_compile_)) return true;
|
||||
if (f == SSE3 && !FLAG_enable_sse3) return false;
|
||||
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
|
||||
return Check(f, supported_);
|
||||
}
|
||||
|
||||
static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
|
||||
return Check(f, cross_compile_) ||
|
||||
(IsSupported(f) &&
|
||||
!(Serializer::enabled(isolate) &&
|
||||
Check(f, found_by_runtime_probing_only_)));
|
||||
}
|
||||
|
||||
static bool VerifyCrossCompiling() {
|
||||
return cross_compile_ == 0;
|
||||
}
|
||||
|
||||
static bool VerifyCrossCompiling(CpuFeature f) {
|
||||
uint64_t mask = flag2set(f);
|
||||
return cross_compile_ == 0 ||
|
||||
(cross_compile_ & mask) == mask;
|
||||
}
|
||||
|
||||
static bool SupportsCrankshaft() { return true; }
|
||||
|
||||
private:
|
||||
static bool Check(CpuFeature f, uint64_t set) {
|
||||
return (set & flag2set(f)) != 0;
|
||||
}
|
||||
|
||||
static uint64_t flag2set(CpuFeature f) {
|
||||
return static_cast<uint64_t>(1) << f;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
static bool initialized_;
|
||||
#endif
|
||||
static uint64_t supported_;
|
||||
static uint64_t found_by_runtime_probing_only_;
|
||||
|
||||
static uint64_t cross_compile_;
|
||||
|
||||
friend class ExternalReference;
|
||||
friend class PlatformFeatureScope;
|
||||
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
|
||||
};
|
||||
|
||||
|
||||
class Assembler : public AssemblerBase {
|
||||
private:
|
||||
// We check before assembling an instruction that there is sufficient
|
||||
|
@ -3213,7 +3213,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
|
||||
Register character,
|
||||
Register scratch) {
|
||||
// hash = (seed + character) + ((seed + character) << 10);
|
||||
if (Serializer::enabled(masm->isolate())) {
|
||||
if (masm->serializer_enabled()) {
|
||||
__ LoadRoot(scratch, Heap::kHashSeedRootIndex);
|
||||
__ SmiUntag(scratch);
|
||||
__ add(scratch, character);
|
||||
|
@ -1590,7 +1590,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
|
||||
: ObjectLiteral::kNoFlags;
|
||||
int properties_count = constant_properties->length() / 2;
|
||||
if (expr->may_store_doubles() || expr->depth() > 1 ||
|
||||
Serializer::enabled(isolate()) ||
|
||||
masm()->serializer_enabled() ||
|
||||
flags != ObjectLiteral::kFastElements ||
|
||||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
|
||||
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
|
||||
|
@ -1279,7 +1279,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
|
||||
// Note: r0 will contain hash code
|
||||
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
|
||||
// Xor original key with a seed.
|
||||
if (Serializer::enabled(isolate())) {
|
||||
if (serializer_enabled()) {
|
||||
ExternalReference roots_array_start =
|
||||
ExternalReference::roots_array_start(isolate());
|
||||
mov(scratch, Immediate(Heap::kHashSeedRootIndex));
|
||||
|
@ -450,7 +450,7 @@ void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
|
||||
JSGlobalObject::EnsurePropertyCell(global, name);
|
||||
ASSERT(cell->value()->IsTheHole());
|
||||
Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
|
||||
if (Serializer::enabled(masm->isolate())) {
|
||||
if (masm->serializer_enabled()) {
|
||||
__ mov(scratch, Immediate(cell));
|
||||
__ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
|
||||
Immediate(the_hole));
|
||||
@ -1379,7 +1379,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
|
||||
|
||||
HandlerFrontendHeader(type, receiver(), global, name, &miss);
|
||||
// Get the value from the cell.
|
||||
if (Serializer::enabled(isolate())) {
|
||||
if (masm()->serializer_enabled()) {
|
||||
__ mov(eax, Immediate(cell));
|
||||
__ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
|
||||
} else {
|
||||
|
@ -46,6 +46,10 @@
namespace v8 {
namespace internal {

bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }

// -----------------------------------------------------------------------------
// Operand and MemOperand.

@ -43,38 +43,22 @@
namespace v8 {
namespace internal {

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;

ExternalReference ExternalReference::cpu_features() {
ASSERT(CpuFeatures::initialized_);
return ExternalReference(&CpuFeatures::supported_);
}

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
uint64_t answer = 0;
static unsigned CpuFeaturesImpliedByCompiler() {
unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
answer |= static_cast<uint64_t>(1) << FPU;
answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS

#ifdef __mips__
// If the compiler is allowed to use FPU then we can use FPU too in our code
// generation even when generating snapshots. This won't work for cross
// compilation.
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
answer |= static_cast<uint64_t>(1) << FPU;
#endif // defined(__mips_hard_float) && __mips_hard_float != 0
#endif // def __mips__
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
answer |= 1u << FPU;
#endif

return answer;
}

@ -102,43 +86,30 @@ const char* DoubleRegister::AllocationIndexToString(int index) {
}

void CpuFeatures::Probe(bool serializer_enabled) {
unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
CpuFeaturesImpliedByCompiler());
ASSERT(supported_ == 0 ||
(supported_ & standard_features) == standard_features);
#ifdef DEBUG
initialized_ = true;
#endif
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
supported_ |= CpuFeaturesImpliedByCompiler();

// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also allowed for generated code in the
// snapshot.
supported_ |= standard_features;

if (serializer_enabled) {
// No probing for features if we might serialize (generate snapshot).
return;
}
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;

// If the compiler is allowed to use fpu then we can use fpu too in our
// code generation.
#if !defined(__mips__)
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= static_cast<uint64_t>(1) << FPU;
supported_ |= 1u << FPU;
#else
// Probe for additional features not already known to be available.
// Probe for additional features at runtime.
CPU cpu;
if (cpu.has_fpu()) {
// This implementation also sets the FPU flags if
// runtime detection of FPU returns true.
supported_ |= static_cast<uint64_t>(1) << FPU;
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
}
if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}

void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }

int ToNumber(Register reg) {
ASSERT(reg.is_valid());
const int kNumbers[] = {
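The MIPS ProbeImpl above shows the new split: features implied by the platform or the compiler are always folded into supported_, and runtime probing is skipped entirely for a cross-compiled snapshot. A self-contained sketch of that flow, with has_fpu() standing in for real CPU detection:

#include <cstdio>

enum CpuFeature { FPU = 0, NUMBER_OF_CPU_FEATURES };

static unsigned supported = 0;

static bool has_fpu() { return true; }  // stand-in for real CPU detection

static void ProbeImpl(bool cross_compile) {
  // Statically known features are always safe, even inside a snapshot.
#if defined(__mips_hard_float) && __mips_hard_float != 0
  supported |= 1u << FPU;
#endif
  // Only use statically determined features when cross compiling.
  if (cross_compile) return;
  // Otherwise probe the host CPU as well.
  if (has_fpu()) supported |= 1u << FPU;
}

int main() {
  ProbeImpl(false);
  std::printf("FPU bit set: %d\n", (supported & (1u << FPU)) != 0 ? 1 : 0);
  return 0;
}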
@ -2079,10 +2050,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
return;
}
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
return;
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {

@ -419,65 +419,6 @@ class MemOperand : public Operand {
};

// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe(bool serializer_enabled);

// A special case for printing target and features, which we want to do
// before initializing the isolate

// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
return Check(f, supported_);
}

static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return Check(f, cross_compile_) ||
(IsSupported(f) &&
!(Serializer::enabled(isolate) &&
Check(f, found_by_runtime_probing_only_)));
}

static bool VerifyCrossCompiling() {
return cross_compile_ == 0;
}

static bool VerifyCrossCompiling(CpuFeature f) {
unsigned mask = flag2set(f);
return cross_compile_ == 0 ||
(cross_compile_ & mask) == mask;
}

static bool SupportsCrankshaft() { return CpuFeatures::IsSupported(FPU); }

private:
static bool Check(CpuFeature f, unsigned set) {
return (set & flag2set(f)) != 0;
}

static unsigned flag2set(CpuFeature f) {
return 1u << f;
}

#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_only_;

static unsigned cross_compile_;

friend class ExternalReference;
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};

class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
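The hunk above deletes the MIPS copy of CpuFeatures; per the commit message the class now lives in the platform-independent assembler.h, with only SupportsCrankshaft() left to each architecture's *-inl.h (see the earlier hunks). A sketch of the shape such a consolidated class could take — an illustration of the idea, not the actual V8 header:

#include <cassert>
#include <cstdio>

enum CpuFeature { SSE3, VFP3, FPU, NUMBER_OF_CPU_FEATURES };

class CpuFeatures {
 public:
  // Probe once per process; cross_compile selects static-features-only mode.
  static void Probe(bool cross_compile) {
    if (initialized_) return;
    initialized_ = true;
    ProbeImpl(cross_compile);
  }
  static bool IsSupported(CpuFeature f) {
    assert(initialized_);
    return (supported_ & (1u << f)) != 0;
  }
  // SupportsCrankshaft() would be defined per platform, e.g. IsSupported(FPU).

 private:
  static void ProbeImpl(bool cross_compile) {
    if (cross_compile) return;      // keep only statically determined features
    supported_ |= 1u << FPU;        // pretend runtime probing found FPU
  }
  static bool initialized_;
  static unsigned supported_;
};

bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;

int main() {
  CpuFeatures::Probe(false);
  std::printf("FPU: %d\n", CpuFeatures::IsSupported(FPU) ? 1 : 0);
  return 0;
}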
@ -1661,7 +1661,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if (expr->may_store_doubles() || expr->depth() > 1 ||
Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);

@ -11,6 +11,7 @@

#include "v8.h"

#include "assembler.h"
#include "bootstrapper.h"
#include "flags.h"
#include "natives.h"
@ -18,9 +19,6 @@
#include "serialize.h"
#include "list.h"

#if V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#endif

using namespace v8;

@ -274,13 +272,14 @@ void DumpException(Handle<Message> message) {
int main(int argc, char** argv) {
V8::InitializeICU();
i::Isolate::SetCrashIfDefaultIsolateInitialized();
i::CpuFeatures::Probe(true);

// By default, log code create information in the snapshot.
i::FLAG_log_code = true;

// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true, true);
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
if (result > 0 || argc != 2 || i::FLAG_help) {
::printf("Usage: %s [flag] ... outfile\n", argv[0]);
i::FlagList::PrintHelp();

@ -52,7 +52,7 @@ namespace internal {
static const pthread_t kNoThread = (pthread_t) 0;

uint64_t OS::CpuFeaturesImpliedByPlatform() {
unsigned OS::CpuFeaturesImpliedByPlatform() {
return 0; // Nothing special.
}

@ -1191,7 +1191,7 @@ void OS::SignalCodeMovingGC() { }
#endif // __MINGW32__

uint64_t OS::CpuFeaturesImpliedByPlatform() {
unsigned OS::CpuFeaturesImpliedByPlatform() {
return 0; // Windows runs on anything.
}

@ -272,7 +272,7 @@ class OS {
// This is a little messy because the interpretation is subject to the cross
// of the CPU and the OS. The bits in the answer correspond to the bit
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
static unsigned CpuFeaturesImpliedByPlatform();

// The total amount of physical memory available on the current system.
static uint64_t TotalPhysicalMemory();
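Both platform implementations and the OS declaration above narrow CpuFeaturesImpliedByPlatform() from uint64_t to unsigned, matching the denser CpuFeature numbering used for the mask. A hypothetical platform file might report a build-time-guaranteed feature like this (the VFP3 index and the CAN_USE_VFP3_INSTRUCTIONS guard are illustrative):

#include <cstdio>

enum CpuFeature { VFP3 = 3, NUMBER_OF_CPU_FEATURES };  // illustrative index only

struct OS {
  static unsigned CpuFeaturesImpliedByPlatform() {
#ifdef CAN_USE_VFP3_INSTRUCTIONS
    return 1u << VFP3;  // the platform/toolchain guarantees the feature
#else
    return 0;           // nothing special
#endif
  }
};

int main() {
  std::printf("platform mask: 0x%x\n", OS::CpuFeaturesImpliedByPlatform());
  return 0;
}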
@ -8802,8 +8802,10 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
ASSERT(!ast_id.IsNone());

Compiler::ConcurrencyMode mode = isolate->concurrent_osr_enabled()
? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
Compiler::ConcurrencyMode mode =
isolate->concurrent_osr_enabled() &&
(caller_code->CodeSize() > 32 * FullCodeGenerator::kCodeSizeMultiplier)
? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
Handle<Code> result = Handle<Code>::null();

OptimizedCompileJob* job = NULL;

@ -96,9 +96,7 @@ void V8::InitializeOncePerProcessImpl() {
platform_ = new DefaultPlatform;
#endif
Sampler::SetUp();
// TODO(svenpanne) Clean this up when Serializer is a real object.
bool serializer_enabled = Serializer::enabled(NULL);
CpuFeatures::Probe(serializer_enabled);
CpuFeatures::Probe(false);
OS::PostSetUp();
// The custom exp implementation needs 16KB of lookup data; initialize it
// on demand.

@ -399,20 +399,26 @@ enum StateTag {
#endif

// Feature flags bit positions. They are mostly based on the CPUID spec.
// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
enum CpuFeature { SSE4_1 = 32 + 19, // x86
SSE3 = 32 + 0, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
SUDIV = 3, // ARM
UNALIGNED_ACCESSES = 4, // ARM
MOVW_MOVT_IMMEDIATE_LOADS = 5, // ARM
VFP32DREGS = 6, // ARM
NEON = 7, // ARM
SAHF = 0, // x86
FPU = 1, // MIPS
ALWAYS_ALIGN_CSP = 1 }; // ARM64
// CPU feature flags.
enum CpuFeature {
// x86
SSE4_1,
SSE3,
SAHF,
// ARM
VFP3,
ARMv7,
SUDIV,
UNALIGNED_ACCESSES,
MOVW_MOVT_IMMEDIATE_LOADS,
VFP32DREGS,
NEON,
// MIPS
FPU,
// ARM64
ALWAYS_ALIGN_CSP,
NUMBER_OF_CPU_FEATURES
};

// Used to specify if a macro instruction must perform a smi check on tagged
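The globals.h hunk is the "enum values no longer map to CPUID bit fields" bullet of the commit message: the values become dense indices, so a 32-bit unsigned mask and 1u << f cover every platform. A minimal sketch of building and querying such a mask, using the Check/flag2set style of the deleted per-platform classes:

#include <cstdio>

enum CpuFeature {
  SSE4_1, SSE3, SAHF,
  VFP3, ARMv7, SUDIV, UNALIGNED_ACCESSES, MOVW_MOVT_IMMEDIATE_LOADS,
  VFP32DREGS, NEON,
  FPU,
  ALWAYS_ALIGN_CSP,
  NUMBER_OF_CPU_FEATURES
};

static_assert(NUMBER_OF_CPU_FEATURES <= 32, "feature set must fit an unsigned mask");

static unsigned flag2set(CpuFeature f) { return 1u << f; }
static bool Check(CpuFeature f, unsigned set) { return (set & flag2set(f)) != 0; }

int main() {
  unsigned supported = flag2set(VFP3) | flag2set(ARMv7);  // example feature set
  std::printf("ARMv7: %d, NEON: %d\n", Check(ARMv7, supported), Check(NEON, supported));
  return 0;
}

Dense indices also make the old x86 split between EDX and ECX CPUID bits irrelevant to the rest of the code base.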
@ -14,6 +14,8 @@
namespace v8 {
namespace internal {

bool CpuFeatures::SupportsCrankshaft() { return true; }

// -----------------------------------------------------------------------------
// Implementation of Assembler

@ -15,57 +15,27 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
uint64_t CpuFeatures::cross_compile_ = 0;

ExternalReference ExternalReference::cpu_features() {
ASSERT(CpuFeatures::initialized_);
return ExternalReference(&CpuFeatures::supported_);
}

void CpuFeatures::Probe(bool serializer_enabled) {
ASSERT(supported_ == 0);
#ifdef DEBUG
initialized_ = true;
#endif
supported_ = 0;
if (serializer_enabled) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}

uint64_t probed_features = 0;
void CpuFeatures::ProbeImpl(bool cross_compile) {
CPU cpu;
if (cpu.has_sse41()) {
probed_features |= static_cast<uint64_t>(1) << SSE4_1;
}
if (cpu.has_sse3()) {
probed_features |= static_cast<uint64_t>(1) << SSE3;
}
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
CHECK(cpu.has_cmov()); // CMOV support is mandatory.

// SSE2 must be available on every x64 CPU.
ASSERT(cpu.has_sse2());
supported_ |= OS::CpuFeaturesImpliedByPlatform();

// CMOV must be available on every x64 CPU.
ASSERT(cpu.has_cmov());
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;

if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
// SAHF is not generally available in long mode.
if (cpu.has_sahf()) {
probed_features |= static_cast<uint64_t>(1) << SAHF;
}

uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
supported_ = probed_features | platform_features;
found_by_runtime_probing_only_ = probed_features & ~platform_features;
if (cpu.has_sahf() && FLAG_enable_sahf) supported_|= 1u << SAHF;
}

void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }

// -----------------------------------------------------------------------------
// Implementation of RelocInfo
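The x64 ProbeImpl above also demonstrates that FLAG_enable_<feature> is now consulted only at probing time: the flags are folded into supported_ once, and found_by_runtime_probing_only_ disappears. A self-contained sketch of that probe-time gating, with stand-in flag and CPU-query types:

#include <cstdio>

enum CpuFeature { SSE4_1, SSE3, SAHF, NUMBER_OF_CPU_FEATURES };

struct Flags { bool enable_sse4_1 = true; bool enable_sse3 = true; bool enable_sahf = false; };
struct Cpu {
  bool has_sse41() const { return true; }
  bool has_sse3() const { return true; }
  bool has_sahf() const { return true; }
};

static unsigned supported = 0;

static void ProbeImpl(bool cross_compile, const Cpu& cpu, const Flags& flags) {
  if (cross_compile) return;  // snapshots keep only statically known features
  // Flags are consulted here, once; IsSupported() stays a plain bit test.
  if (cpu.has_sse41() && flags.enable_sse4_1) supported |= 1u << SSE4_1;
  if (cpu.has_sse3() && flags.enable_sse3) supported |= 1u << SSE3;
  if (cpu.has_sahf() && flags.enable_sahf) supported |= 1u << SAHF;
}

int main() {
  ProbeImpl(false, Cpu(), Flags());
  std::printf("SSE3: %d, SAHF: %d\n",
              (supported & (1u << SSE3)) != 0, (supported & (1u << SAHF)) != 0);
  return 0;
}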
@ -2220,6 +2190,7 @@ void Assembler::fnclex() {
void Assembler::sahf() {
// TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
// in 64-bit mode. Test CpuID.
ASSERT(IsEnabled(SAHF));
EnsureSpace ensure_space(this);
emit(0x9E);
}

@ -2925,11 +2896,10 @@ void Assembler::dd(uint32_t data) {

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(!RelocInfo::IsNone(rmode));
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
// Don't record external references unless the heap will be serialized.
if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
return;
}
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
return;
} else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
// Don't record psuedo relocation info for code age sequence mode.
return;

@ -437,73 +437,6 @@ class Operand BASE_EMBEDDED {
};

// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
// if (assembler->IsSupported(SSE3)) {
// CpuFeatureScope fscope(assembler, SSE3);
// // Generate SSE3 floating point code.
// } else {
// // Generate standard SSE2 floating point code.
// }
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe(bool serializer_enabled);

// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
if (Check(f, cross_compile_)) return true;
ASSERT(initialized_);
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
return Check(f, supported_);
}

static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return Check(f, cross_compile_) ||
(IsSupported(f) &&
!(Serializer::enabled(isolate) &&
Check(f, found_by_runtime_probing_only_)));
}

static bool VerifyCrossCompiling() {
return cross_compile_ == 0;
}

static bool VerifyCrossCompiling(CpuFeature f) {
uint64_t mask = flag2set(f);
return cross_compile_ == 0 ||
(cross_compile_ & mask) == mask;
}

static bool SupportsCrankshaft() { return true; }

private:
static bool Check(CpuFeature f, uint64_t set) {
return (set & flag2set(f)) != 0;
}

static uint64_t flag2set(CpuFeature f) {
return static_cast<uint64_t>(1) << f;
}

#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
static uint64_t found_by_runtime_probing_only_;

static uint64_t cross_compile_;

friend class ExternalReference;
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};

#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
V(and) \
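The CpuFeatureScope example in the deleted comment block remains the intended call-site pattern, only routed through the single consolidated CpuFeatures. A compilable sketch of that pattern, using trivial stand-ins for the real classes:

#include <cstdio>

enum CpuFeature { SSE3, NUMBER_OF_CPU_FEATURES };

struct CpuFeatures {
  static bool IsSupported(CpuFeature) { return true; }  // stand-in for the real query
};

struct Assembler {};  // stand-in

struct CpuFeatureScope {
  // The real scope asserts the feature is supported and enables it for the assembler.
  CpuFeatureScope(Assembler*, CpuFeature) {}
};

static void GenerateFloatCode(Assembler* assm) {
  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatureScope fscope(assm, SSE3);
    std::printf("emit SSE3 code\n");
  } else {
    std::printf("emit SSE2 fallback\n");
  }
}

int main() {
  Assembler assm;
  GenerateFloatCode(&assm);
  return 0;
}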
@ -1628,7 +1628,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if (expr->may_store_doubles() || expr->depth() > 1 ||
Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));

@ -60,7 +60,7 @@ int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {

Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
if (root_array_available_ && !Serializer::enabled(isolate())) {
if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(target);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
return Operand(kRootRegister, static_cast<int32_t>(delta));

@ -72,7 +72,7 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,

void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && !Serializer::enabled(isolate())) {
if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));

@ -90,7 +90,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {

void MacroAssembler::Store(ExternalReference destination, Register source) {
if (root_array_available_ && !Serializer::enabled(isolate())) {
if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);

@ -109,7 +109,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {

void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
if (root_array_available_ && !Serializer::enabled(isolate())) {
if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));

@ -122,7 +122,7 @@ void MacroAssembler::LoadAddress(Register destination,

int MacroAssembler::LoadAddressSize(ExternalReference source) {
if (root_array_available_ && !Serializer::enabled(isolate())) {
if (root_array_available_ && !serializer_enabled()) {
// This calculation depends on the internals of LoadAddress.
// It's correctness is ensured by the asserts in the Call
// instruction below.

@ -144,7 +144,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {

void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !Serializer::enabled(isolate())) {
if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}

@ -252,7 +252,7 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch,
Label::Distance distance) {
if (Serializer::enabled(isolate())) {
if (serializer_enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
@ -66,6 +66,7 @@ static const Register arg2 = rsi;

TEST(AssemblerX64ReturnOperation) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -88,6 +89,7 @@ TEST(AssemblerX64ReturnOperation) {

TEST(AssemblerX64StackOperations) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -120,6 +122,7 @@ TEST(AssemblerX64StackOperations) {

TEST(AssemblerX64ArithmeticOperations) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -142,6 +145,7 @@ TEST(AssemblerX64ArithmeticOperations) {

TEST(AssemblerX64CmpbOperation) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -173,6 +177,7 @@ TEST(AssemblerX64CmpbOperation) {

TEST(AssemblerX64ImulOperation) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -201,6 +206,7 @@ TEST(AssemblerX64ImulOperation) {

TEST(AssemblerX64XchglOperations) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -229,6 +235,7 @@ TEST(AssemblerX64XchglOperations) {

TEST(AssemblerX64OrlOperations) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -253,6 +260,7 @@ TEST(AssemblerX64OrlOperations) {

TEST(AssemblerX64RollOperations) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -275,6 +283,7 @@ TEST(AssemblerX64RollOperations) {

TEST(AssemblerX64SublOperations) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -299,6 +308,7 @@ TEST(AssemblerX64SublOperations) {

TEST(AssemblerX64TestlOperations) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -328,6 +338,7 @@ TEST(AssemblerX64TestlOperations) {

TEST(AssemblerX64XorlOperations) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -352,6 +363,7 @@ TEST(AssemblerX64XorlOperations) {

TEST(AssemblerX64MemoryOperands) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -386,6 +398,7 @@ TEST(AssemblerX64MemoryOperands) {

TEST(AssemblerX64ControlFlow) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -415,6 +428,7 @@ TEST(AssemblerX64ControlFlow) {

TEST(AssemblerX64LoopImmediates) {
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,

@ -656,7 +656,6 @@
'../../src/arm64/code-stubs-arm64.h',
'../../src/arm64/constants-arm64.h',
'../../src/arm64/cpu-arm64.cc',
'../../src/arm64/cpu-arm64.h',
'../../src/arm64/debug-arm64.cc',
'../../src/arm64/decoder-arm64.cc',
'../../src/arm64/decoder-arm64.h',