Revert 13117: "Enable stub generation using Hydrogen/Lithium (again)"

TBR=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/11415261

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13120 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: danno@chromium.org
Date:   2012-12-03 17:16:51 +00:00
Parent: 702cc25def
Commit: 66f6a8182c
96 changed files with 2387 additions and 3390 deletions


@ -81,12 +81,6 @@ endif
ifeq ($(liveobjectlist), on)
GYPFLAGS += -Dv8_use_liveobjectlist=true
endif
# vfp2=off
ifeq ($(vfp2), off)
GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
else
GYPFLAGS += -Dv8_can_use_vfp2_instructions=true
endif
# vfp3=off
ifeq ($(vfp3), off)
GYPFLAGS += -Dv8_can_use_vfp3_instructions=false


@ -47,15 +47,6 @@ namespace v8 {
namespace internal {
ArmDoubleRegister ArmDoubleRegister::FromAllocationIndex(int index) {
if (CpuFeatures::IsSupported(VFP2)) {
return DwVfpRegister::FromAllocationIndex(index);
} else {
return SoftFloatRegister::FromAllocationIndex(index);
}
}
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));


@ -85,33 +85,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
int Register::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return kMaxNumAllocatableRegisters;
} else {
return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
}
}
int DoubleRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return DwVfpRegister::kMaxNumAllocatableRegisters;
} else {
return SoftFloatRegister::kMaxNumAllocatableRegisters;
}
}
const char* DoubleRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(VFP2)) {
return DwVfpRegister::AllocationIndexToString(index);
} else {
return SoftFloatRegister::AllocationIndexToString(index);
}
}
void CpuFeatures::Probe() {
unsigned standard_features = static_cast<unsigned>(
OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();

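Note (sketch, not part of the diff): the hunk above drops the runtime choice of allocatable double-register counts based on VFP2 support. A minimal standalone illustration of that pattern, with illustrative names and a hard-wired feature probe rather than V8's CpuFeatures API:

#include <cstdio>

namespace sketch {

// Stand-in for CpuFeatures::IsSupported(VFP2); hard-wired for the example.
inline bool CpuHasVFP2() { return true; }

struct DoubleRegisters {
  static const int kNumRegisters = 16;             // d0..d15
  static const int kNumReserved = 2;               // d14 holds 0.0, d15 is scratch
  static const int kMaxNumAllocatable = kNumRegisters - kNumReserved;
  static const int kNumSoftFloatAllocatable = 1;   // single soft-float pseudo register

  // Decided at runtime from the CPU probe instead of being a compile-time constant.
  static int NumAllocatable() {
    return CpuHasVFP2() ? kMaxNumAllocatable : kNumSoftFloatAllocatable;
  }
};

}  // namespace sketch

int main() {
  std::printf("%d allocatable double registers\n",
              sketch::DoubleRegisters::NumAllocatable());
  return 0;
}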

@ -71,23 +71,21 @@ namespace internal {
// Core register
struct Register {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters = 8;
static const int kGPRsPerNonVFP2Double = 2;
static int NumAllocatableRegisters();
static const int kNumAllocatableRegisters = 8;
static const int kSizeInBytes = 4;
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < NumAllocatableRegisters());
ASSERT(reg.code() < kNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"r0",
"r1",
@ -190,57 +188,26 @@ struct SwVfpRegister {
};
struct ArmDoubleRegister {
static const int kMaxNumRegisters = 16;
// Double word VFP register.
struct DwVfpRegister {
static const int kNumRegisters = 16;
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, that does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static const int kNumReservedRegisters = 2;
static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
static const int kNumAllocatableRegisters = kNumRegisters -
kNumReservedRegisters;
explicit ArmDoubleRegister(int code) { code_ = code; }
static int NumAllocatableRegisters();
static int NumRegisters() { return kNumRegisters; }
static const char* AllocationIndexToString(int index);
inline static ArmDoubleRegister FromAllocationIndex(int index);
inline static int ToAllocationIndex(ArmDoubleRegister reg) {
return reg.code();
}
static ArmDoubleRegister from_code(int code) {
ArmDoubleRegister r = ArmDoubleRegister(code);
return r;
}
bool is_valid() const {
return 0 <= code_ && code_ < NumRegisters();
}
bool is(ArmDoubleRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
int code_;
};
// Double word VFP register.
struct DwVfpRegister : ArmDoubleRegister {
static const int kNumRegisters = 16;
explicit DwVfpRegister(int code) : ArmDoubleRegister(code) {}
inline int ToAllocationIndex(DwVfpRegister reg);
inline static int ToAllocationIndex(DwVfpRegister reg);
static DwVfpRegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"d0",
"d1",
@ -261,7 +228,8 @@ struct DwVfpRegister : ArmDoubleRegister {
}
static DwVfpRegister from_code(int code) {
return DwVfpRegister(code);
DwVfpRegister r = { code };
return r;
}
// Supporting d0 to d15, can be later extended to d31.
@ -294,37 +262,12 @@ struct DwVfpRegister : ArmDoubleRegister {
*m = (code_ & 0x10) >> 4;
*vm = code_ & 0x0F;
}
int code_;
};
// Double word VFP register.
struct SoftFloatRegister : ArmDoubleRegister {
static const int kNumRegisters = 1;
static const int kMaxNumAllocatableRegisters = kNumRegisters;
explicit SoftFloatRegister(int code) : ArmDoubleRegister(code) {}
static SoftFloatRegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"sfpd0"
};
return names[index];
}
static SoftFloatRegister from_code(int code) {
SoftFloatRegister r = SoftFloatRegister(code);
return r;
}
};
typedef ArmDoubleRegister DoubleRegister;
typedef DwVfpRegister DoubleRegister;
// Support for the VFP registers s0 to s31 (d0 to d15).
@ -362,26 +305,23 @@ const SwVfpRegister s29 = { 29 };
const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };
const DwVfpRegister no_dreg = DwVfpRegister(-1);
const DwVfpRegister d0 = DwVfpRegister(0);
const DwVfpRegister d1 = DwVfpRegister(1);
const DwVfpRegister d2 = DwVfpRegister(2);
const DwVfpRegister d3 = DwVfpRegister(3);
const DwVfpRegister d4 = DwVfpRegister(4);
const DwVfpRegister d5 = DwVfpRegister(5);
const DwVfpRegister d6 = DwVfpRegister(6);
const DwVfpRegister d7 = DwVfpRegister(7);
const DwVfpRegister d8 = DwVfpRegister(8);
const DwVfpRegister d9 = DwVfpRegister(9);
const DwVfpRegister d10 = DwVfpRegister(10);
const DwVfpRegister d11 = DwVfpRegister(11);
const DwVfpRegister d12 = DwVfpRegister(12);
const DwVfpRegister d13 = DwVfpRegister(13);
const DwVfpRegister d14 = DwVfpRegister(14);
const DwVfpRegister d15 = DwVfpRegister(15);
const Register sfpd_lo = { kRegister_r6_Code };
const Register sfpd_hi = { kRegister_r7_Code };
const DwVfpRegister no_dreg = { -1 };
const DwVfpRegister d0 = { 0 };
const DwVfpRegister d1 = { 1 };
const DwVfpRegister d2 = { 2 };
const DwVfpRegister d3 = { 3 };
const DwVfpRegister d4 = { 4 };
const DwVfpRegister d5 = { 5 };
const DwVfpRegister d6 = { 6 };
const DwVfpRegister d7 = { 7 };
const DwVfpRegister d8 = { 8 };
const DwVfpRegister d9 = { 9 };
const DwVfpRegister d10 = { 10 };
const DwVfpRegister d11 = { 11 };
const DwVfpRegister d12 = { 12 };
const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a

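Note (sketch, not part of the diff): the register-constant hunk above swaps constructor calls such as DwVfpRegister(0) back to brace initializers ({ 0 }). That only works because the restored struct is a plain aggregate; a short illustration with made-up names:

// A struct with no user-declared constructor is an aggregate, so constants
// defined with "= { code }" get static initialization instead of running a
// constructor at startup.
struct DoubleRegLike {
  int code_;                                          // plain data member
  int code() const { return code_; }
  bool is(DoubleRegLike other) const { return code_ == other.code_; }
};

const DoubleRegLike d0_like = { 0 };   // as in: const DwVfpRegister d0 = { 0 };
const DoubleRegLike d1_like = { 1 };

// The removed ArmDoubleRegister hierarchy declared explicit constructors, which
// is why its constants had to be written as DwVfpRegister(0), DwVfpRegister(1), ...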

@ -1259,26 +1259,6 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
__ CallRuntime(Runtime::kNotifyICMiss, 0);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
__ mov(ip, lr); // Stash the miss continuation
__ add(sp, sp, Operand(kPointerSize)); // Ignore state
__ pop(lr); // Restore LR to continuation in JSFunction
__ mov(pc, ip); // Jump to miss handler
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{


@ -37,23 +37,6 @@ namespace v8 {
namespace internal {
CodeStubInterfaceDescriptor*
KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
static CodeStubInterfaceDescriptor* result = NULL;
if (result == NULL) {
Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
static Register registers[] = { r1, r0 };
static CodeStubInterfaceDescriptor info = {
2,
registers,
miss
};
result = &info;
}
return result;
}
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@ -520,7 +503,7 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public PlatformCodeStub {
class ConvertToDoubleStub : public CodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
@ -3585,10 +3568,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = r2;
const Register heapnumbermap = r5;
const Register heapnumber = r0;
const DwVfpRegister double_base = d1;
const DwVfpRegister double_exponent = d2;
const DwVfpRegister double_result = d3;
const DwVfpRegister double_scratch = d0;
const DoubleRegister double_base = d1;
const DoubleRegister double_exponent = d2;
const DoubleRegister double_result = d3;
const DoubleRegister double_scratch = d0;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
const Register scratch2 = r7;
@ -3798,29 +3781,12 @@ void CodeStub::GenerateStubsAheadOfTime() {
void CodeStub::GenerateFPStubs() {
SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
? kSaveFPRegs
: kDontSaveFPRegs;
CEntryStub save_doubles(1, mode);
StoreBufferOverflowStub stub(mode);
// These stubs might already be in the snapshot, detect that and don't
// regenerate, which would lead to code stub initialization state being messed
// up.
Code* save_doubles_code = NULL;
Code* store_buffer_overflow_code = NULL;
if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope2(VFP2);
save_doubles_code = *save_doubles.GetCode();
store_buffer_overflow_code = *stub.GetCode();
} else {
save_doubles_code = *save_doubles.GetCode();
store_buffer_overflow_code = *stub.GetCode();
}
save_doubles_code->set_is_pregenerated(true);
store_buffer_overflow_code->set_is_pregenerated(true);
}
ISOLATE->set_fp_stubs_generated(true);
CEntryStub save_doubles(1, kSaveFPRegs);
Handle<Code> code = save_doubles.GetCode();
code->set_is_pregenerated(true);
StoreBufferOverflowStub stub(kSaveFPRegs);
stub.GetCode()->set_is_pregenerated(true);
code->GetIsolate()->set_fp_stubs_generated(true);
}

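Note (sketch, not part of the diff): the GenerateFPStubs hunk above replaces a cache-aware path ("detect that and don't regenerate") with unconditional generation of the CEntry and StoreBufferOverflow stubs. A rough illustration of the removed check-before-generate idea, using a hypothetical cache in place of V8's FindCodeInCache/GetCode machinery:

#include <cstdio>
#include <map>
#include <string>

// Hypothetical stub cache standing in for code baked into the snapshot.
static std::map<std::string, int> g_stub_cache;
static int g_next_handle = 1;

int EnsurePregenerated(const std::string& stub_name) {
  std::map<std::string, int>::iterator it = g_stub_cache.find(stub_name);
  if (it != g_stub_cache.end()) {
    // Already present (e.g. from the snapshot): regenerating would clobber
    // its initialization state, so reuse it.
    return it->second;
  }
  int handle = g_next_handle++;       // pretend code was emitted here
  g_stub_cache[stub_name] = handle;   // record it as pregenerated
  return handle;
}

int main() {
  std::printf("first:  %d\n", EnsurePregenerated("CEntryStub(save doubles)"));
  std::printf("second: %d\n", EnsurePregenerated("CEntryStub(save doubles)"));  // same handle
  return 0;
}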

@ -36,7 +36,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub {
class TranscendentalCacheStub: public CodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@ -58,7 +58,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
};
class StoreBufferOverflowStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@ -77,7 +77,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
class UnaryOpStub: public PlatformCodeStub {
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@ -219,7 +219,7 @@ enum StringAddFlags {
};
class StringAddStub: public PlatformCodeStub {
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@ -242,7 +242,7 @@ class StringAddStub: public PlatformCodeStub {
};
class SubStringStub: public PlatformCodeStub {
class SubStringStub: public CodeStub {
public:
SubStringStub() {}
@ -255,7 +255,7 @@ class SubStringStub: public PlatformCodeStub {
class StringCompareStub: public PlatformCodeStub {
class StringCompareStub: public CodeStub {
public:
StringCompareStub() { }
@ -295,7 +295,7 @@ class StringCompareStub: public PlatformCodeStub {
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
class WriteInt32ToHeapNumberStub : public CodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@ -329,7 +329,7 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
};
class NumberToStringStub: public PlatformCodeStub {
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
@ -355,7 +355,7 @@ class NumberToStringStub: public PlatformCodeStub {
};
class RecordWriteStub: public PlatformCodeStub {
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
@ -511,7 +511,7 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
@ -570,7 +570,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
class RegExpCEntryStub: public PlatformCodeStub {
class RegExpCEntryStub: public CodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
@ -589,7 +589,7 @@ class RegExpCEntryStub: public PlatformCodeStub {
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
// moved by GC
class DirectCEntryStub: public PlatformCodeStub {
class DirectCEntryStub: public CodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
@ -739,7 +739,7 @@ class FloatingPointHelper : public AllStatic {
};
class StringDictionaryLookupStub: public PlatformCodeStub {
class StringDictionaryLookupStub: public CodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

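Note (sketch, not part of the diff): the header hunk above moves every listed stub from PlatformCodeStub back to plain CodeStub, i.e. it removes the split introduced for Hydrogen/Lithium-generated stubs. A much-simplified illustration of that split (names and signatures are illustrative; the real V8 classes carry far more state):

class MacroAssembler;  // stands in for V8's assembler

class CodeStubSketch {
 public:
  virtual ~CodeStubSketch() {}
};

// Hand-written, per-architecture stubs: code is emitted directly through the
// macro assembler (the PlatformCodeStub side of the split).
class PlatformCodeStubSketch : public CodeStubSketch {
 public:
  virtual void Generate(MacroAssembler* masm) = 0;
};

// Register interface of a compiler-generated stub, shaped after the
// { 2, registers, miss } descriptor removed in an earlier hunk.
struct InterfaceDescriptorSketch {
  int register_param_count;            // e.g. 2, for parameters in r1 and r0
  const char* const* register_params;  // names of the parameter registers
  const void* deoptimization_handler;  // continuation used when the stub deopts
};

// Stubs compiled through Hydrogen/Lithium (hypothetical name for this sketch):
// instead of hand-written assembly they expose a register interface and let
// the optimizing pipeline build the code.
class HydrogenCodeStubSketch : public CodeStubSketch {
 public:
  virtual InterfaceDescriptorSketch* GetInterfaceDescriptor() = 0;
};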

@ -73,10 +73,10 @@ UnaryMathFunction CreateExpFunction() {
{
CpuFeatures::Scope use_vfp(VFP2);
DwVfpRegister input = d0;
DwVfpRegister result = d1;
DwVfpRegister double_scratch1 = d2;
DwVfpRegister double_scratch2 = d3;
DoubleRegister input = d0;
DoubleRegister result = d1;
DoubleRegister double_scratch1 = d2;
DoubleRegister double_scratch2 = d3;
Register temp1 = r4;
Register temp2 = r5;
Register temp3 = r6;
@ -527,10 +527,10 @@ static MemOperand ExpConstant(int index, Register base) {
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3) {


@ -44,10 +44,6 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
CodeGenerator() {
InitializeAstVisitor();
}
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@ -72,8 +68,6 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@ -98,10 +92,10 @@ class StringCharLoadGenerator : public AllStatic {
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);


@ -222,7 +222,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
compiled_code_->deoptimization_data());
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@ -256,7 +256,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@ -348,7 +348,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
compiled_code_->entry() + pc_offset);
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@ -461,70 +461,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
int frame_index) {
//
// FROM TO <-fp
// | .... | | .... |
// +-------------------------+ +-------------------------+
// | JSFunction continuation | | JSFunction continuation |
// +-------------------------+ +-------------------------+<-sp
// | | saved frame (fp) |
// | +=========================+<-fp
// | | JSFunction context |
// v +-------------------------+
// | COMPILED_STUB marker | fp = saved frame
// +-------------------------+ f8 = JSFunction context
// | |
// | ... |
// | |
// +-------------------------+<-sp
//
//
int output_frame_size = 1 * kPointerSize;
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, 0);
Code* notify_miss =
isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(notify_miss->entry()));
ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
int major_key = compiled_code_->major_key();
CodeStubInterfaceDescriptor* descriptor =
isolate_->code_stub_interface_descriptors()[major_key];
Handle<Code> miss_ic(descriptor->deoptimization_handler);
output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
unsigned input_frame_size = input_->GetFrameSize();
intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
output_frame->SetFrameSlot(0, value);
value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
output_frame->SetRegister(fp.code(), value);
output_frame->SetFp(value);
value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
output_frame->SetRegister(cp.code(), value);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
ASSERT(opcode == Translation::REGISTER);
USE(opcode);
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
output_frame->SetRegister(r1.code(), input_value);
int32_t next = iterator->Next();
opcode = static_cast<Translation::Opcode>(next);
ASSERT(opcode == Translation::REGISTER);
input_reg = iterator->Next();
input_value = input_->GetRegister(input_reg);
output_frame->SetRegister(r0.code(), input_value);
ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
}
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
@ -952,7 +888,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@ -972,6 +908,7 @@ void Deoptimizer::EntryGenerator::Generate() {
Isolate* isolate = masm()->isolate();
CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@ -979,29 +916,23 @@ void Deoptimizer::EntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Save all VFP registers before messing with them.
DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
DwVfpRegister last =
DwVfpRegister::FromAllocationIndex(
DwVfpRegister::kMaxNumAllocatableRegisters - 1);
ASSERT(last.code() > first.code());
ASSERT((last.code() - first.code()) ==
(DwVfpRegister::kMaxNumAllocatableRegisters - 1));
// Save all VFP registers before messing with them.
DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
DwVfpRegister last =
DwVfpRegister::FromAllocationIndex(
DwVfpRegister::kNumAllocatableRegisters - 1);
ASSERT(last.code() > first.code());
ASSERT((last.code() - first.code()) ==
(DwVfpRegister::kNumAllocatableRegisters - 1));
#ifdef DEBUG
int max = DwVfpRegister::kMaxNumAllocatableRegisters - 1;
for (int i = 0; i <= max; i++) {
ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
(DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
}
#endif
__ vstm(db_w, sp, first, last);
} else {
__ sub(sp, sp, Operand(kDoubleRegsSize));
for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
(DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
}
#endif
__ vstm(db_w, sp, first, last);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@ -1060,17 +991,14 @@ void Deoptimizer::EntryGenerator::Generate() {
__ str(r2, MemOperand(r1, offset));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Copy VFP registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Remove the bailout id, eventually return address, and the saved registers
@ -1091,13 +1019,10 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
__ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
@ -1114,29 +1039,24 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
Label outer_push_loop, inner_push_loop;
// Outer loop state: r0 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
__ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
__ add(r1, r0, Operand(r1, LSL, 2));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
__ ldr(r2, MemOperand(r0, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
__ bind(&inner_loop_header);
__ cmp(r3, Operand(0));
__ b(ne, &inner_push_loop); // test for gt?
__ add(r0, r0, Operand(kPointerSize));
__ bind(&outer_loop_header);
__ cmp(r0, r1);
__ b(lt, &outer_push_loop);


@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
register_spills_[i] = NULL;
}
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
double_register_spills_[i] = NULL;
}
}
@ -612,7 +612,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@ -1685,7 +1684,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@ -1710,7 +1708,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@ -1730,7 +1727,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@ -1968,16 +1964,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
// float->double conversion on non-VFP2 requires an extra scratch
// register. For convenience, just mark the elements register as "UseTemp"
// so that it can be used as a temp during the float->double conversion
// after it's no longer needed after the float load.
bool needs_temp =
!CpuFeatures::IsSupported(VFP2) &&
(elements_kind == EXTERNAL_FLOAT_ELEMENTS);
LOperand* external_pointer = needs_temp
? UseTempRegister(instr->elements())
: UseRegister(instr->elements());
LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
@ -2195,17 +2182,8 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LParameter* result = new(zone()) LParameter;
if (info()->IsOptimizing()) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(result, spill_index);
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
Register reg = descriptor->register_params[instr->index()];
return DefineFixed(result, reg);
}
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(new(zone()) LParameter, spill_index);
}


@ -254,11 +254,6 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersDoubleRegisters() const { return is_call_; }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
@ -2339,9 +2334,8 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
LOperand* double_register_spills_[
DoubleRegister::kMaxNumAllocatableRegisters];
LOperand* register_spills_[Register::kNumAllocatableRegisters];
LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
};


@ -65,6 +65,8 @@ bool LCodeGen::GenerateCode() {
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
CpuFeatures::Scope scope2(ARMv7);
CodeStub::GenerateFPStubs();
@ -116,38 +118,37 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop_at");
}
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop_at");
}
#endif
// r1: Callee's JS function.
// cp: Callee's context.
// fp: Caller's frame pointer.
// lr: Caller's pc.
// r1: Callee's JS function.
// cp: Callee's context.
// fp: Caller's frame pointer.
// lr: Caller's pc.
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
int receiver_offset = scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
int receiver_offset = scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
{
PredictableCodeSizeScope predictible_code_size_scope(
masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
@ -158,7 +159,6 @@ bool LCodeGen::GeneratePrologue() {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
frame_is_built_ = true;
}
// Reserve space for the stack slots needed by the code.
@ -178,7 +178,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Possibly allocate a local context.
int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in r1.
@ -214,7 +214,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@ -272,31 +272,10 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
if (NeedsDeferredFrame()) {
Comment(";;; Deferred build frame",
code->instruction_index(),
code->instr()->Mnemonic());
ASSERT(!frame_is_built_);
ASSERT(info()->IsStub());
frame_is_built_ = true;
__ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
__ add(fp, sp, Operand(2 * kPointerSize));
}
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Deferred destroy frame",
code->instruction_index(),
code->instr()->Mnemonic());
ASSERT(frame_is_built_);
__ pop(ip);
__ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
frame_is_built_ = false;
}
__ jmp(code->exit());
}
}
@ -318,68 +297,24 @@ bool LCodeGen::GenerateDeoptJumpTable() {
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
deopt_jump_table_.length() * 7)) {
deopt_jump_table_.length() * 2)) {
Abort("Generated code is too large");
}
// Block the constant pool emission during the jump table emission.
__ BlockConstPoolFor(deopt_jump_table_.length());
__ RecordComment("[ Deoptimisation jump table");
Label table_start;
__ bind(&table_start);
Label needs_frame_not_call;
Label needs_frame_is_call;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
Address entry = deopt_jump_table_[i].address;
if (deopt_jump_table_[i].needs_frame) {
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
if (deopt_jump_table_[i].is_lazy_deopt) {
if (needs_frame_is_call.is_bound()) {
__ b(&needs_frame_is_call);
} else {
__ bind(&needs_frame_is_call);
__ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
__ add(fp, sp, Operand(2 * kPointerSize));
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, ip);
}
} else {
if (needs_frame_not_call.is_bound()) {
__ b(&needs_frame_not_call);
} else {
__ bind(&needs_frame_not_call);
__ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
__ add(fp, sp, Operand(2 * kPointerSize));
__ mov(pc, ip);
}
}
} else {
if (deopt_jump_table_[i].is_lazy_deopt) {
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
} else {
__ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
}
}
masm()->CheckConstPool(false, false);
__ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
__ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
}
ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
deopt_jump_table_.length() * 2);
__ RecordComment("]");
// Force constant pool emission at the end of the deopt jump table to make
// sure that no constant pools are emitted after.
masm()->CheckConstPool(true, false);
// The deoptimization jump table is the last part of the instruction
// sequence. Mark the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
@ -399,8 +334,8 @@ Register LCodeGen::ToRegister(int index) const {
}
DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
return DwVfpRegister::FromAllocationIndex(index);
DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
return DoubleRegister::FromAllocationIndex(index);
}
@ -441,15 +376,15 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
}
DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DwVfpRegister dbl_scratch) {
DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch) {
if (op->IsDoubleRegister()) {
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
@ -585,9 +520,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation,
arguments_index,
arguments_count);
bool has_closure_id = !info()->closure().is_null() &&
*info()->closure() != *environment->closure();
int closure_id = has_closure_id
int closure_id = *info()->closure() != *environment->closure()
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@ -608,9 +541,6 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case STUB:
translation->BeginCompiledStubFrame();
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
@ -806,11 +736,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@ -826,19 +752,14 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
bool needs_lazy_deopt = info()->IsStub();
ASSERT(info()->IsStub() || frame_is_built_);
if (cc == al && !needs_lazy_deopt) {
if (cc == al) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry) ||
(deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
(deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
deopt_jump_table_.Add(table_entry, zone());
(deopt_jump_table_.last().address != entry)) {
deopt_jump_table_.Add(JumpTableEntry(entry), zone());
}
__ b(cc, &deopt_jump_table_.last().label);
}
@ -1447,7 +1368,6 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
LOperand* left_argument,
LOperand* right_argument,
Token::Value op) {
CpuFeatures::Scope vfp_scope(VFP2);
Register left = ToRegister(left_argument);
Register right = ToRegister(right_argument);
@ -1733,7 +1653,6 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
CpuFeatures::Scope scope(VFP2);
double v = instr->value();
__ Vmov(result, v, scratch0());
}
@ -1902,10 +1821,9 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
CpuFeatures::Scope scope(VFP2);
DwVfpRegister left_reg = ToDoubleRegister(left);
DwVfpRegister right_reg = ToDoubleRegister(right);
DwVfpRegister result_reg = ToDoubleRegister(instr->result());
DoubleRegister left_reg = ToDoubleRegister(left);
DoubleRegister right_reg = ToDoubleRegister(right);
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Label check_nan_left, check_zero, return_left, return_right, done;
__ VFPCompareAndSetFlags(left_reg, right_reg);
__ b(vs, &check_nan_left);
@ -1948,10 +1866,9 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister left = ToDoubleRegister(instr->left());
DwVfpRegister right = ToDoubleRegister(instr->right());
DwVfpRegister result = ToDoubleRegister(instr->result());
DoubleRegister left = ToDoubleRegister(instr->left());
DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
__ vadd(result, left, right);
@ -2039,8 +1956,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, Operand(0));
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister reg = ToDoubleRegister(instr->value());
DoubleRegister reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
// Test the double value. Zero and NaN are false.
@ -2125,9 +2041,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
CpuFeatures::Scope scope(VFP2);
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
@ -2205,7 +2120,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
CpuFeatures::Scope scope(VFP2);
// Compare left and right operands as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@ -2744,21 +2658,16 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (NeedsEagerFrame()) {
int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand(sp_delta));
}
if (info()->IsStub()) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
}
@ -3108,63 +3017,17 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
} else {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
Register value = external_pointer;
__ ldr(value, MemOperand(scratch0(), additional_offset));
__ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
__ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
__ and_(scratch0(), scratch0(),
Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(scratch0(), Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(scratch0(), Operand(0xff));
__ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(scratch0(),
scratch0(),
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(sfpd_hi, value, Operand(kBinary32SignMask));
__ orr(sfpd_hi, sfpd_hi,
Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(sfpd_hi, sfpd_hi,
Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
__ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
} else {
__ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
__ ldr(sfpd_hi, MemOperand(scratch0(),
additional_offset + kPointerSize));
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
} else {
Register result = ToRegister(instr->result());
@ -3233,28 +3096,23 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
key = ToRegister(instr->key());
}
int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
((constant_key + instr->additional_index()) << element_size_shift);
Operand operand = key_is_constant
? Operand(((constant_key + instr->additional_index()) <<
element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements, Operand(key, LSL, shift_size));
__ add(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
(instr->additional_index() << element_size_shift)));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ add(elements, elements, Operand(base_offset));
__ vldr(result, elements, 0);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
} else {
__ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
__ ldr(sfpd_lo, MemOperand(elements, base_offset));
if (instr->hydrogen()->RequiresHoleCheck()) {
ASSERT(kPointerSize == sizeof(kHoleNanLower32));
__ cmp(sfpd_hi, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ vldr(result, elements, 0);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
}
@ -3690,7 +3548,6 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(VFP2);
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
@ -3727,8 +3584,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@ -3753,8 +3609,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
@ -3819,18 +3674,16 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DwVfpRegister temp = ToDoubleRegister(instr->temp());
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister temp = ToDoubleRegister(instr->temp());
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
@ -3849,7 +3702,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
CpuFeatures::Scope scope(VFP2);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@ -3882,7 +3734,6 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
CpuFeatures::Scope scope(VFP2);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@ -3961,11 +3812,10 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
DwVfpRegister double_scratch2 = double_scratch0();
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
DoubleRegister double_scratch2 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
@ -4251,7 +4101,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
CpuFeatures::Scope scope(VFP2);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
@ -4322,7 +4171,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
@ -4599,7 +4447,6 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
CpuFeatures::Scope scope(VFP2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@ -4617,7 +4464,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
CpuFeatures::Scope scope(VFP2);
LOperand* input = instr->value();
LOperand* output = instr->result();
@ -4679,49 +4525,13 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This functions does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
masm->orr(hiword, scratch,
Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
masm->mov(loword, Operand(0, RelocInfo::NONE));
masm->orr(hiword, scratch,
Operand(hiword, LSL, -mantissa_shift_for_hi_word));
}
// If least significant bit of biased exponent was not 1 it was corrupted
// by most significant bit of mantissa so we should fix that.
if (!(biased_exponent & 1)) {
masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
DwVfpRegister dbl_scratch = double_scratch0();
DoubleRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
// Preserve the value of all registers.
@ -4736,40 +4546,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
} else {
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
sfpd_lo, sfpd_hi,
scratch0(), s0);
}
__ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
} else {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ vmov(flt_scratch, src);
__ vcvt_f64_u32(dbl_scratch, flt_scratch);
} else {
Label no_leading_zero, done;
__ tst(src, Operand(0x80000000));
__ b(ne, &no_leading_zero);
// Integer has one leading zeros.
GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
__ b(&done);
__ bind(&no_leading_zero);
GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
__ b(&done);
}
__ vmov(flt_scratch, src);
__ vcvt_f64_u32(dbl_scratch, flt_scratch);
}
if (FLAG_inline_new) {
__ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ Move(dst, r5);
__ b(&done);
}
@ -4789,13 +4575,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
} else {
__ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
__ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
}
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
__ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
@ -4812,7 +4592,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
DwVfpRegister input_reg = ToDoubleRegister(instr->value());
DoubleRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
Register temp1 = ToRegister(instr->temp());
@ -4828,13 +4608,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ vstr(input_reg, reg, HeapNumber::kValueOffset);
} else {
__ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
__ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
}
__ vstr(input_reg, reg, HeapNumber::kValueOffset);
// Now that we have finished with the object's real address tag it
__ add(reg, reg, Operand(kHeapObjectTag));
}
@ -4875,14 +4649,13 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
DwVfpRegister result_reg,
DoubleRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env) {
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
CpuFeatures::Scope scope(VFP2);
Label load_smi, heap_number, done;
@ -4957,7 +4730,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
CpuFeatures::Scope scope(VFP2);
Register scratch3 = ToRegister(instr->temp2());
SwVfpRegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
@ -5049,7 +4821,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
DwVfpRegister result_reg = ToDoubleRegister(result);
DoubleRegister result_reg = ToDoubleRegister(result);
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
@ -5198,16 +4970,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatures::Scope vfp_scope(VFP2);
DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
__ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
CpuFeatures::Scope scope(VFP2);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@ -5215,11 +4985,10 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
CpuFeatures::Scope scope(VFP2);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
@ -5796,7 +5565,6 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt() {
if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();

View File

@ -61,7 +61,6 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@ -77,15 +76,6 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
@ -94,12 +84,12 @@ class LCodeGen BASE_EMBEDDED {
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DwVfpRegister ToDoubleRegister(LOperand* op) const;
DoubleRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DwVfpRegister dbl_scratch);
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
@ -203,7 +193,7 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return info()->num_parameters(); }
int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@ -285,7 +275,7 @@ class LCodeGen BASE_EMBEDDED {
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@ -318,7 +308,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
DwVfpRegister result,
DoubleRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env);
@ -379,15 +369,11 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* environment);
struct JumpTableEntry {
inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
explicit inline JumpTableEntry(Address entry)
: label(),
address(entry),
needs_frame(frame),
is_lazy_deopt(is_lazy) { }
address(entry) { }
Label label;
Address address;
bool needs_frame;
bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt();
@ -416,7 +402,6 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@ -432,7 +417,6 @@ class LCodeGen BASE_EMBEDDED {
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;

View File

@ -171,10 +171,8 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
CpuFeatures::Scope scope(VFP2);
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(VFP2);
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@ -194,10 +192,8 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
CpuFeatures::Scope scope(VFP2);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(VFP2);
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
@ -233,8 +229,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
CpuFeatures::Scope scope(VFP2);
// ip is overwritten while saving the value to the destination.
// ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kScratchDoubleReg.low(), source_operand);
@ -272,8 +267,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
CpuFeatures::Scope scope(VFP2);
DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
@ -282,8 +276,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(VFP2);
MemOperand source_operand = cgen_->ToMemOperand(source);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {

View File

@ -290,7 +290,7 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
}
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatures::Scope scope(VFP2);
if (!dst.is(src)) {
@ -643,19 +643,19 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
kDoubleSize));
for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
kDoubleSize));
PopSafepointRegisters();
}
@ -691,7 +691,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// General purpose registers are pushed last on the stack.
int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
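  // (Worked example, assuming the usual 14 allocatable VFP double registers
  // and 4-byte pointers: doubles_size is 14 * 8 = 112 bytes, so the register
  // at safepoint stack index i is read from sp + 112 + i * 4.)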
return MemOperand(sp, doubles_size + register_offset);
}
@ -967,7 +967,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
}
void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
@ -2717,10 +2717,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
? kSaveFPRegs
: kDontSaveFPRegs;
CEntryStub stub(1, mode);
CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@ -3396,9 +3393,9 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
if (use_eabi_hardfloat()) {
// In the hard floating point calling convention, we can use
// all double registers to pass doubles.
if (num_double_arguments > DoubleRegister::NumRegisters()) {
if (num_double_arguments > DoubleRegister::kNumRegisters) {
stack_passed_words +=
2 * (num_double_arguments - DoubleRegister::NumRegisters());
2 * (num_double_arguments - DoubleRegister::kNumRegisters);
}
} else {
// In the soft floating point calling convention, every double
@ -3439,7 +3436,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
@ -3449,8 +3446,8 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
}
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
DwVfpRegister dreg2) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
@ -3468,7 +3465,7 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
}
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
@ -3751,8 +3748,8 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
DwVfpRegister input_reg,
DwVfpRegister temp_double_reg) {
DoubleRegister input_reg,
DoubleRegister temp_double_reg) {
Label above_zero;
Label done;
Label in_bounds;

View File

@ -178,7 +178,7 @@ class MacroAssembler: public Assembler {
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DwVfpRegister dst, DwVfpRegister src);
void Move(DoubleRegister dst, DoubleRegister src);
// Load an object from the root table.
void LoadRoot(Register destination,
@ -1058,9 +1058,9 @@ class MacroAssembler: public Assembler {
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
void SetCallCDoubleArguments(DwVfpRegister dreg);
void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
void SetCallCDoubleArguments(DoubleRegister dreg);
void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@ -1076,7 +1076,7 @@ class MacroAssembler: public Assembler {
int num_reg_arguments,
int num_double_arguments);
void GetCFunctionDoubleResult(const DwVfpRegister dst);
void GetCFunctionDoubleResult(const DoubleRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
@ -1289,8 +1289,8 @@ class MacroAssembler: public Assembler {
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
DwVfpRegister input_reg,
DwVfpRegister temp_double_reg);
DoubleRegister input_reg,
DoubleRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
@ -1365,9 +1365,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
// Needs access to SafepointRegisterStackIndex for compiled frame
// Needs access to SafepointRegisterStackIndex for optimized frame
// traversal.
friend class CompiledFrame;
friend class OptimizedFrame;
};

View File

@ -1053,6 +1053,42 @@ static void StoreIntAsFloat(MacroAssembler* masm,
}
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
__ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
__ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
__ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
__ mov(loword, Operand(0, RelocInfo::NONE));
__ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
}
// If the least significant bit of the biased exponent was not 1, it was
// corrupted by the most significant bit of the mantissa, so we should fix that.
if (!(biased_exponent & 1)) {
__ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
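// Editor's sketch, not part of the original file: the same computation in
// plain C++ (assuming the <stdint.h> integer types and the IEEE 754 binary64
// layout used by HeapNumber: exponent bias 1023, exponent field at bit 20 of
// the high word, 20 mantissa bits in the top word). It is valid for the
// leading_zeroes values of 0 and 1 used by the callers, where the hi-word
// shift is always positive. Example: value = 0x80000000 with
// leading_zeroes = 0 gives hiword:loword = 0x41E00000:00000000,
// i.e. 2147483648.0.
static void UInt2DoubleSketch(uint32_t value, int leading_zeroes,
                              uint32_t* hiword, uint32_t* loword) {
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const uint32_t biased_exponent = 1023 + meaningful_bits;
  const int shift_hi = meaningful_bits - 20;  // Mantissa bits kept in hiword.
  const int shift_lo = 32 - shift_hi;         // Bits that spill into loword.
  *loword = value << shift_lo;
  *hiword = (biased_exponent << 20) | (value >> shift_hi);
  // The integer's implicit leading 1 overlaps the exponent's low bit; clear
  // it again when that exponent bit should be 0.
  if (!(biased_exponent & 1)) *hiword &= ~(1u << 20);
}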
#undef __
#define __ ACCESS_MASM(masm())
@ -3283,17 +3319,9 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
// -- r1 : receiver
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
if (receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements()) {
Handle<Code> stub = KeyedLoadFastElementStub(
receiver_map->instance_type() == JS_ARRAY_TYPE,
elements_kind).GetCode();
__ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
} else {
Handle<Code> stub =
KeyedLoadDictionaryElementStub().GetCode();
__ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
}
Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
__ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
@ -3698,6 +3726,339 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss_force_generic, slow, failed_allocation;
Register key = r0;
Register receiver = r1;
// This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller not to be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r3: elements array
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(key, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &miss_force_generic);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage
// We are not untagging the smi key but instead work with it
// as if it were premultiplied by 2.
STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_FLOAT_ELEMENTS:
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
case EXTERNAL_DOUBLE_ELEMENTS:
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 2));
__ vldr(d0, r2, 0);
} else {
__ add(r4, r3, Operand(key, LSL, 2));
// r4: pointer to the beginning of the double we want to load.
__ ldr(r2, MemOperand(r4, 0));
__ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
// For integer array types:
// r2: value
// For float array type:
// s0: value (if VFP2 is supported)
// r2: value (if VFP2 is not supported)
// For double array type:
// d0: value (if VFP2 is supported)
// r2/r3: value (if VFP2 is not supported)
if (elements_kind == EXTERNAL_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
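    // (The comparison below relies on the sign of value - 0xC0000000: the N
    // flag is set exactly when value >= 2^30 or value < -2^30, i.e. when the
    // 32-bit integer falls outside the 31-bit smi range.)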
Label box_int;
__ cmp(value, Operand(0xC0000000));
__ b(mi, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't touch r0 or r1 as they are needed if allocation
// fails.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
// Now we can use r0 for the result as key is not needed any more.
__ add(r0, r5, Operand(kHeapObjectTag));
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r5, HeapNumber::kValueOffset);
__ Ret();
} else {
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't touch r0 or r1 as they are needed if allocation
// fails.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
Register dst_mantissa = r1;
Register dst_exponent = r3;
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm,
value,
dest,
d0,
dst_mantissa,
dst_exponent,
r9,
s0);
__ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ Ret();
}
} else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ vmov(s0, value);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_u32(d0, s0);
__ vstr(d0, r2, HeapNumber::kValueOffset);
__ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ tst(value, Operand(0x80000000));
__ b(ne, &box_int_0);
__ tst(value, Operand(0x40000000));
__ b(ne, &box_int_1);
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
Register hiword = value; // r2.
Register loword = r3;
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm, hiword, loword, r4, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm, hiword, loword, r4, 1);
__ bind(&done);
// Integer was converted to double in registers hiword:loword.
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ mov(r0, r4);
__ Ret();
}
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_f32(d0, s0);
__ vstr(d0, r2, HeapNumber::kValueOffset);
__ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
// r3: heap number for result
// Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
// the slow case from here.
__ and_(r0, value, Operand(kBinary32MantissaMask));
// Extract exponent to r1. OK to clobber r1 now as there are no jumps to
// the slow case from here.
__ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
__ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r1, Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(r1, Operand(0xff));
__ mov(r1, Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(r1,
r1,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(r2, value, Operand(kBinary32SignMask));
value = no_reg;
__ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
__ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
__ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ mov(r0, r3);
__ Ret();
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vstr(d0, r2, HeapNumber::kValueOffset);
__ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
__ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ mov(r0, r4);
__ Ret();
}
} else {
// Tag integer as smi and return it.
__ mov(r0, Operand(value, LSL, kSmiTagSize));
__ Ret();
}
// Slow case: key and receiver are still in r0 and r1.
__ bind(&slow);
__ IncrementCounter(
masm->isolate()->counters()->keyed_load_external_array_slow(),
1, r2, r3);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
__ Push(r1, r0);
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
__ bind(&miss_force_generic);
Handle<Code> stub =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(stub, RelocInfo::CODE_TARGET);
}
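// Editor's note, not part of the original file: the soft-float path for
// EXTERNAL_FLOAT_ELEMENTS above widens a binary32 bit pattern by hand. A
// host-side sketch of the same transformation (assumed constants: float bias
// 127, double bias 1023, 23 float mantissa bits, 20 mantissa bits in the
// double's high word; subnormals are left unscaled, matching the shortcut
// taken above):
static void Binary32ToBinary64Sketch(uint32_t f, uint32_t* hi, uint32_t* lo) {
  uint32_t mantissa = f & ((1u << 23) - 1);
  uint32_t exponent = (f >> 23) & 0xff;
  if (exponent == 0xff) {
    exponent = 0x7ff;              // Infinity/NaN keep an all-ones exponent.
  } else if (exponent != 0) {
    exponent += 1023 - 127;        // Rebias from binary32 to binary64.
  }
  *hi = (f & 0x80000000u) | (exponent << 20) | (mantissa >> 3);
  *lo = mantissa << 29;            // Remaining 3 mantissa bits, zero-padded.
}
// For example, 2.5f (0x40200000) becomes hi:lo = 0x40040000:00000000.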
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@ -4042,6 +4403,118 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss_force_generic;
// This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller not to be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
// Get the elements array.
__ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
__ AssertFastElements(r2);
// Check that the key is within bounds.
__ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ cmp(r0, Operand(r3));
__ b(hs, &miss_force_generic);
// Load the result and make sure it's not the hole.
__ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
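  // The key is still smi-tagged (value << kSmiTagSize), so shifting it left
  // by kPointerSizeLog2 - kSmiTagSize scales it directly to a byte offset of
  // value * kPointerSize without untagging it first.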
__ ldr(r4,
MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r4, ip);
__ b(eq, &miss_force_generic);
__ mov(r0, r4);
__ Ret();
__ bind(&miss_force_generic);
Handle<Code> stub =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(stub, RelocInfo::CODE_TARGET);
}
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss_force_generic, slow_allocate_heapnumber;
Register key_reg = r0;
Register receiver_reg = r1;
Register elements_reg = r2;
Register heap_number_reg = r2;
Register indexed_double_offset = r3;
Register scratch = r4;
Register scratch2 = r5;
Register scratch3 = r6;
Register heap_number_map = r7;
// This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller not to be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
// Get the elements array.
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
// Check that the key is within bounds.
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ cmp(key_reg, Operand(scratch));
__ b(hs, &miss_force_generic);
// Load the upper word of the double in the fixed array and test for NaN.
__ add(indexed_double_offset, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
__ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
__ cmp(scratch, Operand(kHoleNanUpper32));
__ b(&miss_force_generic, eq);
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
// Don't need to reload the upper 32 bits of the double; it's already in
// scratch.
__ str(scratch, FieldMemOperand(heap_number_reg,
HeapNumber::kExponentOffset));
__ ldr(scratch, FieldMemOperand(indexed_double_offset,
FixedArray::kHeaderSize));
__ str(scratch, FieldMemOperand(heap_number_reg,
HeapNumber::kMantissaOffset));
__ mov(r0, heap_number_reg);
__ Ret();
__ bind(&slow_allocate_heapnumber);
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ Jump(slow_ic, RelocInfo::CODE_TARGET);
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,

View File

@ -1375,11 +1375,6 @@ ExternalReference ExternalReference::page_flags(Page* page) {
}
ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
return ExternalReference(entry);
}
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
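// Editor's illustration, not the helper referred to above (whose body
// continues outside this hunk): exponentiation by binary decomposition of
// the exponent needs only O(log2 |y|) multiplications instead of |y| - 1.
static double PowerDoubleIntSketch(double x, int y) {
  double base = (y < 0) ? 1.0 / x : x;
  uint64_t n = (y < 0) ? static_cast<uint64_t>(-static_cast<int64_t>(y))
                       : static_cast<uint64_t>(y);
  double result = 1.0;
  while (n != 0) {
    if (n & 1) result *= base;  // Fold in the current power of x.
    base *= base;               // Square for the next binary digit of y.
    n >>= 1;
  }
  return result;
}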

View File

@ -736,8 +736,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference page_flags(Page* page);
static ExternalReference ForDeoptEntry(Address entry);
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT

View File

@ -616,6 +616,14 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
bool AstVisitor::CheckStackOverflow() {
if (stack_overflow_) return true;
StackLimitCheck check(isolate_);
if (!check.HasOverflowed()) return false;
return (stack_overflow_ = true);
}
void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
Visit(declarations->at(i));

View File

@ -2492,53 +2492,42 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
class AstVisitor BASE_EMBEDDED {
public:
AstVisitor() {}
AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
virtual ~AstVisitor() { }
// Stack overflow check and dynamic dispatch.
virtual void Visit(AstNode* node) = 0;
void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
// Iteration left-to-right.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
// Stack overflow tracking support.
bool HasStackOverflow() const { return stack_overflow_; }
bool CheckStackOverflow();
// If a stack-overflow exception is encountered when visiting a
// node, calling SetStackOverflow will make sure that the visitor
// bails out without visiting more nodes.
void SetStackOverflow() { stack_overflow_ = true; }
void ClearStackOverflow() { stack_overflow_ = false; }
// Individual AST nodes.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
protected:
Isolate* isolate() { return isolate_; }
private:
Isolate* isolate_;
bool stack_overflow_;
};
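// Editor's sketch, not V8 code: a standalone analog of the bail-out pattern
// described above. The visitor checks a budget before dispatching; once the
// budget is exhausted a sticky flag stops every further visit, and callers
// query HasOverflowed() afterwards. NodeT is assumed to expose left/right
// children; the class and its names are purely illustrative.
class GuardedVisitorSketch {
 public:
  explicit GuardedVisitorSketch(int max_nodes)
      : budget_(max_nodes), overflowed_(false) {}

  template <class NodeT>
  void Visit(NodeT* node) {
    if (node == NULL || CheckOverflow()) return;
    --budget_;
    Visit(node->left);
    Visit(node->right);
  }

  bool HasOverflowed() const { return overflowed_; }
  void SetOverflow() { overflowed_ = true; }

 private:
  bool CheckOverflow() {
    if (overflowed_) return true;   // Sticky: stop visiting everywhere.
    if (budget_ > 0) return false;
    return (overflowed_ = true);
  }

  int budget_;
  bool overflowed_;
};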
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
virtual void Visit(AstNode* node) { \
if (!CheckStackOverflow()) node->Accept(this); \
} \
\
void SetStackOverflow() { stack_overflow_ = true; } \
void ClearStackOverflow() { stack_overflow_ = false; } \
bool HasStackOverflow() const { return stack_overflow_; } \
\
bool CheckStackOverflow() { \
if (stack_overflow_) return true; \
StackLimitCheck check(isolate_); \
if (!check.HasOverflowed()) return false; \
return (stack_overflow_ = true); \
} \
\
private: \
void InitializeAstVisitor() { \
isolate_ = Isolate::Current(); \
stack_overflow_ = false; \
} \
Isolate* isolate() { return isolate_; } \
\
Isolate* isolate_; \
bool stack_overflow_
// ----------------------------------------------------------------------------
// Construction time visitor.

View File

@ -107,8 +107,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyICMiss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
@ -388,7 +386,6 @@ class Builtins {
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyICMiss(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);

View File

@ -1,137 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "code-stubs.h"
#include "hydrogen.h"
#include "lithium.h"
namespace v8 {
namespace internal {
Handle<Code> HydrogenCodeStub::CodeFromGraph(HGraph* graph) {
graph->OrderBlocks();
graph->AssignDominators();
graph->CollectPhis();
graph->InsertRepresentationChanges();
graph->EliminateRedundantBoundsChecks();
LChunk* chunk = LChunk::NewChunk(graph);
ASSERT(chunk != NULL);
Handle<Code> stub = chunk->Codegen(Code::COMPILED_STUB);
return stub;
}
class CodeStubGraphBuilderBase : public HGraphBuilder {
public:
CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
: HGraphBuilder(&info_), info_(stub, isolate) {}
virtual bool BuildGraph();
protected:
virtual void BuildCodeStub() = 0;
HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
HydrogenCodeStub* stub() { return info_.code_stub(); }
private:
SmartArrayPointer<HParameter*> parameters_;
CompilationInfoWithZone info_;
};
bool CodeStubGraphBuilderBase::BuildGraph() {
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling stub using hydrogen\n");
HTracer::Instance()->TraceCompilation(&info_);
}
HBasicBlock* next_block = graph()->CreateBasicBlock();
next_block->SetInitialEnvironment(graph()->start_environment());
HGoto* jump = new(zone()) HGoto(next_block);
graph()->entry_block()->Finish(jump);
set_current_block(next_block);
int major_key = stub()->MajorKey();
CodeStubInterfaceDescriptor** descriptors =
info_.isolate()->code_stub_interface_descriptors();
if (descriptors[major_key] == NULL) {
descriptors[major_key] = stub()->GetInterfaceDescriptor(info_.isolate());
}
CodeStubInterfaceDescriptor* descriptor = descriptors[major_key];
parameters_.Reset(new HParameter*[descriptor->number_of_register_params]);
HGraph* graph = this->graph();
Zone* zone = this->zone();
for (int i = 0; i < descriptor->number_of_register_params; ++i) {
HParameter* param = new(zone) HParameter(i);
AddInstruction(param);
graph->start_environment()->Push(param);
parameters_[i] = param;
}
AddSimulate(BailoutId::StubEntry());
BuildCodeStub();
return true;
}
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
explicit CodeStubGraphBuilder(Stub* stub)
: CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
protected:
virtual void BuildCodeStub();
Stub* casted_stub() { return static_cast<Stub*>(stub()); }
};
template <>
void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
Zone* zone = this->zone();
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(), false);
AddInstruction(load);
HReturn* ret = new(zone) HReturn(load);
current_block()->Finish(ret);
}
Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
return CodeFromGraph(builder.CreateGraph());
}
} } // namespace v8::internal

View File

@ -48,6 +48,20 @@ bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
}
void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
masm->isolate()->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leaves.
AllowStubCallsScope allow_scope(masm, false);
// Generate the code for the stub.
masm->set_generating_stub(true);
NoCurrentFrameScope scope(masm);
Generate(masm);
}
SmartArrayPointer<const char> CodeStub::GetName() {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@ -58,7 +72,8 @@ SmartArrayPointer<const char> CodeStub::GetName() {
}
void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
SmartArrayPointer<const char> name = GetName();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
@ -72,39 +87,6 @@ int CodeStub::GetCodeKind() {
}
Handle<Code> PlatformCodeStub::GenerateCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
// Generate the new code.
MacroAssembler masm(isolate, NULL, 256);
{
// Update the static counter each time a new code stub is generated.
isolate->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leaves.
AllowStubCallsScope allow_scope(&masm, false);
// Generate the code for the stub.
masm.set_generating_stub(true);
NoCurrentFrameScope scope(&masm);
Generate(&masm);
}
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
// Copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()), GetICState());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
}
Handle<Code> CodeStub::GetCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
@ -120,10 +102,23 @@ Handle<Code> CodeStub::GetCode() {
{
HandleScope scope(isolate);
Handle<Code> new_object = GenerateCode();
// Generate the new code.
MacroAssembler masm(isolate, NULL, 256);
GenerateCode(&masm);
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
// Copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
GetICState());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
new_object->set_major_key(MajorKey());
FinishCode(new_object);
RecordCodeGeneration(*new_object, isolate);
RecordCodeGeneration(*new_object, &masm);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
@ -421,8 +416,36 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
break;
case DICTIONARY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
break;
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}

View File

@ -162,29 +162,20 @@ class CodeStub BASE_EMBEDDED {
// Lookup the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out, Isolate* isolate);
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
protected:
static bool CanUseFPRegisters();
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode() = 0;
// BinaryOpStub needs to override this.
virtual InlineCacheState GetICState() {
return UNINITIALIZED;
}
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
private:
// Nonvirtual wrapper around the stub-specific Generate function. Call
// this function to set up the macro assembler and generate the code.
void GenerateCode(MacroAssembler* masm);
// Generates the assembler code for the stub.
virtual void Generate(MacroAssembler* masm) = 0;
// Perform bookkeeping required after code generation when stub code is
// initially generated.
void RecordCodeGeneration(Code* code, Isolate* isolate);
void RecordCodeGeneration(Code* code, MacroAssembler* masm);
// Finish the code object after it has been generated.
virtual void FinishCode(Handle<Code> code) { }
@ -193,9 +184,18 @@ class CodeStub BASE_EMBEDDED {
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
// BinaryOpStub needs to override this.
virtual int GetCodeKind();
// BinaryOpStub needs to override this.
virtual InlineCacheState GetICState() {
return UNINITIALIZED;
}
// Add the code to a specialized cache, specific to an individual
// stub type. Please note that this method must add the code object to a
// roots object; otherwise we will remove the code during GC.
@ -213,6 +213,10 @@ class CodeStub BASE_EMBEDDED {
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@ -228,43 +232,6 @@ class CodeStub BASE_EMBEDDED {
};
class PlatformCodeStub : public CodeStub {
public:
// Retrieve the code for the stub. Generate the code if needed.
virtual Handle<Code> GenerateCode();
virtual int GetCodeKind() { return Code::STUB; }
protected:
// Generates the assembler code for the stub.
virtual void Generate(MacroAssembler* masm) = 0;
};
struct CodeStubInterfaceDescriptor {
int number_of_register_params;
Register* register_params;
Handle<Code> deoptimization_handler;
};
class HGraph;
struct Register;
class HydrogenCodeStub : public CodeStub {
public:
// Retrieve the code for the stub. Generate the code if needed.
virtual Handle<Code> GenerateCode() = 0;
virtual int GetCodeKind() { return Code::COMPILED_STUB; }
virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor(
Isolate* isolate) = 0;
protected:
Handle<Code> CodeFromGraph(HGraph* graph);
};
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
@ -322,7 +289,7 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
class StackCheckStub : public PlatformCodeStub {
class StackCheckStub : public CodeStub {
public:
StackCheckStub() { }
@ -334,7 +301,7 @@ class StackCheckStub : public PlatformCodeStub {
};
class InterruptStub : public PlatformCodeStub {
class InterruptStub : public CodeStub {
public:
InterruptStub() { }
@ -346,7 +313,7 @@ class InterruptStub : public PlatformCodeStub {
};
class ToNumberStub: public PlatformCodeStub {
class ToNumberStub: public CodeStub {
public:
ToNumberStub() { }
@ -358,7 +325,7 @@ class ToNumberStub: public PlatformCodeStub {
};
class FastNewClosureStub : public PlatformCodeStub {
class FastNewClosureStub : public CodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode)
: language_mode_(language_mode) { }
@ -374,7 +341,7 @@ class FastNewClosureStub : public PlatformCodeStub {
};
class FastNewContextStub : public PlatformCodeStub {
class FastNewContextStub : public CodeStub {
public:
static const int kMaximumSlots = 64;
@ -392,7 +359,7 @@ class FastNewContextStub : public PlatformCodeStub {
};
class FastNewBlockContextStub : public PlatformCodeStub {
class FastNewBlockContextStub : public CodeStub {
public:
static const int kMaximumSlots = 64;
@ -410,7 +377,7 @@ class FastNewBlockContextStub : public PlatformCodeStub {
};
class FastCloneShallowArrayStub : public PlatformCodeStub {
class FastCloneShallowArrayStub : public CodeStub {
public:
// Maximum length of copied elements array.
static const int kMaximumClonedLength = 8;
@ -443,7 +410,7 @@ class FastCloneShallowArrayStub : public PlatformCodeStub {
};
class FastCloneShallowObjectStub : public PlatformCodeStub {
class FastCloneShallowObjectStub : public CodeStub {
public:
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
@ -463,7 +430,7 @@ class FastCloneShallowObjectStub : public PlatformCodeStub {
};
class InstanceofStub: public PlatformCodeStub {
class InstanceofStub: public CodeStub {
public:
enum Flags {
kNoFlags = 0,
@ -501,7 +468,7 @@ class InstanceofStub: public PlatformCodeStub {
};
class MathPowStub: public PlatformCodeStub {
class MathPowStub: public CodeStub {
public:
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
@ -517,7 +484,7 @@ class MathPowStub: public PlatformCodeStub {
};
class BinaryOpStub: public PlatformCodeStub {
class BinaryOpStub: public CodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
@ -633,7 +600,7 @@ class BinaryOpStub: public PlatformCodeStub {
};
class ICCompareStub: public PlatformCodeStub {
class ICCompareStub: public CodeStub {
public:
ICCompareStub(Token::Value op,
CompareIC::State left,
@ -699,7 +666,7 @@ class ICCompareStub: public PlatformCodeStub {
};
class CEntryStub : public PlatformCodeStub {
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs)
@ -733,7 +700,7 @@ class CEntryStub : public PlatformCodeStub {
};
class JSEntryStub : public PlatformCodeStub {
class JSEntryStub : public CodeStub {
public:
JSEntryStub() { }
@ -767,7 +734,7 @@ class JSConstructEntryStub : public JSEntryStub {
};
class ArgumentsAccessStub: public PlatformCodeStub {
class ArgumentsAccessStub: public CodeStub {
public:
enum Type {
READ_ELEMENT,
@ -794,7 +761,7 @@ class ArgumentsAccessStub: public PlatformCodeStub {
};
class RegExpExecStub: public PlatformCodeStub {
class RegExpExecStub: public CodeStub {
public:
RegExpExecStub() { }
@ -806,7 +773,7 @@ class RegExpExecStub: public PlatformCodeStub {
};
class RegExpConstructResultStub: public PlatformCodeStub {
class RegExpConstructResultStub: public CodeStub {
public:
RegExpConstructResultStub() { }
@ -818,7 +785,7 @@ class RegExpConstructResultStub: public PlatformCodeStub {
};
class CallFunctionStub: public PlatformCodeStub {
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, CallFunctionFlags flags)
: argc_(argc), flags_(flags) { }
@ -859,7 +826,7 @@ class CallFunctionStub: public PlatformCodeStub {
};
class CallConstructStub: public PlatformCodeStub {
class CallConstructStub: public CodeStub {
public:
explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
@ -1050,53 +1017,25 @@ class AllowStubCallsScope {
};
class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
class KeyedLoadElementStub : public CodeStub {
public:
KeyedLoadDictionaryElementStub() {}
explicit KeyedLoadElementStub(ElementsKind elements_kind)
: elements_kind_(elements_kind)
{ }
Major MajorKey() { return KeyedLoadElement; }
int MinorKey() { return DICTIONARY_ELEMENTS; }
int MinorKey() { return elements_kind_; }
void Generate(MacroAssembler* masm);
private:
DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
ElementsKind elements_kind_;
DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
};
class KeyedLoadFastElementStub : public HydrogenCodeStub {
public:
KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array);
}
Major MajorKey() { return KeyedLoadElement; }
int MinorKey() { return bit_field_; }
bool is_js_array() const {
return IsJSArrayBits::decode(bit_field_);
}
ElementsKind elements_kind() const {
return ElementsKindBits::decode(bit_field_);
}
virtual Handle<Code> GenerateCode();
virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor(
Isolate* isolate);
private:
class IsJSArrayBits: public BitField<bool, 8, 1> {};
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
uint32_t bit_field_;
DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
};
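// Editor's sketch (hypothetical name, not part of this header): the assumed
// semantics of the BitField<T, shift, size> helpers used above, which pack a
// value into bits [shift, shift + size) of an unsigned word.
template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1U << size) - 1U) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};
// E.g. the removed stub's minor key is ElementsKindBits::encode(kind) |
// IsJSArrayBits::encode(is_js_array): the elements kind sits in bits 0-7 and
// the JS-array flag in bit 8.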
class KeyedStoreElementStub : public PlatformCodeStub {
class KeyedStoreElementStub : public CodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
ElementsKind elements_kind,
@ -1131,7 +1070,7 @@ class KeyedStoreElementStub : public PlatformCodeStub {
};
class ToBooleanStub: public PlatformCodeStub {
class ToBooleanStub: public CodeStub {
public:
enum Type {
UNDEFINED,
@ -1201,7 +1140,7 @@ class ToBooleanStub: public PlatformCodeStub {
};
class ElementsTransitionAndStoreStub : public PlatformCodeStub {
class ElementsTransitionAndStoreStub : public CodeStub {
public:
ElementsTransitionAndStoreStub(ElementsKind from,
ElementsKind to,
@ -1242,7 +1181,7 @@ class ElementsTransitionAndStoreStub : public PlatformCodeStub {
};
class StoreArrayLiteralElementStub : public PlatformCodeStub {
class StoreArrayLiteralElementStub : public CodeStub {
public:
StoreArrayLiteralElementStub()
: fp_registers_(CanUseFPRegisters()) { }
@ -1261,7 +1200,7 @@ class StoreArrayLiteralElementStub : public PlatformCodeStub {
};
class ProfileEntryHookStub : public PlatformCodeStub {
class ProfileEntryHookStub : public CodeStub {
public:
explicit ProfileEntryHookStub() {}

View File

@ -121,21 +121,19 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
if (code->kind() != Code::COMPILED_STUB) {
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
StringInputBuffer stream(String::cast(script->source()));
stream.Seek(function->start_position());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
int source_len =
function->end_position() - function->start_position() + 1;
for (int i = 0; i < source_len; i++) {
if (stream.has_more()) PrintF("%c", stream.GetNext());
}
PrintF("\n\n");
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
StringInputBuffer stream(String::cast(script->source()));
stream.Seek(function->start_position());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
int source_len =
function->end_position() - function->start_position() + 1;
for (int i = 0; i < source_len; i++) {
if (stream.has_more()) PrintF("%c", stream.GetNext());
}
PrintF("\n\n");
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
@ -147,12 +145,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
} else {
PrintF("--- Code ---\n");
}
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
code->Disassemble(CodeStub::MajorName(major_key, false));
} else {
code->Disassemble(*function->debug_name()->ToCString());
}
code->Disassemble(*function->debug_name()->ToCString());
}
#endif // ENABLE_DISASSEMBLER
}

View File

@ -55,7 +55,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
osr_ast_id_(BailoutId::None()) {
Initialize(script->GetIsolate(), BASE, zone);
Initialize(zone);
}
@ -65,7 +65,7 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()) {
Initialize(script_->GetIsolate(), BASE, zone);
Initialize(zone);
}
@ -76,22 +76,12 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()) {
Initialize(script_->GetIsolate(), BASE, zone);
Initialize(zone);
}
CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
Isolate* isolate, Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
isolate_ = isolate;
void CompilationInfo::Initialize(Zone* zone) {
isolate_ = script_->GetIsolate();
function_ = NULL;
scope_ = NULL;
global_scope_ = NULL;
@ -99,13 +89,8 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
pre_parse_data_ = NULL;
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
if (mode == STUB) {
mode_ = STUB;
return;
}
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
@ -122,33 +107,6 @@ CompilationInfo::~CompilationInfo() {
}
int CompilationInfo::num_parameters() const {
if (IsStub()) {
return 0;
} else {
return scope()->num_parameters();
}
}
int CompilationInfo::num_heap_slots() const {
if (IsStub()) {
return 0;
} else {
return scope()->num_heap_slots();
}
}
Code::Flags CompilationInfo::flags() const {
if (IsStub()) {
return Code::ComputeFlags(Code::COMPILED_STUB);
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
}
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
bool is_optimizable_closure =
@ -359,13 +317,13 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
HTracer::Instance()->TraceCompilation(info());
HTracer::Instance()->TraceCompilation(info()->function());
}
Handle<Context> native_context(
info()->closure()->context()->native_context());
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, info()->isolate(), info()->zone());
graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
@ -418,7 +376,7 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
Timer timer(this, &time_taken_to_codegen_);
ASSERT(chunk_ != NULL);
ASSERT(graph_ != NULL);
Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
info()->set_bailout_reason("code generation failed");
return AbortOptimization();


@ -38,7 +38,6 @@ namespace internal {
static const int kPrologueOffsetNotSet = -1;
class ScriptDataImpl;
class HydrogenCodeStub;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
@ -47,7 +46,6 @@ class CompilationInfo {
CompilationInfo(Handle<Script> script, Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
@ -74,14 +72,10 @@ class CompilationInfo {
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
HydrogenCodeStub* code_stub() {return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
int num_parameters() const;
int num_heap_slots() const;
Code::Flags flags() const;
void MarkAsEval() {
ASSERT(!is_lazy());
@ -104,31 +98,9 @@ class CompilationInfo {
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
bool is_native() const {
return IsNative::decode(flags_);
}
bool is_calling() const {
return is_deferred_calling() || is_non_deferred_calling();
}
void MarkAsDeferredCalling() {
flags_ |= IsDeferredCalling::encode(true);
}
bool is_deferred_calling() const {
return IsDeferredCalling::decode(flags_);
}
void MarkAsNonDeferredCalling() {
flags_ |= IsNonDeferredCalling::encode(true);
}
bool is_non_deferred_calling() const {
return IsNonDeferredCalling::decode(flags_);
}
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
@ -179,7 +151,6 @@ class CompilationInfo {
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
@ -238,11 +209,10 @@ class CompilationInfo {
enum Mode {
BASE,
OPTIMIZE,
NONOPT,
STUB
NONOPT
};
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void Initialize(Zone* zone);
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
@ -268,12 +238,6 @@ class CompilationInfo {
// If compiling for debugging produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
// If the compiled code contains calls that require building a frame
class IsCalling: public BitField<bool, 9, 1> {};
// If the compiled code contains calls that require building a frame
class IsDeferredCalling: public BitField<bool, 10, 1> {};
// If the compiled code contains calls that require building a frame
class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
unsigned flags_;
@ -286,8 +250,6 @@ class CompilationInfo {
Scope* scope_;
// The global scope provided as a convenience.
Scope* global_scope_;
// For compiled stubs, the stub object
HydrogenCodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
@ -348,10 +310,6 @@ class CompilationInfoWithZone: public CompilationInfo {
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
: CompilationInfo(stub, isolate, &zone_),
zone_(isolate),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
private:
Zone zone_;
@ -377,7 +335,7 @@ class CompilationHandleScope BASE_EMBEDDED {
class HGraph;
class HOptimizedGraphBuilder;
class HGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
@ -419,7 +377,7 @@ class OptimizingCompiler: public ZoneObject {
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
HOptimizedGraphBuilder* graph_builder_;
HGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
int64_t time_taken_to_create_graph_;
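
The compiler.cc and compiler.h hunks above drop stub compilation from CompilationInfo: the HydrogenCodeStub constructor, the STUB mode, and the accessors that short-circuit scope lookups for stubs. A minimal standalone sketch of that short-circuit behaviour (not V8 source; the plain int fields stand in for the Scope the real class consults):

    #include <cassert>

    enum Mode { BASE, OPTIMIZE, NONOPT, STUB };   // post-revert, STUB is gone again

    struct CompilationInfoSketch {
      Mode mode_;
      int scope_parameters_;                      // stands in for scope()->num_parameters()
      int scope_heap_slots_;                      // stands in for scope()->num_heap_slots()

      bool IsStub() const { return mode_ == STUB; }
      // Stub frames carry no formal parameters or context slots, so the removed
      // accessors answered 0 without touching a Scope at all.
      int num_parameters() const { return IsStub() ? 0 : scope_parameters_; }
      int num_heap_slots() const { return IsStub() ? 0 : scope_heap_slots_; }
    };

    int main() {
      CompilationInfoSketch function = { OPTIMIZE, 2, 5 };
      CompilationInfoSketch stub = { STUB, 0, 0 };
      assert(function.num_parameters() == 2);
      assert(stub.num_heap_slots() == 0);
      return 0;
    }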


@ -410,24 +410,17 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
reinterpret_cast<intptr_t>(from),
fp_to_sp_delta - (2 * kPointerSize));
}
// For COMPILED_STUBs called from builtins, the function pointer
// is a SMI indicating an internal frame.
if (function->IsSmi()) {
function = NULL;
}
if (function != NULL && function->IsOptimized()) {
function->shared()->increment_deopt_count();
}
function->shared()->increment_deopt_count();
// Find the optimized code.
if (type == EAGER) {
ASSERT(from == NULL);
compiled_code_ = function_->code();
optimized_code_ = function_->code();
if (FLAG_trace_deopt && FLAG_code_comments) {
// Print instruction associated with this bailout.
const char* last_comment = NULL;
int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
| RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(compiled_code_, mask); !it.done(); it.next()) {
for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->rmode() == RelocInfo::COMMENT) {
last_comment = reinterpret_cast<const char*>(info->data());
@ -443,22 +436,18 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
}
}
} else if (type == LAZY) {
compiled_code_ = FindDeoptimizingCodeFromAddress(from);
if (compiled_code_ == NULL) {
compiled_code_ =
static_cast<Code*>(isolate->heap()->FindCodeObject(from));
}
ASSERT(compiled_code_ != NULL);
optimized_code_ = FindDeoptimizingCodeFromAddress(from);
ASSERT(optimized_code_ != NULL);
} else if (type == OSR) {
// The function has already been optimized and we're transitioning
// from the unoptimized shared version to the optimized one in the
// function. The return address (from) points to unoptimized code.
compiled_code_ = function_->code();
ASSERT(compiled_code_->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(!compiled_code_->contains(from));
optimized_code_ = function_->code();
ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(!optimized_code_->contains(from));
} else if (type == DEBUGGER) {
compiled_code_ = optimized_code;
ASSERT(compiled_code_->contains(from));
optimized_code_ = optimized_code;
ASSERT(optimized_code_->contains(from));
}
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
@ -584,7 +573,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@ -629,9 +618,6 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::SETTER_STUB_FRAME:
DoComputeAccessorStubFrame(&iterator, i, true);
break;
case Translation::COMPILED_STUB_FRAME:
DoCompiledStubFrame(&iterator, i);
break;
case Translation::BEGIN:
case Translation::REGISTER:
case Translation::INT32_REGISTER:
@ -644,7 +630,6 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::LITERAL:
case Translation::ARGUMENTS_OBJECT:
case Translation::DUPLICATE:
default:
UNREACHABLE();
break;
}
@ -824,7 +809,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@ -1133,7 +1117,6 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@ -1354,9 +1337,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// environment at the OSR entry. The code for that is built into
// the DoComputeOsrOutputFrame function for now.
} else {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = compiled_code_->kind() == Code::COMPILED_STUB
? 0 : ComputeOutgoingArgumentSize();
unsigned stack_slots = optimized_code_->stack_slots();
unsigned outgoing_size = ComputeOutgoingArgumentSize();
ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
}
#endif
@ -1375,10 +1357,6 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
// The incoming arguments are the values for formal parameters and
// the receiver. Every slot contains a pointer.
if (function->IsSmi()) {
ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
return 0;
}
unsigned arguments = function->shared()->formal_parameter_count() + 1;
return arguments * kPointerSize;
}
@ -1386,7 +1364,7 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
compiled_code_->deoptimization_data());
optimized_code_->deoptimization_data());
unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
return height * kPointerSize;
}
@ -1394,7 +1372,7 @@ unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
Object* Deoptimizer::ComputeLiteral(int index) const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
compiled_code_->deoptimization_data());
optimized_code_->deoptimization_data());
FixedArray* literals = data->LiteralArray();
return literals->get(index);
}
@ -1425,6 +1403,8 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
ASSERT(!Serializer::enabled());
ASSERT(type == EAGER || type == LAZY);
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
int entry_count = (type == EAGER)
@ -1439,6 +1419,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
VirtualMemory* memory = type == EAGER
? data->eager_deoptimization_entry_code_
@ -1700,11 +1681,6 @@ void Translation::BeginJSFrame(BailoutId node_id,
}
void Translation::BeginCompiledStubFrame() {
buffer_->Add(COMPILED_STUB_FRAME, zone());
}
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
@ -1786,7 +1762,6 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
case COMPILED_STUB_FRAME:
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
@ -1817,8 +1792,6 @@ const char* Translation::StringFor(Opcode opcode) {
return "GETTER_STUB_FRAME";
case SETTER_STUB_FRAME:
return "SETTER_STUB_FRAME";
case COMPILED_STUB_FRAME:
return "COMPILED_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
@ -1926,10 +1899,6 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
int literal_index = iterator->Next();
return SlotRef(data->LiteralArray()->get(literal_index));
}
case Translation::COMPILED_STUB_FRAME:
UNREACHABLE();
break;
}
UNREACHABLE();


@ -135,8 +135,6 @@ class Deoptimizer : public Malloced {
int output_count() const { return output_count_; }
Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@ -299,9 +297,6 @@ class Deoptimizer : public Malloced {
static size_t GetMaxDeoptTableSize();
static void EnsureCodeForDeoptimizationEntry(BailoutType type,
int max_entry_id);
private:
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
@ -325,8 +320,6 @@ class Deoptimizer : public Malloced {
void DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame);
void DoCompiledStubFrame(TranslationIterator* iterator,
int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@ -349,6 +342,8 @@ class Deoptimizer : public Malloced {
void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value);
static void EnsureCodeForDeoptimizationEntry(BailoutType type,
int max_entry_id);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@ -365,7 +360,7 @@ class Deoptimizer : public Malloced {
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
Code* optimized_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
Address from_;
@ -535,7 +530,7 @@ class FrameDescription {
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
double double_registers_[DoubleRegister::kMaxNumAllocatableRegisters];
double double_registers_[DoubleRegister::kNumAllocatableRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
@ -605,7 +600,6 @@ class Translation BASE_EMBEDDED {
GETTER_STUB_FRAME,
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
COMPILED_STUB_FRAME,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
@ -636,7 +630,6 @@ class Translation BASE_EMBEDDED {
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
void BeginCompiledStubFrame();
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);


@ -287,12 +287,7 @@ static int DecodeIt(FILE* f,
Address addr = relocinfo.target_address();
int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
} else {
out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
}
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
} else {
out.AddFormatted(" ;; deoptimization bailout %d", id);
}
@ -327,8 +322,7 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::COMPILED_STUB)
int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.


@ -235,18 +235,8 @@ inline Object* JavaScriptFrame::function() const {
}
inline CompiledFrame::CompiledFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) {
}
inline StubFrame::StubFrame(StackFrameIterator* iterator)
: CompiledFrame(iterator) {
}
inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
: CompiledFrame(iterator) {
: JavaScriptFrame(iterator) {
}


@ -617,7 +617,7 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
}
void CompiledFrame::Iterate(ObjectVisitor* v) const {
void OptimizedFrame::Iterate(ObjectVisitor* v) const {
#ifdef DEBUG
// Make sure that optimized frames do not contain any stack handlers.
StackHandlerIterator it(this, top_handler());
@ -649,7 +649,7 @@ void CompiledFrame::Iterate(ObjectVisitor* v) const {
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
parameters_base += DoubleRegister::NumAllocatableRegisters() *
parameters_base += DoubleRegister::kNumAllocatableRegisters *
kDoubleSize / kPointerSize;
}
@ -681,24 +681,14 @@ void CompiledFrame::Iterate(ObjectVisitor* v) const {
}
}
// Visit the return address in the callee and incoming arguments.
IteratePc(v, pc_address(), code);
}
void StubFrame::Iterate(ObjectVisitor* v) const {
CompiledFrame::Iterate(v);
}
void OptimizedFrame::Iterate(ObjectVisitor* v) const {
CompiledFrame::Iterate(v);
// Visit the context and the function.
Object** fixed_base = &Memory::Object_at(
fp() + JavaScriptFrameConstants::kFunctionOffset);
Object** fixed_limit = &Memory::Object_at(fp());
v->VisitPointers(fixed_base, fixed_limit);
// Visit the return address in the callee and incoming arguments.
IteratePc(v, pc_address(), code);
}


@ -136,7 +136,6 @@ class StackHandler BASE_EMBEDDED {
V(EXIT, ExitFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(OPTIMIZED, OptimizedFrame) \
V(STUB, StubFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@ -556,33 +555,7 @@ class JavaScriptFrame: public StandardFrame {
};
class CompiledFrame : public JavaScriptFrame {
public:
virtual Type type() const = 0;
// GC support.
virtual void Iterate(ObjectVisitor* v) const;
protected:
inline explicit CompiledFrame(StackFrameIterator* iterator);
};
class StubFrame : public CompiledFrame {
public:
virtual Type type() const { return STUB; }
// GC support.
virtual void Iterate(ObjectVisitor* v) const;
protected:
inline explicit StubFrame(StackFrameIterator* iterator);
friend class StackFrameIterator;
};
class OptimizedFrame : public CompiledFrame {
class OptimizedFrame : public JavaScriptFrame {
public:
virtual Type type() const { return OPTIMIZED; }
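
The frames.h and frames.cc hunks collapse the frame hierarchy back: the version being removed introduced a CompiledFrame base that holds the spill-slot iteration shared by optimized-code frames and the new STUB frames, while the restored version keeps OptimizedFrame directly under JavaScriptFrame and gives it the whole Iterate. A rough standalone sketch of the two shapes (not V8 source; the Iterate bodies are reduced to comments):

    namespace before {                       // the shape this revert removes
    struct JavaScriptFrame { virtual ~JavaScriptFrame() {} };
    struct CompiledFrame : JavaScriptFrame {
      virtual void Iterate() { /* spilled pointers, doubles, return address */ }
    };
    struct StubFrame : CompiledFrame {};     // STUB frame type, reuses Iterate as-is
    struct OptimizedFrame : CompiledFrame {
      virtual void Iterate() {
        CompiledFrame::Iterate();            // then also the context and function slots
      }
    };
    }

    namespace after {                        // the shape this revert restores
    struct JavaScriptFrame { virtual ~JavaScriptFrame() {} };
    struct OptimizedFrame : JavaScriptFrame {
      virtual void Iterate() { /* everything in one place */ }
    };
    }

    int main() {
      before::StubFrame s;       s.Iterate();
      before::OptimizedFrame o;  o.Iterate();
      after::OptimizedFrame r;   r.Iterate();
      return 0;
    }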


@ -398,7 +398,6 @@ void FullCodeGenerator::Initialize() {
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
InitializeAstVisitor();
}


@ -48,9 +48,7 @@ class JumpPatchSite;
// debugger to piggybag on.
class BreakableStatementChecker: public AstVisitor {
public:
BreakableStatementChecker() : is_breakable_(false) {
InitializeAstVisitor();
}
BreakableStatementChecker() : is_breakable_(false) {}
void Check(Statement* stmt);
void Check(Expression* stmt);
@ -65,7 +63,6 @@ class BreakableStatementChecker: public AstVisitor {
bool is_breakable_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
};
@ -827,7 +824,6 @@ class FullCodeGenerator: public AstVisitor {
friend class NestedStatement;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
};

File diff suppressed because it is too large


@ -429,8 +429,7 @@ enum FrameType {
JS_CONSTRUCT,
JS_GETTER,
JS_SETTER,
ARGUMENTS_ADAPTOR,
STUB
ARGUMENTS_ADAPTOR
};
@ -441,8 +440,6 @@ class HEnvironment: public ZoneObject {
Handle<JSFunction> closure,
Zone* zone);
explicit HEnvironment(Zone* zone);
HEnvironment* arguments_environment() {
return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
}
@ -639,7 +636,7 @@ class HInferRepresentation BASE_EMBEDDED {
};
class HOptimizedGraphBuilder;
class HGraphBuilder;
enum ArgumentsAllowedFlag {
ARGUMENTS_NOT_ALLOWED,
@ -675,10 +672,10 @@ class AstContext {
bool is_for_typeof() { return for_typeof_; }
protected:
AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
AstContext(HGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
HOptimizedGraphBuilder* owner() const { return owner_; }
HGraphBuilder* owner() const { return owner_; }
inline Zone* zone() const;
@ -689,7 +686,7 @@ class AstContext {
#endif
private:
HOptimizedGraphBuilder* owner_;
HGraphBuilder* owner_;
Expression::Context kind_;
AstContext* outer_;
bool for_typeof_;
@ -698,7 +695,7 @@ class AstContext {
class EffectContext: public AstContext {
public:
explicit EffectContext(HOptimizedGraphBuilder* owner)
explicit EffectContext(HGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
@ -711,7 +708,7 @@ class EffectContext: public AstContext {
class ValueContext: public AstContext {
public:
ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
@ -729,7 +726,7 @@ class ValueContext: public AstContext {
class TestContext: public AstContext {
public:
TestContext(HOptimizedGraphBuilder* owner,
TestContext(HGraphBuilder* owner,
Expression* condition,
TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
@ -769,7 +766,7 @@ class TestContext: public AstContext {
class FunctionState {
public:
FunctionState(HOptimizedGraphBuilder* owner,
FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
InliningKind inlining_kind);
@ -799,7 +796,7 @@ class FunctionState {
bool arguments_pushed() { return arguments_elements() != NULL; }
private:
HOptimizedGraphBuilder* owner_;
HGraphBuilder* owner_;
CompilationInfo* compilation_info_;
TypeFeedbackOracle* oracle_;
@ -831,65 +828,7 @@ class FunctionState {
};
class HGraphBuilder {
public:
explicit HGraphBuilder(CompilationInfo* info)
: info_(info), graph_(NULL), current_block_(NULL) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
void set_current_block(HBasicBlock* block) { current_block_ = block; }
HEnvironment* environment() const {
return current_block()->last_environment();
}
Zone* zone() const { return info_->zone(); }
HGraph* graph() { return graph_; }
HGraph* CreateGraph();
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
void AddSimulate(BailoutId id,
RemovableSimulate removable = FIXED_SIMULATE);
protected:
virtual bool BuildGraph() = 0;
// Building common constructs
HInstruction* BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
bool is_store);
HInstruction* BuildFastElementAccess(
HValue* elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
bool is_store);
HInstruction* BuildUncheckedMonomorphicElementAccess(
HValue* object,
HValue* key,
HValue* val,
HCheckMaps* mapcheck,
bool is_js_array,
ElementsKind elements_kind,
bool is_store);
private:
HGraphBuilder();
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
};
class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
class HGraphBuilder: public AstVisitor {
public:
enum BreakType { BREAK, CONTINUE };
enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
@ -925,8 +864,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
// structures mirroring BreakableStatement nesting.
class BreakAndContinueScope BASE_EMBEDDED {
public:
BreakAndContinueScope(BreakAndContinueInfo* info,
HOptimizedGraphBuilder* owner)
BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
: info_(info), owner_(owner), next_(owner->break_scope()) {
owner->set_break_scope(this);
}
@ -934,7 +872,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
~BreakAndContinueScope() { owner_->set_break_scope(next_); }
BreakAndContinueInfo* info() { return info_; }
HOptimizedGraphBuilder* owner() { return owner_; }
HGraphBuilder* owner() { return owner_; }
BreakAndContinueScope* next() { return next_; }
// Search the break stack for a break or continue target.
@ -942,20 +880,32 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
private:
BreakAndContinueInfo* info_;
HOptimizedGraphBuilder* owner_;
HGraphBuilder* owner_;
BreakAndContinueScope* next_;
};
HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
virtual bool BuildGraph();
HGraph* CreateGraph();
// Simple accessors.
HGraph* graph() const { return graph_; }
BreakAndContinueScope* break_scope() const { return break_scope_; }
void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
HBasicBlock* current_block() const { return current_block_; }
void set_current_block(HBasicBlock* block) { current_block_ = block; }
HEnvironment* environment() const {
return current_block()->last_environment();
}
bool inline_bailout() { return inline_bailout_; }
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
void AddSimulate(BailoutId ast_id,
RemovableSimulate removable = FIXED_SIMULATE);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
HValue* Pop() { return environment()->Pop(); }
@ -978,12 +928,9 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
void operator delete(void* pointer, Zone* zone) { }
void operator delete(void* pointer) { }
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
// Type of a member function that generates inline code for a native function.
typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
(CallRuntime* call);
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
// Forward declarations for inner scope classes.
class SubgraphScope;
@ -1192,14 +1139,25 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
HInstruction* BuildLoadKeyedGeneric(HValue* object,
HValue* key);
HInstruction* BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
bool is_store);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps);
HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
HCheckMaps* mapcheck,
Handle<Map> map,
bool is_store);
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
@ -1239,6 +1197,14 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Handle<String> name,
Property* expr,
Handle<Map> map);
HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
HInstruction* BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
bool is_store);
void AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map);
@ -1280,6 +1246,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue** operand,
HValue** shift_amount);
Zone* zone() const { return zone_; }
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
@ -1293,16 +1261,20 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
// A stack of breakable statements entered.
BreakAndContinueScope* break_scope_;
HGraph* graph_;
HBasicBlock* current_block_;
int inlined_count_;
ZoneList<Handle<Object> > globals_;
Zone* zone_;
bool inline_bailout_;
friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
friend class KeyedLoadFastElementStub;
DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
@ -1475,7 +1447,7 @@ class HPhase BASE_EMBEDDED {
class HTracer: public Malloced {
public:
void TraceCompilation(CompilationInfo* info);
void TraceCompilation(FunctionLiteral* function);
void TraceHydrogen(const char* name, HGraph* graph);
void TraceLithium(const char* name, LChunk* chunk);
void TraceLiveRanges(const char* name, LAllocator* allocator);
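
The hydrogen.h hunks undo a split of the graph builder: the version being removed kept a slim HGraphBuilder base that owns the graph, AddInstruction/AddSimulate and the shared element-access helpers (so Hydrogen code stubs could drive it without an AST), with HOptimizedGraphBuilder layering the AstVisitor-based function compilation on top; the restored version is a single HGraphBuilder that is itself the AstVisitor. A standalone sketch of the two shapes (not V8 source):

    namespace before {                            // the split this revert removes
    struct HGraph {};
    class HGraphBuilder {                         // shared by stubs and functions
     public:
      virtual ~HGraphBuilder() {}
      HGraph* CreateGraph() { return BuildGraph() ? &graph_ : 0; }
     protected:
      virtual bool BuildGraph() = 0;              // each client supplies the body
      HGraph graph_;
    };
    class HOptimizedGraphBuilder : public HGraphBuilder {  // AST-driven compilation
     protected:
      virtual bool BuildGraph() { return true; }
    };
    }

    namespace after {                             // the single class this revert restores
    struct HGraph {};
    class HGraphBuilder {                         // AST-driven only
     public:
      HGraph* CreateGraph() { return &graph_; }
     private:
      HGraph graph_;
    };
    }

    int main() {
      before::HOptimizedGraphBuilder b;
      after::HGraphBuilder a;
      return (b.CreateGraph() != 0 && a.CreateGraph() != 0) ? 0 : 1;
    }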


@ -55,33 +55,6 @@ uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
int IntelDoubleRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::kNumAllocatableRegisters;
} else {
return X87TopOfStackRegister::kNumAllocatableRegisters;
}
}
int IntelDoubleRegister::NumRegisters() {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::kNumRegisters;
} else {
return X87TopOfStackRegister::kNumRegisters;
}
}
const char* IntelDoubleRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::AllocationIndexToString(index);
} else {
return X87TopOfStackRegister::AllocationIndexToString(index);
}
}
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
@ -2226,8 +2199,7 @@ void Assembler::prefetch(const Operand& src, int level) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x18);
// Emit hint number in Reg position of RegR/M.
XMMRegister code = XMMRegister(level);
XMMRegister code = { level }; // Emit hint number in Reg position of RegR/M.
emit_sse_operand(code, src);
}
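
The assembler-ia32.cc hunk above deletes the runtime register-count queries (IntelDoubleRegister::NumAllocatableRegisters and friends) that picked between the seven allocatable XMM registers and a single x87 top-of-stack register depending on SSE2 support; the header below restores plain compile-time constants. A standalone sketch of the caller-visible difference (not V8 source; the SSE2 probe is a stand-in for CpuFeatures::IsSupported):

    #include <cstdio>

    static bool CpuHasSSE2() { return true; }    // stand-in for the real CPU probe

    struct DoubleRegisterBefore {                // removed: the count depends on the CPU
      static int NumAllocatableRegisters() { return CpuHasSSE2() ? 7 : 1; }
    };

    struct DoubleRegisterAfter {                 // restored: fixed XMM register count
      static const int kNumAllocatableRegisters = 7;
    };

    int main() {
      const int before_count = DoubleRegisterBefore::NumAllocatableRegisters();
      const int after_count = DoubleRegisterAfter::kNumAllocatableRegisters;
      std::printf("before: %d  after: %d\n", before_count, after_count);
      return 0;
    }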


@ -65,10 +65,7 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
static const int kMaxNumAllocatableRegisters = 6;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
static const int kNumAllocatableRegisters = 6;
static const int kNumRegisters = 8;
static inline const char* AllocationIndexToString(int index);
@ -122,7 +119,7 @@ const Register no_reg = { kRegister_no_reg_Code };
inline const char* Register::AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
// This is the mapping of allocation indices to registers.
const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
return kNames[index];
@ -136,58 +133,22 @@ inline int Register::ToAllocationIndex(Register reg) {
inline Register Register::FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return (index >= 4) ? from_code(index + 2) : from_code(index);
}
struct IntelDoubleRegister {
static const int kMaxNumAllocatableRegisters = 7;
explicit IntelDoubleRegister(int code) { code_ = code; }
static int NumAllocatableRegisters();
static int NumRegisters();
static const char* AllocationIndexToString(int index);
struct XMMRegister {
static const int kNumAllocatableRegisters = 7;
static const int kNumRegisters = 8;
static int ToAllocationIndex(IntelDoubleRegister reg) {
static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
static IntelDoubleRegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < NumAllocatableRegisters());
return from_code(index + 1);
}
static IntelDoubleRegister from_code(int code) {
return IntelDoubleRegister(code);
}
bool is_valid() const {
return 0 <= code_ && code_ < NumRegisters();
}
int code() const {
ASSERT(is_valid());
return code_;
}
int code_;
};
struct XMMRegister : IntelDoubleRegister {
static const int kNumAllocatableRegisters = 7;
static const int kNumRegisters = 8;
explicit XMMRegister(int code) : IntelDoubleRegister(code) {}
static XMMRegister from_code(int code) {
XMMRegister r = XMMRegister(code);
return r;
}
bool is(XMMRegister reg) const { return code_ == reg.code_; }
static XMMRegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index + 1);
}
@ -204,46 +165,34 @@ struct XMMRegister : IntelDoubleRegister {
};
return names[index];
}
static XMMRegister from_code(int code) {
XMMRegister r = { code };
return r;
}
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
int code_;
};
const XMMRegister xmm0 = XMMRegister(0);
const XMMRegister xmm1 = XMMRegister(1);
const XMMRegister xmm2 = XMMRegister(2);
const XMMRegister xmm3 = XMMRegister(3);
const XMMRegister xmm4 = XMMRegister(4);
const XMMRegister xmm5 = XMMRegister(5);
const XMMRegister xmm6 = XMMRegister(6);
const XMMRegister xmm7 = XMMRegister(7);
const XMMRegister xmm0 = { 0 };
const XMMRegister xmm1 = { 1 };
const XMMRegister xmm2 = { 2 };
const XMMRegister xmm3 = { 3 };
const XMMRegister xmm4 = { 4 };
const XMMRegister xmm5 = { 5 };
const XMMRegister xmm6 = { 6 };
const XMMRegister xmm7 = { 7 };
struct X87TopOfStackRegister : IntelDoubleRegister {
static const int kNumAllocatableRegisters = 1;
static const int kNumRegisters = 1;
explicit X87TopOfStackRegister(int code)
: IntelDoubleRegister(code) {}
bool is(X87TopOfStackRegister reg) const {
return code_ == reg.code_;
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"st0",
};
return names[index];
}
static int ToAllocationIndex(X87TopOfStackRegister reg) {
ASSERT(reg.code() == 0);
return 0;
}
};
const X87TopOfStackRegister x87tos = X87TopOfStackRegister(0);
typedef IntelDoubleRegister DoubleRegister;
typedef XMMRegister DoubleRegister;
enum Condition {


@ -574,25 +574,6 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
__ CallRuntime(Runtime::kNotifyICMiss, 0);
__ popad();
// Tear down internal frame.
}
__ pop(MemOperand(esp, 0)); // Ignore state offset
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{


@ -40,24 +40,6 @@
namespace v8 {
namespace internal {
CodeStubInterfaceDescriptor*
KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
static CodeStubInterfaceDescriptor* result = NULL;
if (result == NULL) {
Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
static Register registers[] = { edx, ecx };
static CodeStubInterfaceDescriptor info = {
2,
registers,
miss
};
result = &info;
}
return result;
}
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
@ -2444,7 +2426,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&loaded);
} else { // UNTAGGED.
CpuFeatures::Scope scope(SSE2);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
__ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
@ -2517,7 +2498,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fstp(0);
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatures::Scope scope(SSE2);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@ -2530,7 +2510,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
if (tagged) {
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
CpuFeatures::Scope scope(SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
@ -2545,7 +2524,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
if (tagged) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatures::Scope scope(SSE2);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
@ -2578,7 +2556,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime, 1, 1);
} else { // UNTAGGED.
CpuFeatures::Scope scope(SSE2);
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
@ -4831,17 +4808,10 @@ void CodeStub::GenerateStubsAheadOfTime() {
void CodeStub::GenerateFPStubs() {
if (CpuFeatures::IsSupported(SSE2)) {
CEntryStub save_doubles(1, kSaveFPRegs);
// Stubs might already be in the snapshot, detect that and don't regenerate,
// which would lead to code stub initialization state being messed up.
Code* save_doubles_code;
if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
save_doubles_code = *(save_doubles.GetCode());
}
save_doubles_code->set_is_pregenerated(true);
save_doubles_code->GetIsolate()->set_fp_stubs_generated(true);
}
CEntryStub save_doubles(1, kSaveFPRegs);
Handle<Code> code = save_doubles.GetCode();
code->set_is_pregenerated(true);
code->GetIsolate()->set_fp_stubs_generated(true);
}
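
The GenerateFPStubs hunk above also drops a guard: the deleted version generated the save-doubles CEntry stub only when SSE2 is available and the stub is not already in the code cache (for example, baked into a snapshot), so that stub initialization state is not clobbered; the restored version regenerates it unconditionally. A loose standalone sketch of the generate-unless-cached pattern (not V8 source; the map stands in for the real stub cache):

    #include <map>
    #include <string>

    static std::map<std::string, int> stub_cache;        // stand-in for the isolate's stub cache

    static int GenerateSaveDoublesStub() { return 42; }  // stand-in for CEntryStub::GetCode()

    static int GetOrGenerate(const std::string& key) {
      std::map<std::string, int>::const_iterator it = stub_cache.find(key);
      if (it != stub_cache.end()) return it->second;     // already present, e.g. from a snapshot
      const int code = GenerateSaveDoublesStub();
      stub_cache[key] = code;
      return code;
    }

    int main() {
      const int first = GetOrGenerate("CEntryStub_save_doubles");
      const int second = GetOrGenerate("CEntryStub_save_doubles");  // cache hit, no regeneration
      return (first == second) ? 0 : 1;
    }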


@ -38,7 +38,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub {
class TranscendentalCacheStub: public CodeStub {
public:
enum ArgumentType {
TAGGED = 0,
@ -61,7 +61,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
};
class StoreBufferOverflowStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@ -80,7 +80,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
class UnaryOpStub: public PlatformCodeStub {
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@ -225,7 +225,7 @@ enum StringAddFlags {
};
class StringAddStub: public PlatformCodeStub {
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@ -247,7 +247,7 @@ class StringAddStub: public PlatformCodeStub {
};
class SubStringStub: public PlatformCodeStub {
class SubStringStub: public CodeStub {
public:
SubStringStub() {}
@ -259,7 +259,7 @@ class SubStringStub: public PlatformCodeStub {
};
class StringCompareStub: public PlatformCodeStub {
class StringCompareStub: public CodeStub {
public:
StringCompareStub() { }
@ -295,7 +295,7 @@ class StringCompareStub: public PlatformCodeStub {
};
class NumberToStringStub: public PlatformCodeStub {
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
@ -320,7 +320,7 @@ class NumberToStringStub: public PlatformCodeStub {
};
class StringDictionaryLookupStub: public PlatformCodeStub {
class StringDictionaryLookupStub: public CodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@ -382,7 +382,7 @@ class StringDictionaryLookupStub: public PlatformCodeStub {
};
class RecordWriteStub: public PlatformCodeStub {
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
@ -585,7 +585,7 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;


@ -307,7 +307,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
compiled_code_->deoptimization_data());
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
@ -344,7 +344,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@ -455,7 +455,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
compiled_code_->entry() + pc_offset);
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation =
@ -569,70 +569,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
int frame_index) {
//
// FROM TO <-ebp
// | .... | | .... |
// +-------------------------+ +-------------------------+
// | JSFunction continuation | | JSFunction continuation |
// +-------------------------+ +-------------------------+<-esp
// | | saved frame (ebp) |
// | +=========================+<-ebp
// | | JSFunction context |
// v +-------------------------+
// | COMPILED_STUB marker | ebp = saved frame
// +-------------------------+ esi = JSFunction context
// | |
// | ... |
// | |
// +-------------------------+<-esp
//
//
int output_frame_size = 1 * kPointerSize;
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, 0);
Code* notify_miss =
isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
output_frame->SetContinuation(
reinterpret_cast<uint32_t>(notify_miss->entry()));
ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
int major_key = compiled_code_->major_key();
CodeStubInterfaceDescriptor* descriptor =
isolate_->code_stub_interface_descriptors()[major_key];
Handle<Code> miss_ic(descriptor->deoptimization_handler);
output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
unsigned input_frame_size = input_->GetFrameSize();
intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
output_frame->SetFrameSlot(0, value);
value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
output_frame->SetRegister(ebp.code(), value);
output_frame->SetFp(value);
value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
output_frame->SetRegister(esi.code(), value);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
ASSERT(opcode == Translation::REGISTER);
USE(opcode);
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
output_frame->SetRegister(edx.code(), input_value);
int32_t next = iterator->Next();
opcode = static_cast<Translation::Opcode>(next);
ASSERT(opcode == Translation::REGISTER);
input_reg = iterator->Next();
input_value = input_->GetRegister(input_reg);
output_frame->SetRegister(ecx.code(), input_value);
ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
}
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
@ -1061,7 +997,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@ -1076,6 +1012,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
CpuFeatures::Scope scope(SSE2);
Isolate* isolate = masm()->isolate();
@ -1085,13 +1022,10 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movdbl(Operand(esp, offset), xmm_reg);
}
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movdbl(Operand(esp, offset), xmm_reg);
}
__ pushad();
@ -1139,18 +1073,14 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(Operand(ebx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
// Fill in the double input registers.
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize;
__ movdbl(xmm0, Operand(esp, src_offset));
__ movdbl(Operand(ebx, dst_offset), xmm0);
}
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize;
__ movdbl(xmm0, Operand(esp, src_offset));
__ movdbl(Operand(ebx, dst_offset), xmm0);
}
__ fninit();
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
@ -1168,13 +1098,10 @@ void Deoptimizer::EntryGenerator::Generate() {
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
__ bind(&pop_loop_header);
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
@ -1212,39 +1139,31 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
// last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_4, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: ebx = current FrameDescription*, ecx = loop index.
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movdbl(xmm_reg, Operand(ebx, src_offset));
}
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movdbl(xmm_reg, Operand(ebx, src_offset));
}
}


@ -30,7 +30,6 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "ia32/lithium-codegen-ia32.h"
#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
@ -71,6 +70,7 @@ bool LCodeGen::GenerateCode() {
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope(SSE2);
CodeStub::GenerateFPStubs();
@ -79,15 +79,13 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
dynamic_frame_alignment_ = info()->IsOptimizing() &&
((chunk()->num_double_slots() > 2 &&
!chunk()->graph()->is_recursive()) ||
!info()->osr_ast_id().IsNone());
dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
!chunk()->graph()->is_recursive()) ||
!info()->osr_ast_id().IsNone();
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
GenerateJumpTable() &&
GenerateSafepointTable();
}
@ -97,9 +95,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
@ -130,126 +126,113 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). ecx is zero for method calls and non-zero for
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
}
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). ecx is zero for method calls and non-zero for
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
}
if (dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ mov(edx, Immediate(kNoAlignmentPadding));
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
// Align esp + 4 to a multiple of 2 * kPointerSize.
__ test(esp, Immediate(kPointerSize));
__ j(not_zero, &do_not_pad, Label::kNear);
__ push(Immediate(0));
__ mov(ebx, esp);
__ mov(edx, Immediate(kAlignmentPaddingPushed));
// Copy arguments, receiver, and return address.
__ mov(ecx, Immediate(scope()->num_parameters() + 2));
if (dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ mov(edx, Immediate(kNoAlignmentPadding));
__ bind(&align_loop);
__ mov(eax, Operand(ebx, 1 * kPointerSize));
__ mov(Operand(ebx, 0), eax);
__ add(Operand(ebx), Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &align_loop, Label::kNear);
__ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
__ bind(&do_not_pad);
}
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
// Align esp + 4 to a multiple of 2 * kPointerSize.
__ test(esp, Immediate(kPointerSize));
__ j(not_zero, &do_not_pad, Label::kNear);
__ push(Immediate(0));
__ mov(ebx, esp);
__ mov(edx, Immediate(kAlignmentPaddingPushed));
// Copy arguments, receiver, and return address.
__ mov(ecx, Immediate(scope()->num_parameters() + 2));
__ bind(&align_loop);
__ mov(eax, Operand(ebx, 1 * kPointerSize));
__ mov(Operand(ebx, 0), eax);
__ add(Operand(ebx), Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &align_loop, Label::kNear);
__ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
__ bind(&do_not_pad);
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
if (info()->IsStub()) {
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
} else {
__ push(edi); // Callee's JS function.
}
}
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
if (info()->IsOptimizing() &&
dynamic_frame_alignment_ &&
FLAG_debug_code) {
if (dynamic_frame_alignment_ && FLAG_debug_code) {
__ test(esp, Immediate(kPointerSize));
__ Assert(zero, "frame is expected to be aligned");
}
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
ASSERT(slots != 0 || !info()->IsOptimizing());
if (slots > 0) {
if (slots == 1) {
if (dynamic_frame_alignment_) {
__ push(edx);
} else {
__ push(Immediate(kNoAlignmentPadding));
}
ASSERT_GE(slots, 1);
if (slots == 1) {
if (dynamic_frame_alignment_) {
__ push(edx);
} else {
if (FLAG_debug_code) {
__ mov(Operand(eax), Immediate(slots));
Label loop;
__ bind(&loop);
__ push(Immediate(kSlotsZapValue));
__ dec(eax);
__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
// accessible, we write to each page in turn (the value is irrelevant).
const int kPageSize = 4 * KB;
for (int offset = slots * kPointerSize - kPageSize;
offset > 0;
offset -= kPageSize) {
__ mov(Operand(esp, offset), eax);
}
#endif
__ push(Immediate(kNoAlignmentPadding));
}
} else {
if (FLAG_debug_code) {
__ mov(Operand(eax), Immediate(slots));
Label loop;
__ bind(&loop);
__ push(Immediate(kSlotsZapValue));
__ dec(eax);
__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
// accessible, we write to each page in turn (the value is irrelevant).
const int kPageSize = 4 * KB;
for (int offset = slots * kPointerSize - kPageSize;
offset > 0;
offset -= kPageSize) {
__ mov(Operand(esp, offset), eax);
}
#endif
}
// Store dynamic frame alignment state in the first local.
if (dynamic_frame_alignment_) {
__ mov(Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
edx);
} else {
__ mov(Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
Immediate(kNoAlignmentPadding));
}
// Store dynamic frame alignment state in the first local.
if (dynamic_frame_alignment_) {
__ mov(Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
edx);
} else {
__ mov(Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
Immediate(kNoAlignmentPadding));
}
}
// Possibly allocate a local context.
int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@ -289,7 +272,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
if (FLAG_trace) {
// We have not executed any compiled code yet, so esi still holds the
// incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
@ -343,102 +326,16 @@ bool LCodeGen::GenerateBody() {
}
bool LCodeGen::GenerateJumpTable() {
Label needs_frame_not_call;
Label needs_frame_is_call;
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
Address entry = jump_table_[i].address;
if (jump_table_[i].needs_frame) {
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
if (jump_table_[i].is_lazy_deopt) {
if (needs_frame_is_call.is_bound()) {
__ jmp(&needs_frame_is_call);
} else {
__ bind(&needs_frame_is_call);
__ push(esi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
// Push a PC inside the function so that the deopt code can find where
// the deopt comes from. It doesn't have to be the precise return
// address of a "calling" LAZY deopt; it only has to be somewhere
// inside the code body.
Label push_approx_pc;
__ call(&push_approx_pc);
__ bind(&push_approx_pc);
// Push the continuation which was stashed where the ebp should
// be. Replace it with the saved ebp.
__ push(MemOperand(esp, 3 * kPointerSize));
__ mov(MemOperand(esp, 4 * kPointerSize), ebp);
__ lea(ebp, MemOperand(esp, 4 * kPointerSize));
__ ret(0); // Call the continuation without clobbering registers.
}
} else {
if (needs_frame_not_call.is_bound()) {
__ jmp(&needs_frame_not_call);
} else {
__ bind(&needs_frame_not_call);
__ push(esi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
// Push the continuation which was stashed where the ebp should
// be. Replace it with the saved ebp.
__ push(MemOperand(esp, 2 * kPointerSize));
__ mov(MemOperand(esp, 3 * kPointerSize), ebp);
__ lea(ebp, MemOperand(esp, 3 * kPointerSize));
__ ret(0); // Call the continuation without clobbering registers.
}
}
} else {
if (jump_table_[i].is_lazy_deopt) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
}
}
}
return !is_aborted();
}
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
if (NeedsDeferredFrame()) {
Comment(";;; Deferred build frame",
code->instruction_index(),
code->instr()->Mnemonic());
ASSERT(!frame_is_built_);
ASSERT(info()->IsStub());
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ push(ebp); // Caller's frame pointer.
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
__ lea(ebp, Operand(esp, 2 * kPointerSize));
}
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Deferred destroy frame",
code->instruction_index(),
code->instr()->Mnemonic());
ASSERT(frame_is_built_);
frame_is_built_ = false;
__ mov(esp, ebp);
__ pop(ebp);
}
__ jmp(code->exit());
}
}
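The deferred prologue above builds a frame without touching esi: it pushes the caller's ebp, the caller's context slot and a StackFrame::STUB marker, then rebases ebp with lea so it points at the saved ebp. A small simulation of that push sequence, assuming the usual ia32 frame offsets (the function is illustrative only):

#include <cstdio>

// Replays push ebp / push context / push Smi(STUB) / lea ebp,[esp+8]
// and reports where each value lands relative to the new ebp.
static void SimulateDeferredStubFrame(int esp) {
  const int kPointerSize = 4;
  int saved_ebp_slot = esp - 1 * kPointerSize;  // push ebp
  int context_slot   = esp - 2 * kPointerSize;  // push caller's context
  int marker_slot    = esp - 3 * kPointerSize;  // push Smi(StackFrame::STUB)
  int new_ebp = (esp - 3 * kPointerSize) + 2 * kPointerSize;  // lea ebp, [esp + 8]
  std::printf("saved ebp at ebp%+d\n", saved_ebp_slot - new_ebp);  // +0
  std::printf("context   at ebp%+d\n", context_slot - new_ebp);    // -4
  std::printf("marker    at ebp%+d\n", marker_slot - new_ebp);     // -8
}

The -4 and -8 slots are where a stack walker expects the context and the frame-type marker, which is why a STUB marker instead of a function pointer is enough for these frames.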
@ -452,15 +349,6 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
if (!info()->IsStub()) {
// For lazy deoptimization we need space to patch a call after every call.
// Ensure there is always space for such patching, even if the code ends
// in a call.
int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
while (masm()->pc_offset() < target_offset) {
masm()->nop();
}
}
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@ -476,11 +364,6 @@ XMMRegister LCodeGen::ToDoubleRegister(int index) const {
}
bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
return op->IsDoubleRegister();
}
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
@ -566,9 +449,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation,
arguments_index,
arguments_count);
bool has_closure_id = !info()->closure().is_null() &&
*info()->closure() != *environment->closure();
int closure_id = has_closure_id
int closure_id = *info()->closure() != *environment->closure()
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
@ -591,11 +472,6 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
case STUB:
translation->BeginCompiledStubFrame();
break;
default:
UNREACHABLE();
}
// Inlined frames which push their arguments cause the index to be
@ -730,8 +606,6 @@ void LCodeGen::CallRuntime(const Runtime::Function* fun,
__ CallRuntime(fun, argc);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
ASSERT(info()->is_calling());
}
@ -756,8 +630,6 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
ASSERT(info()->is_calling());
}
@ -803,11 +675,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
Deoptimizer::BailoutType bailout_type = frame_is_built_
? Deoptimizer::EAGER
: Deoptimizer::LAZY;
Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@ -841,44 +709,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
__ popfd();
}
ASSERT(info()->IsStub() || frame_is_built_);
bool lazy_deopt_needed = info()->IsStub();
if (cc == no_condition) {
if (FLAG_trap_on_deopt) __ int3();
if (lazy_deopt_needed) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
}
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Label done;
if (FLAG_trap_on_deopt) {
Label done;
__ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
}
if (!lazy_deopt_needed && frame_is_built_) {
if (FLAG_trap_on_deopt) {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
}
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&done);
} else {
// We often have several deopts to the same entry; reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
jump_table_.last().address != entry ||
jump_table_.last().needs_frame != !frame_is_built_ ||
jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
jump_table_.Add(table_entry, zone());
}
if (FLAG_trap_on_deopt) {
__ jmp(&jump_table_.last().label);
} else {
__ j(cc, &jump_table_.last().label);
}
__ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
}
__ bind(&done);
}
}
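The out-of-line path above appends a new JumpTableEntry only when the target address or its flags differ from the previous entry, so consecutive deopts to the same entry share one jump. A compact sketch of that reuse rule using simplified types (not the V8 classes):

#include <cstddef>
#include <vector>

// Simplified stand-in for jump_table_: one entry per distinct
// (address, needs_frame, is_lazy_deopt) combination, reusing the last
// entry when the new deopt matches it.
struct JumpTableEntry {
  const void* address;
  bool needs_frame;
  bool is_lazy_deopt;
};

static size_t EntryFor(std::vector<JumpTableEntry>* table,
                       const void* address, bool needs_frame, bool is_lazy) {
  if (table->empty() ||
      table->back().address != address ||
      table->back().needs_frame != needs_frame ||
      table->back().is_lazy_deopt != is_lazy) {
    table->push_back(JumpTableEntry{address, needs_frame, is_lazy});
  }
  return table->size() - 1;  // the conditional jump targets this entry
}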
@ -1579,8 +1422,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope1(SSE2);
CpuFeatures::Scope scope2(SSE4_1);
CpuFeatures::Scope scope(SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
__ movd(res, Operand(temp));
@ -1592,7 +1434,6 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
__ pinsrd(res, Operand(temp), 1);
}
} else {
CpuFeatures::Scope scope(SSE2);
__ Set(temp, Immediate(upper));
__ movd(res, Operand(temp));
__ psllq(res, 32);
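Both register paths here consume the lower/upper halves computed a few lines earlier: the double's 64-bit pattern is split into two int32 words, which reach the XMM register either via movd/pinsrd (SSE4.1) or via movd plus a 32-bit shift and OR (plain SSE2). A host-side sketch of the split, with an illustrative helper name:

#include <cstdint>
#include <cstring>

// Splits a double into the two 32-bit words the generated code feeds
// to the XMM register: lower = bits 0..31, upper = bits 32..63.
static void SplitDoubleConstant(double value, int32_t* lower, int32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // reinterpret without aliasing UB
  *lower = static_cast<int32_t>(bits);
  *upper = static_cast<int32_t>(bits >> 32);
}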
@ -1746,7 +1587,6 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
CpuFeatures::Scope scope(SSE2);
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
@ -1808,7 +1648,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
CpuFeatures::Scope scope(SSE2);
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
@ -1819,8 +1658,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ addsd(left, right);
break;
case Token::SUB:
__ subsd(left, right);
break;
__ subsd(left, right);
break;
case Token::MUL:
__ mulsd(left, right);
break;
@ -1893,7 +1732,6 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
CpuFeatures::Scope scope(SSE2);
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
@ -2053,7 +1891,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
CpuFeatures::Scope scope(SSE2);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@ -2563,7 +2400,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime call
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
@ -2577,10 +2414,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
}
__ mov(esp, ebp);
__ pop(ebp);
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
@ -2593,12 +2428,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
__ bind(&no_padding);
}
if (info()->IsStub()) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ Ret();
} else {
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
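With dynamic frame alignment the return sequence drops one extra word when padding was pushed at entry, hence the Ret((GetParameterCount() + 2) * kPointerSize, ecx) in the padded branch versus + 1 in the normal one. The arithmetic, as a small illustrative helper (kPointerSize assumed to be 4):

// Stack bytes dropped on return beyond the return address itself:
// the receiver, the declared parameters, and one extra slot when the
// prologue pushed alignment padding.
static int ReturnedStackBytes(int parameter_count, bool had_alignment_padding) {
  const int kPointerSize = 4;
  int slots = parameter_count + 1 + (had_alignment_padding ? 1 : 0);
  return slots * kPointerSize;
}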
@ -2974,23 +2804,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
} else {
__ fld_s(operand);
HandleX87FPReturnValue(instr);
}
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
__ movdbl(ToDoubleRegister(instr->result()), operand);
} else {
__ fld_d(operand);
HandleX87FPReturnValue(instr);
}
__ movdbl(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
@ -3034,30 +2852,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
}
void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
if (IsX87TopOfStack(instr->result())) {
// Return value is already on stack. If the value has no uses, then
// pop it off the FP stack. Otherwise, make sure that there are enough
// copies of the value on the stack to feed all of the usages, e.g.
// when the following instruction uses the return value in multiple
// inputs.
int count = instr->hydrogen_value()->UseCount();
if (count == 0) {
__ fstp(0);
} else {
count--;
ASSERT(count <= 7);
while (count-- > 0) {
__ fld(0);
}
}
} else {
__ fstp_d(ToOperand(instr->result()));
}
}
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
@ -3078,14 +2875,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
__ movdbl(result, double_load_operand);
} else {
__ fld_d(double_load_operand);
HandleX87FPReturnValue(instr);
}
__ movdbl(result, double_load_operand);
}
@ -3501,7 +3291,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
CpuFeatures::Scope scope(SSE2);
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
@ -3523,7 +3312,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@ -3588,7 +3376,6 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@ -3634,7 +3421,6 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
@ -3642,7 +3428,6 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
@ -3719,7 +3504,6 @@ void LCodeGen::DoRandom(LRandom* instr) {
DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
CpuFeatures::Scope scope(SSE2);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
@ -3787,7 +3571,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
CpuFeatures::Scope scope(SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
Label positive, done, zero;
@ -3819,7 +3602,6 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatures::Scope scope(SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
Register temp1 = ToRegister(instr->temp1());
@ -4088,11 +3870,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
DeoptimizeIf(below_equal, instr->environment());
} else {
if (instr->hydrogen()->index()->representation().IsTagged() &&
!instr->hydrogen()->index()->type().IsSmi()) {
__ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
DeoptimizeIf(above_equal, instr->environment());
}
@ -4115,11 +3892,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
CpuFeatures::Scope scope(SSE2);
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(SSE2);
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
@ -4155,7 +3930,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
CpuFeatures::Scope scope(SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
if (instr->NeedsCanonicalization()) {
@ -4406,21 +4180,15 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
__ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
} else {
UNREACHABLE();
}
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
__ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
CpuFeatures::Scope scope(SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
LOperand* temp = instr->temp();
@ -4498,21 +4266,9 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// the value in there. If that fails, call the runtime system.
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope feature_scope(SSE2);
__ cvtsi2sd(xmm0, Operand(reg));
} else {
__ push(reg);
__ fild_s(Operand(esp, 0));
__ pop(reg);
}
__ cvtsi2sd(xmm0, Operand(reg));
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope feature_scope(SSE2);
__ LoadUint32(xmm0, reg, xmm1);
} else {
UNREACHABLE();
}
__ LoadUint32(xmm0, reg, xmm1);
}
if (FLAG_inline_new) {
@ -4541,12 +4297,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Done. Put the value in xmm0 into the value of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope feature_scope(SSE2);
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
__ StoreToSafepointRegisterSlot(reg, reg);
}
@ -4562,6 +4313,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
@ -4572,16 +4324,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
if (!IsX87TopOfStack(instr->value())) {
__ fld_d(ToOperand(instr->value()));
}
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
@ -4738,8 +4481,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
} else {
// Deoptimize if we don't have a heap number.
__ RecordComment("Deferred TaggedToI: not a heap number");
DeoptimizeIf(not_equal, instr->environment());
@ -4761,8 +4503,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ RecordComment("Deferred TaggedToI: minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
} else {
UNREACHABLE();
}
__ bind(&done);
}
@ -4805,24 +4545,19 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
bool deoptimize_on_minus_zero =
instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
bool deoptimize_on_minus_zero =
instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
deoptimize_on_minus_zero,
instr->environment());
} else {
UNIMPLEMENTED();
}
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
deoptimize_on_minus_zero,
instr->environment());
}
@ -4831,7 +4566,6 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
@ -5021,10 +4755,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
LInstruction* instr) {
LEnvironment* env) {
Label success;
__ CompareMap(reg, map, &success, mode);
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(not_equal, env);
__ bind(&success);
}
@ -5042,13 +4776,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatures::Scope scope(SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampDoubleToUint8(value_reg, xmm0, result_reg);
@ -5063,8 +4796,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
CpuFeatures::Scope scope(SSE2);
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
Label is_smi, done, heap_number;
@ -5111,7 +4842,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr);
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
@ -5121,7 +4852,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
// Check the holder map.
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr);
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
@ -5658,15 +5389,13 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt() {
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
__ Nop(padding_size);
}
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
__ Nop(padding_size);
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
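EnsureSpaceForLazyDeopt only emits nops when the previous lazy-bailout point is closer than Deoptimizer::patch_size() bytes, so there is always room to patch in a call during lazy deoptimization. The padding amount reduces to a clamped difference (sketch, not V8 API):

#include <algorithm>

// Nop bytes needed so that at least patch_size bytes separate the
// previous lazy-deopt point from the current position.
static int LazyDeoptPadding(int last_lazy_deopt_pc, int current_pc, int patch_size) {
  return std::max(0, last_lazy_deopt_pc + patch_size - current_pc);
}

For instance, with a hypothetical patch_size of 5 and only 2 bytes emitted since the last bailout, 3 bytes of nops are inserted.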

View File

@ -55,7 +55,6 @@ class LCodeGen BASE_EMBEDDED {
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@ -65,7 +64,6 @@ class LCodeGen BASE_EMBEDDED {
dynamic_frame_alignment_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@ -80,20 +78,10 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsX87TopOfStack(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
Immediate ToInteger32Immediate(LOperand* op) const {
@ -102,9 +90,6 @@ class LCodeGen BASE_EMBEDDED {
Handle<Object> ToHandle(LConstantOperand* op) const;
// A utility for instructions that return floating point values on X87.
void HandleX87FPReturnValue(LInstruction* instr);
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
Operand HighOperand(LOperand* op);
@ -137,7 +122,7 @@ class LCodeGen BASE_EMBEDDED {
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
CompareMapMode mode, LInstruction* instr);
CompareMapMode mode, LEnvironment* env);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@ -187,7 +172,7 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return info()->num_parameters(); }
int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@ -199,7 +184,9 @@ class LCodeGen BASE_EMBEDDED {
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateJumpTable();
// Pad the reloc info to ensure that we have enough space to patch during
// deoptimization.
bool GenerateRelocPadding();
bool GenerateSafepointTable();
enum SafepointMode {
@ -369,23 +356,10 @@ class LCodeGen BASE_EMBEDDED {
MacroAssembler* const masm_;
CompilationInfo* const info_;
struct JumpTableEntry {
inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
address(entry),
needs_frame(frame),
is_lazy_deopt(is_lazy) { }
Label label;
Address address;
bool needs_frame;
bool is_lazy_deopt;
};
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
@ -395,7 +369,6 @@ class LCodeGen BASE_EMBEDDED {
bool dynamic_frame_alignment_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@ -413,7 +386,6 @@ class LCodeGen BASE_EMBEDDED {
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
ASSERT(codegen_->info()->is_calling());
}
~PushSafepointRegistersScope() {

View File

@ -191,7 +191,7 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
return Register::FromAllocationIndex(i);
}
@ -204,7 +204,7 @@ bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (source_uses_[i] != 0) return false;
if (destination_uses_[i] != 0) return false;
}
@ -256,7 +256,7 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
Register scratch = Register::FromAllocationIndex(i);
__ push(scratch);
@ -324,38 +324,29 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movaps(dst, src);
} else {
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
__ movdbl(dst, src);
}
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movaps(dst, src);
} else {
UNREACHABLE();
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
__ movdbl(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
ASSERT(destination->IsDoubleRegister() ||
destination->IsDoubleStackSlot());
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movdbl(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
__ movdbl(xmm0, src);
__ movdbl(dst, xmm0);
}
ASSERT(destination->IsDoubleRegister() ||
destination->IsDoubleStackSlot());
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movdbl(dst, src);
} else {
UNREACHABLE();
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
__ movdbl(xmm0, src);
__ movdbl(dst, xmm0);
}
} else {
UNREACHABLE();
}
@ -419,7 +410,6 @@ void LGapResolver::EmitSwap(int index) {
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
CpuFeatures::Scope scope(SSE2);
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = cgen_->ToDoubleRegister(source);

View File

@ -97,8 +97,8 @@ class LGapResolver BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
int source_uses_[Register::kMaxNumAllocatableRegisters];
int destination_uses_[Register::kMaxNumAllocatableRegisters];
int source_uses_[Register::kNumAllocatableRegisters];
int destination_uses_[Register::kNumAllocatableRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.

View File

@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
register_spills_[i] = NULL;
}
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
double_register_spills_[i] = NULL;
}
}
@ -460,11 +460,9 @@ LPlatformChunk* LChunkBuilder::Build() {
status_ = BUILDING;
// Reserve the first spill slot for the state of dynamic alignment.
if (info()->IsOptimizing()) {
int alignment_state_index = chunk_->GetNextSpillIndex(false);
ASSERT_EQ(alignment_state_index, 0);
USE(alignment_state_index);
}
int alignment_state_index = chunk_->GetNextSpillIndex(false);
ASSERT_EQ(alignment_state_index, 0);
USE(alignment_state_index);
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@ -496,12 +494,6 @@ LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
}
LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
X87TopOfStackRegister::ToAllocationIndex(reg));
}
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
return Use(value, ToUnallocated(fixed_register));
}
@ -634,13 +626,6 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineX87TOS(
LTemplateInstruction<1, I, T>* instr) {
return Define(instr, ToUnallocated(x87tos));
}
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
@ -653,8 +638,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@ -1697,12 +1680,8 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
// Only mark conversions that might need to allocate as calling rather than
// all changes. This makes simple, non-allocating conversion not have to force
// building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = instr->deoptimize_on_minus_zero()
@ -1727,10 +1706,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
LOperand* value = CpuFeatures::IsSupported(SSE2)
? UseRegisterAtStart(instr->value())
: UseAtStart(instr->value());
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
// Make sure that temp and result_temp are different registers.
@ -1748,7 +1724,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@ -2265,17 +2240,8 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LParameter* result = new(zone()) LParameter;
if (info()->IsOptimizing()) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(result, spill_index);
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
Register reg = descriptor->register_params[instr->index()];
return DefineFixed(result, reg);
}
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(new(zone()) LParameter, spill_index);
}
@ -2376,7 +2342,6 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(new(zone()) LStackCheck(context), instr);

View File

@ -249,11 +249,7 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
virtual bool ClobbersDoubleRegisters() const {
return is_call_ || !CpuFeatures::IsSupported(SSE2);
}
bool IsMarkedAsCall() const { return is_call_; }
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
@ -359,7 +355,6 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
class LInstructionGap: public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
virtual bool ClobbersDoubleRegisters() const { return false; }
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
@ -1418,6 +1413,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
@ -1427,18 +1423,11 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
return hydrogen()->is_external();
}
virtual bool ClobbersDoubleRegisters() const {
return !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool key_is_smi() {
return hydrogen()->key()->representation().IsTagged();
}
};
@ -2419,9 +2408,8 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
LOperand* double_register_spills_[
DoubleRegister::kMaxNumAllocatableRegisters];
LOperand* register_spills_[Register::kNumAllocatableRegisters];
LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
};
@ -2585,7 +2573,6 @@ class LChunkBuilder BASE_EMBEDDED {
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
@ -2646,8 +2633,6 @@ class LChunkBuilder BASE_EMBEDDED {
template<int I, int T>
LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
XMMRegister reg);
template<int I, int T>
LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);

View File

@ -1801,8 +1801,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(eax, Immediate(function->nargs));
mov(ebx, Immediate(ExternalReference(function, isolate())));
CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
: kDontSaveFPRegs);
CEntryStub ces(1, kSaveFPRegs);
CallStub(&ces);
}

View File

@ -924,9 +924,9 @@ class MacroAssembler: public Assembler {
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
// Needs access to SafepointRegisterStackIndex for compiled frame
// Needs access to SafepointRegisterStackIndex for optimized frame
// traversal.
friend class CompiledFrame;
friend class OptimizedFrame;
};

View File

@ -3398,17 +3398,9 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
if (receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements()) {
Handle<Code> stub = KeyedLoadFastElementStub(
receiver_map->instance_type() == JS_ARRAY_TYPE,
elements_kind).GetCode();
__ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
} else {
Handle<Code> stub =
KeyedLoadDictionaryElementStub().GetCode();
__ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
}
Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
__ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -3669,6 +3661,157 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss_force_generic, failed_allocation, slow;
// This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
// Check that the index is in range.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &miss_force_generic);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ SmiUntag(ecx); // Untag the index.
__ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
break;
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
__ SmiUntag(ecx); // Untag the index.
__ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
__ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
__ mov(eax, Operand(ebx, ecx, times_2, 0));
break;
case EXTERNAL_FLOAT_ELEMENTS:
__ fld_s(Operand(ebx, ecx, times_2, 0));
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ fld_d(Operand(ebx, ecx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// eax: value
// For floating-point array type:
// FP(0): value
if (elements_kind == EXTERNAL_INT_ELEMENTS ||
elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
if (elements_kind == EXTERNAL_INT_ELEMENTS) {
__ cmp(eax, 0xc0000000);
__ j(sign, &box_int);
} else {
ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
__ test(eax, Immediate(0xc0000000));
__ j(not_zero, &box_int);
}
__ SmiTag(eax);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
if (elements_kind == EXTERNAL_INT_ELEMENTS) {
__ push(eax);
__ fild_s(Operand(esp, 0));
__ pop(eax);
} else {
ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
__ push(Immediate(0));
__ push(eax);
__ fild_d(Operand(esp, 0));
__ pop(eax);
__ pop(eax);
}
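The manual zero-extension above works because fild_d consumes a signed 64-bit integer: widening the unsigned 32-bit value with a zero high word yields a 64-bit pattern whose signed interpretation equals the original unsigned value, so nothing is lost in the conversion. The same computation in host code (illustrative helper):

#include <cstdint>

// What the push(0) / push(eax) / fild_d sequence computes.
static double UnsignedIntToDouble(uint32_t value) {
  return static_cast<double>(static_cast<int64_t>(static_cast<uint64_t>(value)));
}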
// FP(0): value
__ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
// Set the value.
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
// Set the value.
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
__ SmiTag(eax);
__ ret(0);
}
// If we fail allocation of the HeapNumber, we still have a value on
// top of the FPU stack. Remove it.
__ bind(&failed_allocation);
__ fstp(0);
// Fall through to slow case.
// Slow case: Jump to runtime.
__ bind(&slow);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ jmp(ic, RelocInfo::CODE_TARGET);
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
// Miss case: Jump to runtime.
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
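The 0xc0000000 tests above decide whether a loaded int32/uint32 element fits in an ia32 smi (31-bit payload) or must be boxed in a HeapNumber: a signed value fits when its top two bits agree, an unsigned value only when both are clear. Equivalent host-side checks (sketch):

#include <cstdint>

// Smi range checks matching the cmp/test against 0xc0000000 above.
static bool SignedFitsSmi(int32_t value) {
  // Top two bits equal <=> value lies in [-2^30, 2^30).
  return (value >> 30) == 0 || (value >> 30) == -1;
}

static bool UnsignedFitsSmi(uint32_t value) {
  // Either of the top two bits set would overflow the 31-bit payload.
  return (value & 0xC0000000u) == 0;
}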
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@ -3868,6 +4011,106 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss_force_generic;
// This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
// Get the elements array.
__ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
__ AssertFastElements(eax);
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
// Load the result and make sure it's not the hole.
__ mov(ebx, Operand(eax, ecx, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
__ cmp(ebx, masm->isolate()->factory()->the_hole_value());
__ j(equal, &miss_force_generic);
__ mov(eax, ebx);
__ ret(0);
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss_force_generic, slow_allocate_heapnumber;
// This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
// Get the elements array.
__ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
__ AssertFastElements(eax);
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
// Check for the hole
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
__ j(equal, &miss_force_generic);
// Always allocate a heap number for the result.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
FixedDoubleArray::kHeaderSize));
} else {
__ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
}
__ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
// Set the value.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
}
__ ret(0);
__ bind(&slow_allocate_heapnumber);
// A value was pushed on the floating point stack before the allocation; if
// the allocation fails it needs to be removed.
if (!CpuFeatures::IsSupported(SSE2)) {
__ fstp(0);
}
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ jmp(slow_ic, RelocInfo::CODE_TARGET);
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
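The hole check above inspects only the upper 32 bits of the element: holes in a FixedDoubleArray are stored as a dedicated NaN bit pattern, so comparing the word at kHeaderSize + sizeof(kHoleNanLower32) against kHoleNanUpper32 is sufficient for the values the VM actually stores. A host-side sketch of the test, taking the marker as a parameter instead of hard-coding V8's constant:

#include <cstdint>
#include <cstring>

// True if the stored double carries the given upper-32-bit hole marker;
// on a little-endian target that word sits at byte offset 4 of the
// element, i.e. sizeof(lower word) past its start.
static bool IsHoleMarker(double element, uint32_t hole_nan_upper32) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}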
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,

View File

@ -1054,13 +1054,7 @@ Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
ElementsKind elements_kind,
KeyedAccessGrowMode grow_mode) {
ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
if (IsFastElementsKind(elements_kind) ||
IsExternalArrayElementsKind(elements_kind)) {
return KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode();
} else {
ASSERT(elements_kind == DICTIONARY_ELEMENTS);
return KeyedLoadDictionaryElementStub().GetCode();
}
return KeyedLoadElementStub(elements_kind).GetCode();
}

View File

@ -1619,7 +1619,6 @@ Isolate::Isolate()
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
context_exit_happened_(false),
deferred_handles_head_(NULL),
optimizing_compiler_thread_(this) {
@ -1782,9 +1781,6 @@ Isolate::~Isolate() {
delete date_cache_;
date_cache_ = NULL;
delete[] code_stub_interface_descriptors_;
code_stub_interface_descriptors_ = NULL;
delete regexp_stack_;
regexp_stack_ = NULL;
@ -1948,10 +1944,6 @@ bool Isolate::Init(Deserializer* des) {
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
code_stub_interface_descriptors_ =
new CodeStubInterfaceDescriptor*[CodeStub::NUMBER_OF_IDS];
memset(code_stub_interface_descriptors_, 0,
kPointerSize * CodeStub::NUMBER_OF_IDS);
// Enable logging before setting up the heap
logger_->SetUp();
@ -2012,8 +2004,6 @@ bool Isolate::Init(Deserializer* des) {
debug_->SetUp(create_heap_objects);
#endif
deoptimizer_data_ = new DeoptimizerData;
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
des->Deserialize();
@ -2032,6 +2022,7 @@ bool Isolate::Init(Deserializer* des) {
// Quiet the heap NaN if needed on target platform.
if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
runtime_profiler_->SetUp();
@ -2053,17 +2044,6 @@ bool Isolate::Init(Deserializer* des) {
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
if (!create_heap_objects) {
// Now that the heap is consistent, it's OK to generate the code for the
// deopt entry table that might have been referred to by optimized code in
// the snapshot.
HandleScope scope(this);
Deoptimizer::EnsureCodeForDeoptimizationEntry(
Deoptimizer::LAZY,
kDeoptTableSerializeEntryCount - 1);
}
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
return true;
}

View File

@ -53,7 +53,6 @@ namespace internal {
class Bootstrapper;
class CodeGenerator;
class CodeRange;
struct CodeStubInterfaceDescriptor;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
@ -1063,10 +1062,6 @@ class Isolate {
date_cache_ = date_cache;
}
CodeStubInterfaceDescriptor** code_stub_interface_descriptors() {
return code_stub_interface_descriptors_;
}
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@ -1250,7 +1245,6 @@ class Isolate {
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor** code_stub_interface_descriptors_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.

View File

@ -606,7 +606,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
int LAllocator::FixedDoubleLiveRangeID(int index) {
return -index - 1 - Register::kMaxNumAllocatableRegisters;
return -index - 1 - Register::kNumAllocatableRegisters;
}
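Fixed live ranges get negative IDs so they can never collide with the non-negative virtual register numbers: general-purpose registers occupy -1 down to -kNumAllocatableRegisters, and the function above places the double registers in the block directly below. Side by side (the general-register form is the usual -index - 1; treat it as an assumption here, since only the double variant appears in this hunk):

// Negative IDs for fixed ranges: GP registers take -1 .. -num_gp,
// double registers the block immediately below that.
static int FixedLiveRangeID(int index) {
  return -index - 1;
}

static int FixedDoubleLiveRangeID(int index, int num_allocatable_gp_registers) {
  return -index - 1 - num_allocatable_gp_registers;
}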
@ -638,7 +638,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
ASSERT(index < Register::kMaxNumAllocatableRegisters);
ASSERT(index < Register::kNumAllocatableRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
@ -651,7 +651,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
ASSERT(index < DoubleRegister::NumAllocatableRegisters());
ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
@ -768,7 +768,6 @@ void LAllocator::AddConstraintsGapMove(int index,
void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
if (start == -1) return;
for (int i = start; i <= end; ++i) {
if (IsGapAt(i)) {
LInstruction* instr = NULL;
@ -947,8 +946,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
Define(curr_position, output, NULL);
}
if (instr->ClobbersRegisters()) {
for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
if (instr->IsMarkedAsCall()) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsRegister() ||
output->index() != i) {
LiveRange* range = FixedLiveRangeFor(i);
@ -959,8 +958,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
}
if (instr->ClobbersDoubleRegisters()) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
if (instr->IsMarkedAsCall()) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
@ -990,7 +989,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
for (TempIterator it(instr); !it.Done(); it.Advance()) {
LOperand* temp = it.Current();
if (instr->ClobbersTemps()) {
if (instr->IsMarkedAsCall()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@ -1325,14 +1324,8 @@ void LAllocator::BuildLiveRanges() {
while (!iterator.Done()) {
found = true;
int operand_index = iterator.Current();
if (chunk_->info()->IsStub()) {
CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
} else {
ASSERT(chunk_->info()->IsOptimizing());
PrintF("Function: %s\n",
*chunk_->info()->function()->debug_name()->ToCString());
}
PrintF("Function: %s\n",
*chunk_->info()->function()->debug_name()->ToCString());
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@ -1478,14 +1471,14 @@ void LAllocator::ProcessOsrEntry() {
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("L_Allocate general registers", this);
num_registers_ = Register::NumAllocatableRegisters();
num_registers_ = Register::kNumAllocatableRegisters;
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("L_Allocate double registers", this);
num_registers_ = DoubleRegister::NumAllocatableRegisters();
num_registers_ = DoubleRegister::kNumAllocatableRegisters;
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@ -1764,14 +1757,14 @@ void LAllocator::InactiveToActive(LiveRange* range) {
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
Register::kMaxNumAllocatableRegisters);
STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
Register::kNumAllocatableRegisters);
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
@ -1860,10 +1853,10 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
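
As an aside to the register-allocator hunks above (editorial sketch, not part of the patch): the restored STATIC_ASSERT encodes a sizing invariant. The allocator's local scratch arrays (free_until_pos, use_pos, block_pos) are sized by the double-register count and reused for the general-register pass, so the double count must be at least the general count. A minimal standalone illustration, with made-up counts and names:

#include <cassert>
#include <cstdio>

const int kNumAllocatableGeneral = 8;   // stand-in for Register::kNumAllocatableRegisters
const int kNumAllocatableDouble = 14;   // stand-in for DoubleRegister::kNumAllocatableRegisters

// One scratch array, sized for the larger (double) register class.
int free_until_pos[kNumAllocatableDouble];

// Valid for either pass as long as kNumAllocatableDouble >= kNumAllocatableGeneral,
// which is exactly what the STATIC_ASSERT guarantees.
void InitScratch(int num_registers) {
  assert(num_registers <= kNumAllocatableDouble);
  for (int i = 0; i < num_registers; ++i) free_until_pos[i] = 0;
}

int main() {
  InitScratch(kNumAllocatableGeneral);  // GENERAL_REGISTERS pass
  InitScratch(kNumAllocatableDouble);   // DOUBLE_REGISTERS pass
  std::printf("one %d-slot scratch array serves both passes\n", kNumAllocatableDouble);
  return 0;
}
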


@ -608,9 +608,9 @@ class LAllocator BASE_EMBEDDED {
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
fixed_live_ranges_;
EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;


@ -414,7 +414,7 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
}
Handle<Code> LChunk::Codegen(Code::Kind kind) {
Handle<Code> LChunk::Codegen() {
MacroAssembler assembler(info()->isolate(), NULL, 0);
LCodeGen generator(this, &assembler, info());
@ -425,7 +425,7 @@ Handle<Code> LChunk::Codegen(Code::Kind kind) {
PrintF("Crankshaft Compiler - ");
}
CodeGenerator::MakeCodePrologue(info());
Code::Flags flags = Code::ComputeFlags(kind);
Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);


@ -682,7 +682,7 @@ class LChunk: public ZoneObject {
Zone* zone() const { return info_->zone(); }
Handle<Code> Codegen(Code::Kind kind);
Handle<Code> Codegen();
protected:
LChunk(CompilationInfo* info, HGraph* graph)


@ -1537,7 +1537,6 @@ void Logger::LogCodeObject(Object* object) {
case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
case Code::COMPILED_STUB: // fall through
case Code::STUB:
description =
CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);


@ -70,8 +70,6 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
DEFINE_AST_VISITOR_SUBCLASS_METHODS();
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};


@ -3225,7 +3225,6 @@ int Code::arguments_count() {
int Code::major_key() {
ASSERT(kind() == STUB ||
kind() == COMPILED_STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
@ -3237,7 +3236,6 @@ int Code::major_key() {
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
kind() == COMPILED_STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
@ -3346,7 +3344,7 @@ void Code::set_profiler_ticks(int ticks) {
unsigned Code::stack_slots() {
ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
ASSERT(kind() == OPTIMIZED_FUNCTION);
return StackSlotsField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
@ -3354,7 +3352,7 @@ unsigned Code::stack_slots() {
void Code::set_stack_slots(unsigned slots) {
CHECK(slots <= (1 << kStackSlotsBitCount));
ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
ASSERT(kind() == OPTIMIZED_FUNCTION);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = StackSlotsField::update(previous, slots);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@ -3362,7 +3360,7 @@ void Code::set_stack_slots(unsigned slots) {
unsigned Code::safepoint_table_offset() {
ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
ASSERT(kind() == OPTIMIZED_FUNCTION);
return SafepointTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
@ -3370,7 +3368,7 @@ unsigned Code::safepoint_table_offset() {
void Code::set_safepoint_table_offset(unsigned offset) {
CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
ASSERT(kind() == OPTIMIZED_FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = SafepointTableOffsetField::update(previous, offset);


@ -8987,12 +8987,6 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
case Translation::COMPILED_STUB_FRAME: {
Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
PrintF(out, "{kind=%d}", stub_kind);
break;
}
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME: {
int function_id = iterator.Next();
@ -9107,7 +9101,6 @@ const char* Code::Kind2String(Kind kind) {
switch (kind) {
case FUNCTION: return "FUNCTION";
case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
case COMPILED_STUB: return "COMPILED_STUB";
case STUB: return "STUB";
case BUILTIN: return "BUILTIN";
case LOAD_IC: return "LOAD_IC";
@ -9227,7 +9220,7 @@ void Code::Disassemble(const char* name, FILE* out) {
}
PrintF("\n");
if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) {
if (kind() == OPTIMIZED_FUNCTION) {
SafepointTable table(this);
PrintF(out, "Safepoints (size = %u)\n", table.size());
for (unsigned i = 0; i < table.length(); i++) {


@ -4233,7 +4233,6 @@ class Code: public HeapObject {
V(FUNCTION) \
V(OPTIMIZED_FUNCTION) \
V(STUB) \
V(COMPILED_STUB) \
V(BUILTIN) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
@ -4850,10 +4849,6 @@ class Map: public HeapObject {
return IsFastDoubleElementsKind(elements_kind());
}
inline bool has_fast_elements() {
return IsFastElementsKind(elements_kind());
}
inline bool has_non_strict_arguments_elements() {
return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
}


@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
class HOptimizedGraphBuilder;
class HGraphBuilder;
class OptimizingCompiler;
class SharedFunctionInfo;


@ -42,7 +42,6 @@ PrettyPrinter::PrettyPrinter() {
output_ = NULL;
size_ = 0;
pos_ = 0;
InitializeAstVisitor();
}


@ -74,8 +74,6 @@ class PrettyPrinter: public AstVisitor {
void PrintDeclarations(ZoneList<Declaration*>* declarations);
void PrintFunctionLiteral(FunctionLiteral* function);
void PrintCaseClause(CaseClause* clause);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};


@ -43,9 +43,7 @@ class Processor: public AstVisitor {
result_assigned_(false),
is_set_(false),
in_try_(false),
factory_(Isolate::Current(), zone) {
InitializeAstVisitor();
}
factory_(isolate(), zone) { }
virtual ~Processor() { }
@ -88,8 +86,6 @@ class Processor: public AstVisitor {
#undef DEF_VISIT
void VisitIterationStatement(IterationStatement* stmt);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};


@ -7937,17 +7937,6 @@ class ActivationsFinder : public ThreadVisitor {
};
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyICMiss) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB);
delete deoptimizer;
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@ -7956,11 +7945,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB);
JavaScriptFrameIterator it(isolate);
// Make sure to materialize objects before causing any allocation.
JavaScriptFrameIterator it(isolate);
deoptimizer->MaterializeHeapObjects(&it);
delete deoptimizer;


@ -89,7 +89,6 @@ namespace internal {
F(ForceParallelRecompile, 1, 1) \
F(InstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyICMiss, 0, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \


@ -59,8 +59,7 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const {
SafepointTable::SafepointTable(Code* code) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::COMPILED_STUB);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
code_ = code;
Address header = code->instruction_start() + code->safepoint_table_offset();
length_ = Memory::uint32_at(header + kLengthOffset);
@ -159,6 +158,14 @@ unsigned SafepointTableBuilder::GetCodeOffset() const {
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
// For lazy deoptimization we need space to patch a call after every call.
// Ensure there is always space for such patching, even if the code ends
// in a call.
int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
while (assembler->pc_offset() < target_offset) {
assembler->nop();
}
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(kIntSize);
assembler->RecordComment(";;; Safepoint table.");
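
As an aside (editorial sketch, not part of the patch): the padding restored above reserves room for lazy deoptimization, which later overwrites a call-sized region after each call site; emitting nops up to pc_offset() plus patch_size() guarantees that region exists even when the code ends in a call. A toy illustration with invented types and sizes:

#include <cstdio>
#include <vector>

// Toy stand-ins; the real code uses Assembler::pc_offset(), Assembler::nop()
// and Deoptimizer::patch_size().
struct ToyAssembler {
  std::vector<unsigned char> buffer;
  int pc_offset() const { return static_cast<int>(buffer.size()); }
  void nop() { buffer.push_back(0x90); }
};

const int kPatchSize = 5;  // assumed size of the call that gets patched in later

void PadForLazyDeopt(ToyAssembler* assembler) {
  // Ensure there is always space for patching, even if the code ends in a call.
  int target_offset = assembler->pc_offset() + kPatchSize;
  while (assembler->pc_offset() < target_offset) {
    assembler->nop();
  }
}

int main() {
  ToyAssembler masm;
  PadForLazyDeopt(&masm);
  std::printf("emitted %d padding bytes\n", masm.pc_offset());
  return 0;
}
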


@ -30,7 +30,6 @@
#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
@ -528,17 +527,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
51,
"Code::MakeCodeYoung");
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
HandleScope scope(Isolate::Current());
for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
Address address = Deoptimizer::GetDeoptimizationEntry(
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
Add(address, LAZY_DEOPTIMIZATION, 52 + entry, "lazy_deopt");
}
}


@ -47,11 +47,10 @@ enum TypeCode {
EXTENSION,
ACCESSOR,
RUNTIME_ENTRY,
STUB_CACHE_TABLE,
LAZY_DEOPTIMIZATION
STUB_CACHE_TABLE
};
const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
const int kFirstTypeCode = UNCLASSIFIED;
const int kReferenceIdBits = 16;
@ -60,7 +59,6 @@ const int kReferenceTypeShift = kReferenceIdBits;
const int kDebugRegisterBits = 4;
const int kDebugIdShift = kDebugRegisterBits;
const int kDeoptTableSerializeEntryCount = 8;
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build


@ -58,16 +58,11 @@ class SmartPointerBase {
// You can get the underlying pointer out with the * operator.
inline T* operator*() { return p_; }
// You can use [n] to index as if it was a plain pointer.
// You can use [n] to index as if it was a plain pointer
inline T& operator[](size_t i) {
return p_[i];
}
// You can use [n] to index as if it was a plain pointer.
const inline T& operator[](size_t i) const {
return p_[i];
}
// We don't have implicit conversion to a T* since that hinders migration:
// You would not be able to change a method from returning a T* to
// returning an SmartArrayPointer<T> and then get errors wherever it is used.
@ -82,11 +77,6 @@ class SmartPointerBase {
return temp;
}
inline void Reset(T* new_value) {
if (p_) Deallocator::Delete(p_);
p_ = new_value;
}
// Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.


@ -1680,7 +1680,6 @@ static void ReportCodeKindStatistics() {
CASE(FUNCTION);
CASE(OPTIMIZED_FUNCTION);
CASE(STUB);
CASE(COMPILED_STUB);
CASE(BUILTIN);
CASE(LOAD_IC);
CASE(KEYED_LOAD_IC);


@ -681,6 +681,13 @@ class KeyedLoadStubCompiler: public StubCompiler {
Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
CodeHandleList* handler_ics);
static void GenerateLoadExternalArray(MacroAssembler* masm,
ElementsKind elements_kind);
static void GenerateLoadFastElement(MacroAssembler* masm);
static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
private:


@ -1015,7 +1015,6 @@ class BailoutId {
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
@ -1031,12 +1030,9 @@ class BailoutId {
// code (function declarations).
static const int kDeclarationsId = 3;
// Every FunctionState starts with this id.
// Ever FunctionState starts with this id.
static const int kFirstUsableId = 4;
// Every compiled stub starts with this id.
static const int kStubEntryId = 5;
int id_;
};


@ -201,8 +201,7 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// -----------------------------------------------------------------------------
// Register constants.
const int
Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
0, 3, 2, 1, 7, 8, 9, 11, 14, 15
};
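
As an aside (editorial sketch, not part of the patch): the table restored above maps an allocation index to an x64 register code (rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15), and the companion kAllocationIndexByRegisterCode table in the header is its inverse, with non-allocatable registers left out. A small standalone sketch of that relationship, with illustrative names:

#include <cassert>
#include <cstdio>

const int kNumRegisters = 16;
const int kNumAllocatable = 10;  // stand-in for Register::kNumAllocatableRegisters

// Allocation index -> register code, mirroring the restored table:
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
const int kCodeByIndex[kNumAllocatable] = { 0, 3, 2, 1, 7, 8, 9, 11, 14, 15 };

int main() {
  // Derive the inverse table; codes that are never allocated (rsp, rbp, rsi,
  // r10, r12, r13) stay at -1, which mirrors kAllocationIndexByRegisterCode.
  int index_by_code[kNumRegisters];
  for (int code = 0; code < kNumRegisters; ++code) index_by_code[code] = -1;
  for (int i = 0; i < kNumAllocatable; ++i) index_by_code[kCodeByIndex[i]] = i;

  for (int i = 0; i < kNumAllocatable; ++i) {
    assert(index_by_code[kCodeByIndex[i]] == i);  // round trip holds
  }
  std::printf("allocation index 4 -> register code %d (rdi)\n", kCodeByIndex[4]);
  return 0;
}
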


@ -95,24 +95,21 @@ struct Register {
// r10 - fixed scratch register
// r12 - smi constant register
// r13 - root register
static const int kMaxNumAllocatableRegisters = 10;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 10;
static int ToAllocationIndex(Register reg) {
return kAllocationIndexByRegisterCode[reg.code()];
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
Register result = { kRegisterCodeByAllocationIndex[index] };
return result;
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"rax",
"rbx",
@ -160,7 +157,7 @@ struct Register {
int code_;
private:
static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
@ -203,10 +200,7 @@ const Register no_reg = { kRegister_no_reg_Code };
struct XMMRegister {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters = 15;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
static const int kNumAllocatableRegisters = 15;
static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
@ -214,13 +208,13 @@ struct XMMRegister {
}
static XMMRegister FromAllocationIndex(int index) {
ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
ASSERT(0 <= index && index < kNumAllocatableRegisters);
XMMRegister result = { index + 1 };
return result;
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"xmm1",
"xmm2",


@ -646,25 +646,6 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
__ CallRuntime(Runtime::kNotifyICMiss, 0);
__ Popad();
// Tear down internal frame.
}
__ pop(MemOperand(rsp, 0)); // Ignore state offset
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
@ -679,17 +660,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it.
__ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
__ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
__ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
__ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movq(rax, Operand(rsp, 2 * kPointerSize));
__ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
__ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.


@ -36,24 +36,6 @@
namespace v8 {
namespace internal {
CodeStubInterfaceDescriptor*
KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
static CodeStubInterfaceDescriptor* result = NULL;
if (result == NULL) {
Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
static Register registers[] = { rdx, rax };
static CodeStubInterfaceDescriptor info = {
2,
registers,
miss
};
result = &info;
}
return result;
}
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {


@ -37,7 +37,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub {
class TranscendentalCacheStub: public CodeStub {
public:
enum ArgumentType {
TAGGED = 0,
@ -60,7 +60,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
};
class StoreBufferOverflowStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@ -79,7 +79,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
class UnaryOpStub: public PlatformCodeStub {
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@ -216,7 +216,7 @@ enum StringAddFlags {
};
class StringAddStub: public PlatformCodeStub {
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@ -238,7 +238,7 @@ class StringAddStub: public PlatformCodeStub {
};
class SubStringStub: public PlatformCodeStub {
class SubStringStub: public CodeStub {
public:
SubStringStub() {}
@ -250,7 +250,7 @@ class SubStringStub: public PlatformCodeStub {
};
class StringCompareStub: public PlatformCodeStub {
class StringCompareStub: public CodeStub {
public:
StringCompareStub() {}
@ -287,7 +287,7 @@ class StringCompareStub: public PlatformCodeStub {
};
class NumberToStringStub: public PlatformCodeStub {
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
@ -316,7 +316,7 @@ class NumberToStringStub: public PlatformCodeStub {
};
class StringDictionaryLookupStub: public PlatformCodeStub {
class StringDictionaryLookupStub: public CodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@ -378,7 +378,7 @@ class StringDictionaryLookupStub: public PlatformCodeStub {
};
class RecordWriteStub: public PlatformCodeStub {
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
@ -561,7 +561,7 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotRcxOr(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(rcx)) continue;
if (candidate.is(r1)) continue;


@ -44,10 +44,6 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
CodeGenerator() {
InitializeAstVisitor();
}
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@ -67,8 +63,6 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};


@ -211,7 +211,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
compiled_code_->deoptimization_data());
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
@ -248,7 +248,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@ -340,7 +340,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
intptr_t pc = reinterpret_cast<intptr_t>(
compiled_code_->entry() + pc_offset);
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation =
@ -459,70 +459,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
int frame_index) {
//
// FROM TO <-rbp
// | .... | | .... |
// +-------------------------+ +-------------------------+
// | JSFunction continuation | | JSFunction continuation |
// +-------------------------+ +-------------------------+<-rsp
// | | saved frame (rbp) |
// | +=========================+<-rbp
// | | JSFunction context |
// v +-------------------------+
// | COMPILED_STUB marker | rbp = saved frame
// +-------------------------+ rsi = JSFunction context
// | |
// | ... |
// | |
// +-------------------------+<-rsp
//
//
int output_frame_size = 1 * kPointerSize;
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, 0);
Code* notify_miss =
isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(notify_miss->entry()));
ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
int major_key = compiled_code_->major_key();
CodeStubInterfaceDescriptor* descriptor =
isolate_->code_stub_interface_descriptors()[major_key];
Handle<Code> miss_ic(descriptor->deoptimization_handler);
output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
unsigned input_frame_size = input_->GetFrameSize();
intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
output_frame->SetFrameSlot(0, value);
value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
output_frame->SetRegister(rbp.code(), value);
output_frame->SetFp(value);
value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
output_frame->SetRegister(rsi.code(), value);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
ASSERT(opcode == Translation::REGISTER);
USE(opcode);
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
output_frame->SetRegister(rdx.code(), input_value);
int32_t next = iterator->Next();
opcode = static_cast<Translation::Opcode>(next);
ASSERT(opcode == Translation::REGISTER);
input_reg = iterator->Next();
input_value = input_->GetRegister(input_reg);
output_frame->SetRegister(rax.code(), input_value);
ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
}
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
@ -942,7 +878,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@ -962,10 +898,10 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::NumAllocatableRegisters();
XMMRegister::kNumAllocatableRegisters;
__ subq(rsp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movsd(Operand(rsp, offset), xmm_reg);
@ -1054,7 +990,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ pop(Operand(rbx, dst_offset));
}
@ -1075,13 +1011,10 @@ void Deoptimizer::EntryGenerator::Generate() {
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(rdx, 0));
__ addq(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
@ -1098,33 +1031,28 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(rax);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
Label outer_push_loop, inner_push_loop;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
__ lea(rdx, Operand(rax, rdx, times_8, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addq(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(rbx, src_offset));


@ -119,45 +119,35 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
// Strict mode functions need to replace the receiver with undefined
// when called as functions (without an explicit receiver
// object). rcx is zero for method calls and non-zero for function
// calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, receiver_offset), kScratchRegister);
__ bind(&ok);
}
// Strict mode functions need to replace the receiver with undefined
// when called as functions (without an explicit receiver
// object). rcx is zero for method calls and non-zero for function
// calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, receiver_offset), kScratchRegister);
__ bind(&ok);
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
if (info()->IsStub()) {
__ Push(Smi::FromInt(StackFrame::STUB));
} else {
__ push(rdi); // Callee's JS function.
}
}
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
__ push(rdi); // Callee's JS function.
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
@ -187,7 +177,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Possibly allocate a local context.
int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
@ -223,7 +213,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@ -276,55 +266,9 @@ bool LCodeGen::GenerateBody() {
bool LCodeGen::GenerateJumpTable() {
Label needs_frame_not_call;
Label needs_frame_is_call;
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
Address entry = jump_table_[i].address;
if (jump_table_[i].needs_frame) {
__ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
if (jump_table_[i].is_lazy_deopt) {
if (needs_frame_is_call.is_bound()) {
__ jmp(&needs_frame_is_call);
} else {
__ bind(&needs_frame_is_call);
__ push(rbp);
__ movq(rbp, rsp);
__ push(rsi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
__ push(rsi);
__ movq(rsi, MemOperand(rsp, kPointerSize));
__ call(kScratchRegister);
}
} else {
if (needs_frame_not_call.is_bound()) {
__ jmp(&needs_frame_not_call);
} else {
__ bind(&needs_frame_not_call);
__ push(rbp);
__ movq(rbp, rsp);
__ push(r8);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
__ push(rsi);
__ movq(rsi, MemOperand(rsp, kPointerSize));
__ jmp(kScratchRegister);
}
}
} else {
if (jump_table_[i].is_lazy_deopt) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
}
}
__ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
}
return !is_aborted();
}
@ -336,32 +280,10 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
if (NeedsDeferredFrame()) {
Comment(";;; Deferred build frame",
code->instruction_index(),
code->instr()->Mnemonic());
ASSERT(!frame_is_built_);
ASSERT(info()->IsStub());
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ push(rbp); // Caller's frame pointer.
__ push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
__ lea(rbp, Operand(rsp, 2 * kPointerSize));
}
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Deferred destroy frame",
code->instruction_index(),
code->instr()->Mnemonic());
ASSERT(frame_is_built_);
frame_is_built_ = false;
__ movq(rsp, rbp);
__ pop(rbp);
}
__ jmp(code->exit());
}
}
@ -474,9 +396,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation,
arguments_index,
arguments_count);
bool has_closure_id = !info()->closure().is_null() &&
*info()->closure() != *environment->closure();
int closure_id = has_closure_id
int closure_id = *info()->closure() != *environment->closure()
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@ -500,9 +420,6 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
case STUB:
translation->BeginCompiledStubFrame();
break;
}
// Inlined frames which push their arguments cause the index to be
@ -693,33 +610,20 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
ASSERT(info()->IsStub() || frame_is_built_);
bool lazy_deopt = info()->IsStub();
if (cc == no_condition) {
if (lazy_deopt) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
}
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
jump_table_.last().address != entry ||
jump_table_.last().needs_frame != !frame_is_built_ ||
jump_table_.last().is_lazy_deopt != lazy_deopt) {
JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt);
jump_table_.Add(table_entry, zone());
jump_table_.last().address != entry) {
jump_table_.Add(JumpTableEntry(entry), zone());
}
__ j(cc, &jump_table_.last().label);
}
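
As an aside (editorial sketch, not part of the patch): the jump-table logic restored above only appends a new entry when the last recorded deopt entry address differs, so repeated deopts to the same entry share one out-of-line jump. A simplified standalone sketch of that reuse check, with simplified stand-in types:

#include <cstdio>
#include <vector>

typedef const void* Address;  // stand-in for the deopt entry address

struct JumpTableEntry {
  explicit JumpTableEntry(Address entry) : address(entry) {}
  Address address;
};

// Append only when the last recorded entry differs; otherwise the caller just
// branches to the label of the existing entry.
void AddDeopt(std::vector<JumpTableEntry>* table, Address entry) {
  if (table->empty() || table->back().address != entry) {
    table->push_back(JumpTableEntry(entry));
  }
}

int main() {
  std::vector<JumpTableEntry> jump_table;
  int dummy1 = 0, dummy2 = 0;
  AddDeopt(&jump_table, &dummy1);
  AddDeopt(&jump_table, &dummy1);  // same entry, reused, no new slot
  AddDeopt(&jump_table, &dummy2);
  std::printf("jump table has %zu entries\n", jump_table.size());
  return 0;
}
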
@ -2384,22 +2288,15 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime
// call to return the value in the same register.
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (NeedsEagerFrame()) {
__ movq(rsp, rbp);
__ pop(rbp);
}
if (info()->IsStub()) {
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ Ret(0, r10);
} else {
__ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
}
__ movq(rsp, rbp);
__ pop(rbp);
__ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
}
@ -4630,10 +4527,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
LInstruction* instr) {
LEnvironment* env) {
Label success;
__ CompareMap(reg, map, &success, mode);
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(not_equal, env);
__ bind(&success);
}
@ -4651,7 +4548,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
__ bind(&success);
}
@ -4718,7 +4615,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr);
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
@ -4727,7 +4624,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
// Check the holder map.
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr);
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
@ -5263,7 +5160,6 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();


@ -63,7 +63,6 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@ -78,15 +77,6 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
@ -120,7 +110,7 @@ class LCodeGen BASE_EMBEDDED {
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
CompareMapMode mode, LInstruction* instr);
CompareMapMode mode, LEnvironment* env);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@ -168,7 +158,7 @@ class LCodeGen BASE_EMBEDDED {
Register scratch);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return info()->num_parameters(); }
int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@ -337,15 +327,11 @@ class LCodeGen BASE_EMBEDDED {
int* offset);
struct JumpTableEntry {
inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
explicit inline JumpTableEntry(Address entry)
: label(),
address(entry),
needs_frame(frame),
is_lazy_deopt(is_lazy) { }
address(entry) { }
Label label;
Address address;
bool needs_frame;
bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt(int space_needed);
@ -374,7 +360,6 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@ -389,7 +374,6 @@ class LCodeGen BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;


@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
register_spills_[i] = NULL;
}
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
double_register_spills_[i] = NULL;
}
}
@ -619,8 +619,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@ -1619,12 +1617,8 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
// Only mark conversions that might need to allocate as calling rather than
// all changes. This makes simple, non-allocating conversion not have to force
// building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@ -1642,7 +1636,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
@ -1656,7 +1649,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@ -2123,17 +2115,8 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LParameter* result = new(zone()) LParameter;
if (info()->IsOptimizing()) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(result, spill_index);
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
Register reg = descriptor->register_params[instr->index()];
return DefineFixed(result, reg);
}
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(new(zone()) LParameter, spill_index);
}
@ -2229,7 +2212,6 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
return MarkAsCall(new(zone()) LStackCheck, instr);
} else {


@ -251,11 +251,6 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersDoubleRegisters() const { return is_call_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
// Interface to the register allocator and iterators.
@ -2271,9 +2266,8 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
LOperand* double_register_spills_[
DoubleRegister::kMaxNumAllocatableRegisters];
LOperand* register_spills_[Register::kNumAllocatableRegisters];
LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
};


@ -3432,7 +3432,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
@ -3476,7 +3476,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}


@ -1414,9 +1414,9 @@ class MacroAssembler: public Assembler {
return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
}
// Needs access to SafepointRegisterStackIndex for compiled frame
// Needs access to SafepointRegisterStackIndex for optimized frame
// traversal.
friend class CompiledFrame;
friend class OptimizedFrame;
};


@ -3210,19 +3210,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
// -- rsp[0] : return address
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
if (receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements()) {
Handle<Code> stub = KeyedLoadFastElementStub(
receiver_map->instance_type() == JS_ARRAY_TYPE,
elements_kind).GetCode();
__ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
} else {
Handle<Code> stub =
KeyedLoadDictionaryElementStub().GetCode();
__ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
}
Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
__ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(Code::NORMAL, factory()->empty_string());
@ -3464,6 +3457,140 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label slow, miss_force_generic;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
// Check that the index is in range.
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(rcx, rax);
__ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &miss_force_generic);
// rax: index (as a smi)
// rdx: receiver (JSObject)
// rcx: untagged index
// rbx: elements array
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
__ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case EXTERNAL_INT_ELEMENTS:
__ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
case EXTERNAL_FLOAT_ELEMENTS:
__ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
break;
default:
UNREACHABLE();
break;
}
// rax: index
// rdx: receiver
// For integer array types:
// rcx: value
// For floating-point array type:
// xmm0: value as double.
ASSERT(kSmiValueSize == 32);
if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
__ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
// The value is zero-extended since we loaded the value from memory
// with movl.
__ cvtqsi2sd(xmm0, rcx);
__ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else {
__ Integer32ToSmi(rax, rcx);
__ ret(0);
}
// Slow case: Jump to runtime.
__ bind(&slow);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Miss case: Jump to runtime.
__ bind(&miss_force_generic);
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
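
As an aside (editorial sketch, not part of the patch): the EXTERNAL_UNSIGNED_INT_ELEMENTS branch above relies on the x64 smi layout asserted by ASSERT(kSmiValueSize == 32). A 32-bit smi payload holds exactly the int32 range, so an unsigned 32-bit element needs boxing into a HeapNumber only when it exceeds 0x7FFFFFFF. A tiny sketch of that decision (the helper name is invented):

#include <cstdint>
#include <cstdio>

// With a 32-bit smi payload, valid smi values are exactly the int32 range, so a
// uint32 element fits only if it does not exceed 0x7FFFFFFF.
bool UIntFitsInSmi(uint32_t value) {
  return value <= 0x7FFFFFFFu;
}

int main() {
  std::printf("%u -> %s\n", 42u, UIntFitsInSmi(42u) ? "smi" : "boxed HeapNumber");
  std::printf("%u -> %s\n", 0x80000000u,
              UIntFitsInSmi(0x80000000u) ? "smi" : "boxed HeapNumber");
  return 0;
}
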
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@ -3653,6 +3780,98 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss_force_generic;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
// Get the elements array.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
__ AssertFastElements(rcx);
// Check that the key is within bounds.
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
// Load the result and make sure it's not the hole.
SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
__ movq(rbx, FieldOperand(rcx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss_force_generic);
__ movq(rax, rbx);
__ ret(0);
__ bind(&miss_force_generic);
Code* code = masm->isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_MissForceGeneric);
Handle<Code> ic(code);
__ jmp(ic, RelocInfo::CODE_TARGET);
}
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss_force_generic, slow_allocate_heapnumber;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
// Get the elements array.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
__ AssertFastElements(rcx);
// Check that the key is within bounds.
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
// Check for the hole
__ SmiToInteger32(kScratchRegister, rax);
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
Immediate(kHoleNanUpper32));
__ j(equal, &miss_force_generic);
// Always allocate a heap number for the result.
__ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
FixedDoubleArray::kHeaderSize));
__ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
// Set the value.
__ movq(rax, rcx);
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ ret(0);
__ bind(&slow_allocate_heapnumber);
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ jmp(slow_ic, RelocInfo::CODE_TARGET);
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,


@ -546,7 +546,7 @@ TEST(BootUpMemoryUse) {
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(delta, 2600 * 1024); // 2400.
CHECK_LE(delta, 2500 * 1024); // 2400.
} else {
CHECK_LE(delta, 2860 * 1024); // 2760.
}


@ -152,7 +152,6 @@ var knownProblems = {
"LazyRecompile": true,
"ParallelRecompile": true,
"NotifyDeoptimized": true,
"NotifyICMiss": true,
"NotifyOSR": true,
"CreateObjectLiteralBoilerplate": true,
"CloneLiteralBoilerplate": true,


@ -262,7 +262,6 @@
'../../src/circular-queue.h',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
'../../src/code-stubs-hydrogen.cc',
'../../src/code.h',
'../../src/codegen.cc',
'../../src/codegen.h',