Generate the TransitionElementsStub using Crankshaft
This includes:
* Adding support for saving callee-clobbered double registers in Crankshaft code.
* Adding a new "HTrapAllocationMemento" hydrogen instruction to handle AllocationSiteInfo data in crankshafted stubs.
* Adding a new "HAllocate" hydrogen instruction that can allocate raw memory from the GC in crankshafted code.
* Support for manipulation of the hole in HChange instructions for crankshafted stubs.
* Utility routines to manually build loops and if statements containing hydrogen code (a usage sketch follows the commit metadata below).

Review URL: https://codereview.chromium.org/11659022

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13585 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Parent: 744d61ebe7
Commit: 0c3575c874
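The last bullet above refers to the new HGraphBuilder::IfBuilder and HGraphBuilder::LoopBuilder helpers, whose implementation appears in the src/hydrogen.cc hunks at the end of this diff. A minimal usage sketch, modeled on the TransitionElementsKindStub builder code in this commit; the surrounding HGraphBuilder subclass, its zone()/graph()/context() accessors, and the length/start/limit values are assumptions for illustration:

    // Sketch only: compose hydrogen control flow with the new builders.
    IfBuilder if_builder(this);
    if_builder.BeginTrue(length, graph()->GetConstant0(), Token::EQ);
    // True branch: nothing to do.
    if_builder.BeginFalse();
    LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
    HValue* key = loop.BeginBody(start, limit, Token::LT);
    // Emit per-iteration hydrogen instructions using `key` here.
    loop.EndBody();
    if_builder.End();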
@@ -302,13 +302,13 @@ struct SwVfpRegister {

 // Double word VFP register.
 struct DwVfpRegister {
-  static const int kNumRegisters = 32;
+  static const int kMaxNumRegisters = 32;
   // A few double registers are reserved: one as a scratch register and one to
   // hold 0.0, that does not fit in the immediate field of vmov instructions.
   // d14: 0.0
   // d15: scratch register.
   static const int kNumReservedRegisters = 2;
-  static const int kMaxNumAllocatableRegisters = kNumRegisters -
+  static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
       kNumReservedRegisters;

   // Note: the number of registers can be different at snapshot and run-time.
@@ -327,7 +327,7 @@ struct DwVfpRegister {
   }

   bool is_valid() const {
-    return 0 <= code_ && code_ < kNumRegisters;
+    return 0 <= code_ && code_ < kMaxNumRegisters;
   }
   bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
   SwVfpRegister low() const {
@@ -49,6 +49,18 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
 }


+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0, r1 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  Address entry =
+      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
 #define __ ACCESS_MASM(masm)

 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -2075,8 +2087,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
     // Check CPU flags for number of registers, setting the Z condition flag.
     __ CheckFor32DRegs(scratch);

-    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
-    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
+    for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
       __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
     }
@@ -2096,11 +2108,11 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
     // Check CPU flags for number of registers, setting the Z condition flag.
     __ CheckFor32DRegs(scratch);

-    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+    for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
       __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
     }
-    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
   }
   __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
 }
@@ -156,8 +156,8 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
   // -----------------------------------
   if (mode == TRACK_ALLOCATION_SITE) {
     ASSERT(allocation_site_info_found != NULL);
-    masm->TestJSArrayForAllocationSiteInfo(r2, r4,
-                                           allocation_site_info_found);
+    __ TestJSArrayForAllocationSiteInfo(r2, r4);
+    __ b(eq, allocation_site_info_found);
   }

   // Set transitioned map.
@@ -187,7 +187,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   bool vfp2_supported = CpuFeatures::IsSupported(VFP2);

   if (mode == TRACK_ALLOCATION_SITE) {
-    masm->TestJSArrayForAllocationSiteInfo(r2, r4, fail);
+    __ TestJSArrayForAllocationSiteInfo(r2, r4);
+    __ b(eq, fail);
   }

   // Check for empty arrays, which only require a map transition and no changes
@@ -327,7 +328,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   Label entry, loop, convert_hole, gc_required, only_change_map;

   if (mode == TRACK_ALLOCATION_SITE) {
-    masm->TestJSArrayForAllocationSiteInfo(r2, r4, fail);
+    __ TestJSArrayForAllocationSiteInfo(r2, r4);
+    __ b(eq, fail);
   }

   // Check for empty arrays, which only require a map transition and no changes
@@ -533,6 +533,11 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
     DoTranslateCommand(iterator, 0, output_frame_offset);
   }

+  for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
+
   value = input_->GetRegister(fp.code());
   output_frame->SetRegister(fp.code(), value);
   output_frame->SetFp(value);
@@ -1158,21 +1163,18 @@ void Deoptimizer::EntryGenerator::Generate() {

   if (CpuFeatures::IsSupported(VFP2)) {
     CpuFeatures::Scope scope(VFP2);
     // In case of OSR, we have to restore the d registers.
     if (type() == OSR) {
-      // Check CPU flags for number of registers, setting the Z condition flag.
-      __ CheckFor32DRegs(ip);
-
-      __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
-      int src_offset = FrameDescription::double_registers_offset();
-      for (int i = 0; i < DwVfpRegister::kNumRegisters; ++i) {
-        if (i == kDoubleRegZero.code()) continue;
-        if (i == kScratchDoubleReg.code()) continue;
-
-        const DwVfpRegister reg = DwVfpRegister::from_code(i);
-        __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
-        src_offset += kDoubleSize;
-      }
+      // Check CPU flags for number of registers, setting the Z condition flag.
+      __ CheckFor32DRegs(ip);
+
+      __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+      int src_offset = FrameDescription::double_registers_offset();
+      for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+        if (i == kDoubleRegZero.code()) continue;
+        if (i == kScratchDoubleReg.code()) continue;
+        const DwVfpRegister reg = DwVfpRegister::from_code(i);
+        __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
+        src_offset += kDoubleSize;
+      }
     }
   }
@@ -2098,12 +2098,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {

 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
+  LOperand* object = UseRegister(instr->object());
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
-    LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =
        new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
     return DefineSameAsFirst(result);
+  } else if (FLAG_compiled_transitions) {
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL, NULL);
+    return AssignPointerMap(result);
   } else {
     LOperand* object = UseFixed(instr->object(), r0);
     LOperand* fixed_object_reg = FixedTemp(r2);
@@ -2112,11 +2116,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
         new(zone()) LTransitionElementsKind(object,
                                             new_map_reg,
                                             fixed_object_reg);
-    return MarkAsCall(DefineFixed(result, r0), instr);
+    return MarkAsCall(result, instr);
   }
 }


+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
   bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@@ -2183,12 +2197,23 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {


 LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
   info()->MarkAsDeferredCalling();
   LAllocateObject* result =
       new(zone()) LAllocateObject(TempRegister(), TempRegister());
   return AssignPointerMap(DefineAsRegister(result));
 }


+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* size = UseTempRegister(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
 LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
   return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
 }
@@ -50,6 +50,7 @@ class LCodeGen;
   V(AccessArgumentsAt) \
   V(AddI) \
   V(AllocateObject) \
+  V(Allocate) \
   V(ApplyArguments) \
   V(ArgumentsElements) \
   V(ArgumentsLength) \
@@ -175,6 +176,7 @@ class LCodeGen;
   V(Throw) \
   V(ToFastProperties) \
   V(TransitionElementsKind) \
+  V(TrapAllocationMemento) \
   V(Typeof) \
   V(TypeofIsAndBranch) \
   V(UnaryMathOperation) \
@@ -1619,6 +1621,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
 class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
 };


@@ -1852,6 +1855,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
   LOperand* temp2() { return temps_[1]; }

   DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
 };


@@ -2036,10 +2040,10 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* new_map_temp,
-                          LOperand* temp) {
+                          LOperand* fixed_object_temp) {
     inputs_[0] = object;
     temps_[0] = new_map_temp;
-    temps_[1] = temp;
+    temps_[1] = fixed_object_temp;
   }

   LOperand* object() { return inputs_[0]; }
@@ -2059,6 +2063,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
 };


+class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
 class LStringAdd: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringAdd(LOperand* left, LOperand* right) {
@@ -2239,7 +2259,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
 };


-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
  public:
   LAllocateObject(LOperand* temp, LOperand* temp2) {
     temps_[0] = temp;
@@ -2254,6 +2274,23 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
 };


+class LAllocate: public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
 class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
@@ -146,16 +146,23 @@ bool LCodeGen::GeneratePrologue() {

   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    PredictableCodeSizeScope predictible_code_size_scope(
-        masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
-    // The following three instructions must remain together and unmodified
-    // for code aging to work properly.
-    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-    // Load undefined value here, so the value is ready for the loop
-    // below.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    // Adjust FP to point to saved FP.
-    __ add(fp, sp, Operand(2 * kPointerSize));
+    if (info()->IsStub()) {
+      __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+      __ Push(Smi::FromInt(StackFrame::STUB));
+      // Adjust FP to point to saved FP.
+      __ add(fp, sp, Operand(2 * kPointerSize));
+    } else {
+      PredictableCodeSizeScope predictible_code_size_scope(
+          masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+      // The following three instructions must remain together and unmodified
+      // for code aging to work properly.
+      __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+      // Load undefined value here, so the value is ready for the loop
+      // below.
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      // Adjust FP to point to saved FP.
+      __ add(fp, sp, Operand(2 * kPointerSize));
+    }
     frame_is_built_ = true;
   }

@@ -163,18 +170,38 @@ bool LCodeGen::GeneratePrologue() {
   int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
-      __ mov(r0, Operand(slots));
-      __ mov(r2, Operand(kSlotsZapValue));
+      __ sub(sp, sp, Operand(slots * kPointerSize));
+      __ push(r0);
+      __ push(r1);
+      __ add(r0, sp, Operand(slots * kPointerSize));
+      __ mov(r1, Operand(kSlotsZapValue));
       Label loop;
       __ bind(&loop);
-      __ push(r2);
-      __ sub(r0, r0, Operand(1), SetCC);
+      __ sub(r0, r0, Operand(kPointerSize));
+      __ str(r1, MemOperand(r0, 2 * kPointerSize));
+      __ cmp(r0, sp);
       __ b(ne, &loop);
+      __ pop(r1);
+      __ pop(r0);
     } else {
       __ sub(sp, sp, Operand(slots * kPointerSize));
     }
   }

+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    Comment(";;; Save clobbered callee double registers");
+    int count = 0;
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    while (!save_iterator.Done()) {
+      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
+
   // Possibly allocate a local context.
   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
@@ -2820,11 +2847,26 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ push(r0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    ASSERT(NeedsEagerFrame());
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    int count = 0;
+    while (!save_iterator.Done()) {
+      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
   if (NeedsEagerFrame()) {
     int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
     __ mov(sp, fp);
     __ ldm(ia_w, sp, fp.bit() | lr.bit());
-    __ add(sp, sp, Operand(sp_delta));
+    if (!info()->IsStub()) {
+      __ add(sp, sp, Operand(sp_delta));
+    }
   }
   __ Jump(lr);
 }
@@ -3587,8 +3629,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {


 void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
   Register result = ToRegister(instr->result());
-  __ mov(result, cp);
+  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsReturn()) {
+      __ mov(result, cp);
+      return;
+    }
+  }
 }

@@ -4507,7 +4555,6 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {

 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_temp());
   Register scratch = scratch0();

   Handle<Map> from_map = instr->original_map();
@@ -4519,18 +4566,29 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   __ cmp(scratch, Operand(from_map));
   __ b(ne, &not_applicable);
-  __ mov(new_map_reg, Operand(to_map));

   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ mov(new_map_reg, Operand(to_map));
     __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, kLRHasBeenSaved, kDontSaveFPRegs);
+  } else if (FLAG_compiled_transitions) {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    __ Move(r0, object_reg);
+    __ Move(r1, to_map);
+    TransitionElementsKindStub stub(from_kind, to_kind);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   } else if (IsFastSmiElementsKind(from_kind) &&
              IsFastDoubleElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(r2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     ASSERT(new_map_reg.is(r3));
+    __ mov(new_map_reg, Operand(to_map));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
@@ -4538,7 +4596,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
              IsFastObjectElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(r2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     ASSERT(new_map_reg.is(r3));
+    __ mov(new_map_reg, Operand(to_map));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
              RelocInfo::CODE_TARGET, instr);
@@ -4549,6 +4609,14 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
 }


+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  __ TestJSArrayForAllocationSiteInfo(object, temp);
+  DeoptimizeIf(eq, instr->environment());
+}
+
+
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
@@ -4885,6 +4953,58 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   Register temp1 = ToRegister(instr->temp());
   Register temp2 = ToRegister(instr->temp2());

+  bool convert_hole = false;
+  HValue* change_input = instr->hydrogen()->value();
+  if (change_input->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(change_input);
+    convert_hole = load->UsesMustHandleHole();
+  }
+
+  Label no_special_nan_handling;
+  Label done;
+  if (convert_hole) {
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+      __ VFPCompareAndSetFlags(input_reg, input_reg);
+      __ b(vc, &no_special_nan_handling);
+      __ vmov(reg, scratch0(), input_reg);
+      __ cmp(scratch0(), Operand(kHoleNanUpper32));
+      Label canonicalize;
+      __ b(ne, &canonicalize);
+      __ Move(reg, factory()->the_hole_value());
+      __ b(&done);
+      __ bind(&canonicalize);
+      __ Vmov(input_reg,
+              FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
+              no_reg);
+    } else {
+      Label not_hole;
+      __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+      __ b(ne, &not_hole);
+      __ Move(reg, factory()->the_hole_value());
+      __ b(&done);
+      __ bind(&not_hole);
+      __ and_(scratch, sfpd_hi, Operand(0x7ff00000));
+      __ cmp(scratch, Operand(0x7ff00000));
+      __ b(ne, &no_special_nan_handling);
+      Label special_nan_handling;
+      __ tst(sfpd_hi, Operand(0x000FFFFF));
+      __ b(ne, &special_nan_handling);
+      __ cmp(sfpd_lo, Operand(0));
+      __ b(eq, &no_special_nan_handling);
+      __ bind(&special_nan_handling);
+      double canonical_nan =
+          FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+      uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+      __ mov(sfpd_lo,
+             Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+      __ mov(sfpd_hi,
+             Operand(static_cast<uint32_t>(casted_nan >> 32)));
+    }
+  }
+
+  __ bind(&no_special_nan_handling);
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4904,6 +5024,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   }
   // Now that we have finished with the object's real address tag it
   __ add(reg, reg, Operand(kHeapObjectTag));
+  __ bind(&done);
 }

@@ -4945,7 +5066,8 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 DwVfpRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
-                                LEnvironment* env) {
+                                LEnvironment* env,
+                                NumberUntagDMode mode) {
   Register scratch = scratch0();
   SwVfpRegister flt_scratch = double_scratch0().low();
   ASSERT(!result_reg.is(double_scratch0()));
@@ -4953,43 +5075,57 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,

   Label load_smi, heap_number, done;

-  // Smi check.
-  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
-  // Heap number map check.
-  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(scratch, Operand(ip));
-  if (deoptimize_on_undefined) {
-    DeoptimizeIf(ne, env);
-  } else {
-    Label heap_number;
-    __ b(eq, &heap_number);
-
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(input_reg, Operand(ip));
-    DeoptimizeIf(ne, env);
-
-    // Convert undefined to NaN.
-    __ LoadRoot(ip, Heap::kNanValueRootIndex);
-    __ sub(ip, ip, Operand(kHeapObjectTag));
-    __ vldr(result_reg, ip, HeapNumber::kValueOffset);
-    __ jmp(&done);
-
-    __ bind(&heap_number);
-  }
-  // Heap number to double register conversion.
-  __ sub(ip, input_reg, Operand(kHeapObjectTag));
-  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
-  if (deoptimize_on_minus_zero) {
-    __ vmov(ip, result_reg.low());
-    __ cmp(ip, Operand::Zero());
-    __ b(ne, &done);
-    __ vmov(ip, result_reg.high());
-    __ cmp(ip, Operand(HeapNumber::kSignMask));
-    DeoptimizeIf(eq, env);
-  }
-  __ jmp(&done);
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+    // Heap number map check.
+    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(scratch, Operand(ip));
+    if (deoptimize_on_undefined) {
+      DeoptimizeIf(ne, env);
+    } else {
+      Label heap_number;
+      __ b(eq, &heap_number);
+
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(input_reg, Operand(ip));
+      DeoptimizeIf(ne, env);
+
+      // Convert undefined to NaN.
+      __ LoadRoot(ip, Heap::kNanValueRootIndex);
+      __ sub(ip, ip, Operand(kHeapObjectTag));
+      __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+      __ jmp(&done);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to double register conversion.
+    __ sub(ip, input_reg, Operand(kHeapObjectTag));
+    __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+    if (deoptimize_on_minus_zero) {
+      __ vmov(ip, result_reg.low());
+      __ cmp(ip, Operand::Zero());
+      __ b(ne, &done);
+      __ vmov(ip, result_reg.high());
+      __ cmp(ip, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(eq, env);
+    }
+    __ jmp(&done);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+    __ SmiUntag(scratch, input_reg, SetCC);
+    DeoptimizeIf(cs, env);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    __ Vmov(result_reg,
+            FixedDoubleArray::hole_nan_as_double(),
+            no_reg);
+    __ b(&done);
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+  }

   // Smi to double register conversion
   __ bind(&load_smi);
@@ -5117,10 +5253,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   Register input_reg = ToRegister(input);
   DwVfpRegister result_reg = ToDoubleRegister(result);

+  NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+  HValue* value = instr->hydrogen()->value();
+  if (value->type().IsSmi()) {
+    if (value->IsLoadKeyed()) {
+      HLoadKeyed* load = HLoadKeyed::cast(value);
+      if (load->UsesMustHandleHole()) {
+        if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+          mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+        } else {
+          mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+        }
+      } else {
+        mode = NUMBER_CANDIDATE_IS_SMI;
+      }
+    }
+  }
+
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
                    instr->hydrogen()->deoptimize_on_minus_zero(),
-                   instr->environment());
+                   instr->environment(),
+                   mode);
 }

@@ -5421,6 +5575,63 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
 }


+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  HAllocate* original_instr = instr->hydrogen();
+  if (original_instr->size()->IsConstant()) {
+    UNREACHABLE();
+  } else {
+    // Allocate memory for the object.
+    AllocationFlags flags = TAG_OBJECT;
+    if (original_instr->MustAllocateDoubleAligned()) {
+      flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+    }
+    __ AllocateInNewSpace(size,
+                          result,
+                          scratch,
+                          scratch2,
+                          deferred->entry(),
+                          TAG_OBJECT);
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, Operand(Smi::FromInt(0)));
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ SmiTag(size, size);
+  __ push(size);
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   Handle<FixedArray> literals(instr->environment()->closure()->literals());
   ElementsKind boilerplate_elements_kind =
@@ -59,6 +59,7 @@ class LCodeGen BASE_EMBEDDED {
         status_(UNUSED),
         translations_(info->zone()),
         deferred_(8, info->zone()),
+        support_aligned_spilled_doubles_(false),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
         frame_is_built_(false),
@@ -138,6 +139,7 @@ class LCodeGen BASE_EMBEDDED {
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredAllocateObject(LAllocateObject* instr);
+  void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);

@@ -321,7 +323,8 @@ class LCodeGen BASE_EMBEDDED {
                         DwVfpRegister result,
                         bool deoptimize_on_undefined,
                         bool deoptimize_on_minus_zero,
-                        LEnvironment* env);
+                        LEnvironment* env,
+                        NumberUntagDMode mode);

   void DeoptIfTaggedButNotSmi(LEnvironment* environment,
                               HValue* value,
@@ -415,6 +418,7 @@ class LCodeGen BASE_EMBEDDED {
   Status status_;
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
+  bool support_aligned_spilled_doubles_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
   bool frame_is_built_;
@@ -884,6 +884,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {

   // Optionally save all double registers.
   if (save_doubles) {
+    CpuFeatures::Scope scope(VFP2);
     // Check CPU flags for number of registers, setting the Z condition flag.
     CheckFor32DRegs(ip);

@@ -893,7 +894,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
     sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
     vstm(db_w, sp, d0, d15);
     // Note that d0 will be accessible at
-    //   fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+    //   fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
     // since the sp slot and code slot were pushed after the fp.
   }

@@ -948,9 +949,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                     Register argument_count) {
   // Optionally restore all double registers.
   if (save_doubles) {
+    CpuFeatures::Scope scope(VFP2);
     // Calculate the stack location of the saved doubles and restore them.
     const int offset = 2 * kPointerSize;
-    sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
+    sub(r3, fp,
+        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));

     // Check CPU flags for number of registers, setting the Z condition flag.
     CheckFor32DRegs(ip);
@@ -3903,8 +3906,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {

 void MacroAssembler::TestJSArrayForAllocationSiteInfo(
     Register receiver_reg,
-    Register scratch_reg,
-    Label* allocation_info_present) {
+    Register scratch_reg) {
   Label no_info_available;
   ExternalReference new_space_start =
       ExternalReference::new_space_start(isolate());
@@ -3921,7 +3923,6 @@ void MacroAssembler::TestJSArrayForAllocationSiteInfo(
   ldr(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
   cmp(scratch_reg,
       Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
-  b(eq, allocation_info_present);
   bind(&no_info_available);
 }

@@ -1319,10 +1319,9 @@ class MacroAssembler: public Assembler {
   // to another type.
   // On entry, receiver_reg should point to the array object.
   // scratch_reg gets clobbered.
-  // If allocation info is present, jump to allocation_info_present
+  // If allocation info is present, condition flags are set to eq.
   void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
-                                        Register scratch_reg,
-                                        Label* allocation_info_present);
+                                        Register scratch_reg);

  private:
   void CallCFunctionHelper(Register function,
@@ -62,8 +62,10 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
  protected:
   virtual void BuildCodeStub() = 0;
   HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
+  CompilationInfo* info() { return &info_; }
   HydrogenCodeStub* stub() { return info_.code_stub(); }
   HContext* context() { return context_; }
+  Isolate* isolate() { return info_.isolate(); }

  private:
   SmartArrayPointer<HParameter*> parameters_;
@@ -84,9 +86,6 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
   graph()->entry_block()->Finish(jump);
   set_current_block(next_block);

-  context_ = new(zone()) HContext();
-  AddInstruction(context_);
-
   int major_key = stub()->MajorKey();
   CodeStubInterfaceDescriptor* descriptor =
       info_.isolate()->code_stub_interface_descriptor(major_key);
@@ -95,6 +94,11 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
   }
   parameters_.Reset(new HParameter*[descriptor->register_param_count_]);

+  HConstant* undefined_constant = new(zone()) HConstant(
+      isolate()->factory()->undefined_value(), Representation::Tagged());
+  AddInstruction(undefined_constant);
+  graph()->set_undefined_constant(undefined_constant);
+
   HGraph* graph = this->graph();
   Zone* zone = this->zone();
   for (int i = 0; i < descriptor->register_param_count_; ++i) {
@@ -103,6 +107,10 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
     graph->start_environment()->Push(param);
     parameters_[i] = param;
   }
+
+  context_ = new(zone) HContext();
+  AddInstruction(context_);
+
   AddSimulate(BailoutId::StubEntry());

   BuildCodeStub();
@@ -144,4 +152,101 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
 }


+template <>
+void CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
+  Zone* zone = this->zone();
+
+  HValue* js_array = GetParameter(0);
+  HValue* map = GetParameter(1);
+
+  info()->MarkAsSavesCallerDoubles();
+
+  AddInstruction(new(zone) HTrapAllocationMemento(js_array));
+
+  HInstruction* array_length =
+      AddInstruction(new(zone) HJSArrayLength(js_array,
+                                              js_array,
+                                              HType::Smi()));
+
+  Heap* heap = isolate()->heap();
+  const int kMinFreeNewSpaceAfterGC =
+      ((heap->InitialSemiSpaceSize() - sizeof(FixedArrayBase)) / 2) /
+      kDoubleSize;
+
+  HConstant* max_alloc_size =
+      new(zone) HConstant(kMinFreeNewSpaceAfterGC, Representation::Integer32());
+  AddInstruction(max_alloc_size);
+  AddInstruction(new(zone) HBoundsCheck(array_length, max_alloc_size));
+
+  current_block()->UpdateEnvironment(new(zone) HEnvironment(zone));
+
+  IfBuilder if_builder(this);
+
+  if_builder.BeginTrue(array_length, graph()->GetConstant0(), Token::EQ);
+
+  // Nothing to do, just change the map.
+
+  if_builder.BeginFalse();
+
+  HInstruction* elements =
+      AddInstruction(new(zone) HLoadElements(js_array, js_array));
+
+  HInstruction* elements_length =
+      AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+
+  ElementsKind to_kind = casted_stub()->to_kind();
+  HValue* new_elements =
+      BuildAllocateElements(context(), to_kind, elements_length);
+
+  // Fast elements kinds need to be initialized in case statements below
+  // cause a garbage collection.
+  Factory* factory = isolate()->factory();
+
+  ASSERT(!IsFastSmiElementsKind(to_kind));
+  double nan_double = FixedDoubleArray::hole_nan_as_double();
+  HValue* hole = IsFastObjectElementsKind(to_kind)
+      ? AddInstruction(new(zone) HConstant(factory->the_hole_value(),
+                                           Representation::Tagged()))
+      : AddInstruction(new(zone) HConstant(nan_double,
+                                           Representation::Double()));
+
+  LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+
+  HValue* zero = graph()->GetConstant0();
+  HValue* start = IsFastElementsKind(to_kind) ? zero : array_length;
+  HValue* key = builder.BeginBody(start, elements_length, Token::LT);
+
+  AddInstruction(new(zone) HStoreKeyed(new_elements, key, hole, to_kind));
+  AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
+
+  builder.EndBody();
+
+  BuildCopyElements(context(), elements,
+                    casted_stub()->from_kind(), new_elements,
+                    to_kind, array_length);
+
+  AddInstruction(new(zone) HStoreNamedField(js_array,
+                                            factory->elements_field_symbol(),
+                                            new_elements, true,
+                                            JSArray::kElementsOffset));
+  AddSimulate(BailoutId::StubEntry());
+
+  if_builder.End();
+
+  AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_symbol(),
+                                            map, true, JSArray::kMapOffset));
+  AddSimulate(BailoutId::StubEntry());
+
+  HReturn* ret = new(zone) HReturn(js_array, context());
+  current_block()->Finish(ret);
+}
+
+
+Handle<Code> TransitionElementsKindStub::GenerateCode() {
+  CodeStubGraphBuilder<TransitionElementsKindStub> builder(this);
+  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+  return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
 } }  // namespace v8::internal
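The GenerateCode() implementation above is the general template for a Crankshaft-compiled stub: specialize CodeStubGraphBuilder for the stub, build a hydrogen graph in BuildCodeStub(), then optimize and emit it as COMPILED_STUB code. A hedged sketch of what a hypothetical new stub would add, reusing only machinery shown in this diff; "MyNewStub" and its body are illustrative assumptions, not part of this commit:

    // Hypothetical stub following the same template as TransitionElementsKindStub.
    template <>
    void CodeStubGraphBuilder<MyNewStub>::BuildCodeStub() {
      HValue* arg = GetParameter(0);
      // ... build the hydrogen graph for the stub body here ...
      current_block()->Finish(new(zone()) HReturn(arg, context()));
    }

    Handle<Code> MyNewStub::GenerateCode() {
      CodeStubGraphBuilder<MyNewStub> builder(this);
      LChunk* chunk = OptimizeGraph(builder.CreateGraph());
      return chunk->Codegen(Code::COMPILED_STUB);
    }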
@@ -77,6 +77,7 @@ namespace internal {
   V(DebuggerStatement) \
   V(StringDictionaryLookup) \
   V(ElementsTransitionAndStore) \
+  V(TransitionElementsKind) \
   V(StoreArrayLiteralElement) \
   V(StubFailureTrampoline) \
   V(ProfileEntryHook)
@@ -1218,6 +1219,40 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
 };


+class TransitionElementsKindStub : public HydrogenCodeStub {
+ public:
+  TransitionElementsKindStub(ElementsKind from_kind,
+                             ElementsKind to_kind) {
+    bit_field_ = FromKindBits::encode(from_kind) |
+        ToKindBits::encode(to_kind);
+  }
+
+  Major MajorKey() { return TransitionElementsKind; }
+  int MinorKey() { return bit_field_; }
+
+  ElementsKind from_kind() const {
+    return FromKindBits::decode(bit_field_);
+  }
+
+  ElementsKind to_kind() const {
+    return ToKindBits::decode(bit_field_);
+  }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  class FromKindBits: public BitField<ElementsKind, 8, 8> {};
+  class ToKindBits: public BitField<ElementsKind, 0, 8> {};
+  uint32_t bit_field_;
+
+  DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
+};
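For reference, the lithium codegen earlier in this diff invokes the stub when FLAG_compiled_transitions is enabled. A short sketch with illustrative elements kinds; the register protocol (object in r0, map in r1) matches the interface descriptor registered in the code-stubs-arm.cc hunk above:

    // Illustrative kinds; the stub is keyed by (from_kind, to_kind) in MinorKey().
    TransitionElementsKindStub stub(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
    __ Move(r0, object_reg);  // object parameter, per the descriptor
    __ Move(r1, to_map);      // destination map parameter
    __ CallStub(&stub);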


 class KeyedStoreElementStub : public PlatformCodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
@@ -130,6 +130,14 @@ class CompilationInfo {
     return IsNonDeferredCalling::decode(flags_);
   }

+  void MarkAsSavesCallerDoubles() {
+    flags_ |= SavesCallerDoubles::encode(true);
+  }
+
+  bool saves_caller_doubles() const {
+    return SavesCallerDoubles::decode(flags_);
+  }
+
   void SetFunction(FunctionLiteral* literal) {
     ASSERT(function_ == NULL);
     function_ = literal;
@@ -275,6 +283,8 @@ class CompilationInfo {
   class IsDeferredCalling: public BitField<bool, 10, 1> {};
   // If the compiled code contains calls that require building a frame
   class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
+  // If the compiled code saves double caller registers that it clobbers.
+  class SavesCallerDoubles: public BitField<bool, 12, 1> {};


   unsigned flags_;
@@ -557,7 +557,7 @@ class FrameDescription {
   uintptr_t frame_size_;  // Number of bytes.
   JSFunction* function_;
   intptr_t registers_[Register::kNumRegisters];
-  double double_registers_[DoubleRegister::kMaxNumAllocatableRegisters];
+  double double_registers_[DoubleRegister::kMaxNumRegisters];
   intptr_t top_;
   intptr_t pc_;
   intptr_t fp_;
@@ -158,6 +158,8 @@ DEFINE_implication(harmony_observation, harmony_collections)
 // Flags for experimental implementation features.
 DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
 DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
+DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
+            "generate array elements transition stubs")
 DEFINE_bool(clever_optimizations,
             true,
             "Optimize object size, Array shift, DOM strings and string +")
@@ -224,6 +224,9 @@ namespace internal {
   V(illegal_execution_state_symbol, "illegal execution state") \
   V(get_symbol, "get") \
   V(set_symbol, "set") \
+  V(map_field_symbol, "%map") \
+  V(elements_field_symbol, "%elements") \
+  V(length_field_symbol, "%length") \
   V(function_class_symbol, "Function") \
   V(illegal_argument_symbol, "illegal argument") \
   V(MakeReferenceError_symbol, "MakeReferenceError") \
@@ -2171,23 +2171,34 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) {
 }


-bool HLoadKeyed::RequiresHoleCheck() const {
+bool HLoadKeyed::UsesMustHandleHole() const {
   if (IsFastPackedElementsKind(elements_kind())) {
     return false;
   }

+  if (hole_mode() == ALLOW_RETURN_HOLE) return true;
+
   if (IsFastDoubleElementsKind(elements_kind())) {
-    return true;
+    return false;
   }

   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
     if (!use->IsChange()) {
-      return true;
+      return false;
     }
   }

-  return false;
+  return true;
 }


+bool HLoadKeyed::RequiresHoleCheck() const {
+  if (IsFastPackedElementsKind(elements_kind())) {
+    return false;
+  }
+
+  return !UsesMustHandleHole();
+}
+
+
@@ -2461,6 +2472,11 @@ HType HAllocateObject::CalculateInferredType() {
 }


+HType HAllocate::CalculateInferredType() {
+  return type_;
+}
+
+
 HType HFastLiteral::CalculateInferredType() {
   // TODO(mstarzinger): Be smarter, could also be JSArray here.
   return HType::JSObject();
@@ -2582,12 +2598,21 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {


 bool HStoreKeyed::NeedsCanonicalization() {
-  // If value is an integer or comes from the result of a keyed load
-  // then it will be a non-hole value: no need for canonicalization.
-  if (value()->IsLoadKeyed() ||
-      (value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
+  // If value is an integer or smi or comes from the result of a keyed load or
+  // constant then it is either a non-hole value or, in the case of a constant,
+  // the hole is only being stored explicitly: no need for canonicalization.
+  if (value()->IsLoadKeyed() || value()->IsConstant()) {
     return false;
   }
+
+  if (value()->IsChange()) {
+    if (HChange::cast(value())->from().IsInteger32()) {
+      return false;
+    }
+    if (HChange::cast(value())->value()->type().IsSmi()) {
+      return false;
+    }
+  }
   return true;
 }

@@ -64,6 +64,7 @@ class LChunkBuilder;
   V(AbnormalExit) \
   V(AccessArgumentsAt) \
   V(Add) \
+  V(Allocate) \
   V(AllocateObject) \
   V(ApplyArguments) \
   V(ArgumentsElements) \
@@ -179,6 +180,7 @@ class LChunkBuilder;
   V(Throw) \
   V(ToFastProperties) \
   V(TransitionElementsKind) \
+  V(TrapAllocationMemento) \
   V(Typeof) \
   V(TypeofIsAndBranch) \
   V(UnaryMathOperation) \
@@ -4160,6 +4162,106 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
 };


+class HAllocateObject: public HTemplateInstruction<1> {
+ public:
+  HAllocateObject(HValue* context, Handle<JSFunction> constructor)
+      : constructor_(constructor) {
+    SetOperandAt(0, context);
+    set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
+  }
+
+  // Maximum instance size for which allocations will be inlined.
+  static const int kMaxSize = 64 * kPointerSize;
+
+  HValue* context() { return OperandAt(0); }
+  Handle<JSFunction> constructor() { return constructor_; }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+  virtual Handle<Map> GetMonomorphicJSObjectMap() {
+    ASSERT(constructor()->has_initial_map());
+    return Handle<Map>(constructor()->initial_map());
+  }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
+
+ private:
+  // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
+  // virtual bool IsDeletable() const { return true; }
+
+  Handle<JSFunction> constructor_;
+};
+
+
+class HAllocate: public HTemplateInstruction<2> {
+ public:
+  enum Flags {
+    CAN_ALLOCATE_IN_NEW_SPACE = 1 << 0,
+    CAN_ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1,
+    CAN_ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2,
+    ALLOCATE_DOUBLE_ALIGNED = 1 << 3
+  };
+
+  HAllocate(HValue* context, HValue* size, HType type, Flags flags)
+      : type_(type),
+        flags_(flags) {
+    ASSERT((flags & CAN_ALLOCATE_IN_OLD_DATA_SPACE) == 0);  // unimplemented
+    ASSERT((flags & CAN_ALLOCATE_IN_OLD_POINTER_SPACE) == 0);  // unimplemented
+    SetOperandAt(0, context);
+    SetOperandAt(1, size);
+    set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* size() { return OperandAt(1); }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    if (index == 0) {
+      return Representation::Tagged();
+    } else {
+      return Representation::Integer32();
+    }
+  }
+
+  virtual HType CalculateInferredType();
+
+  bool CanAllocateInNewSpace() const {
+    return (flags_ & CAN_ALLOCATE_IN_NEW_SPACE) != 0;
+  }
+
+  bool CanAllocateInOldDataSpace() const {
+    return (flags_ & CAN_ALLOCATE_IN_OLD_DATA_SPACE) != 0;
+  }
+
+  bool CanAllocateInOldPointerSpace() const {
+    return (flags_ & CAN_ALLOCATE_IN_OLD_POINTER_SPACE) != 0;
+  }
+
+  bool CanAllocateInOldSpace() const {
+    return CanAllocateInOldDataSpace() ||
+        CanAllocateInOldPointerSpace();
+  }
+
+  bool GuaranteedInNewSpace() const {
+    return CanAllocateInNewSpace() && !CanAllocateInOldSpace();
+  }
+
+  bool MustAllocateDoubleAligned() const {
+    return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate)
+
+ private:
+  HType type_;
+  Flags flags_;
+};
+
+
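A sketch of how builder code might instantiate the new HAllocate instruction, using only the constructor and flags defined above; the `context`, `size_in_bytes`, and `zone` values are assumed to come from the surrounding HGraphBuilder context:

    // Sketch: allocate raw GC memory from crankshafted code via HAllocate.
    HAllocate::Flags flags = static_cast<HAllocate::Flags>(
        HAllocate::CAN_ALLOCATE_IN_NEW_SPACE | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
    HInstruction* new_object = AddInstruction(
        new(zone) HAllocate(context, size_in_bytes, HType::Tagged(), flags));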
 inline bool StoringValueNeedsWriteBarrier(HValue* value) {
   return !value->type().IsBoolean()
       && !value->type().IsSmi()
@@ -4169,8 +4271,13 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {

 inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
                                             HValue* new_space_dominator) {
-  return (!object->IsAllocateObject() && !object->IsFastLiteral()) ||
-         (object != new_space_dominator);
+  if (object != new_space_dominator) return true;
+  if (object->IsFastLiteral()) return false;
+  if (object->IsAllocateObject()) return false;
+  if (object->IsAllocate()) {
+    return !HAllocate::cast(object)->GuaranteedInNewSpace();
+  }
+  return true;
 }


@@ -4502,15 +4609,23 @@ class ArrayInstructionInterface {
 };


+enum LoadKeyedHoleMode {
+  NEVER_RETURN_HOLE,
+  ALLOW_RETURN_HOLE
+};
+
+
 class HLoadKeyed
     : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   HLoadKeyed(HValue* obj,
              HValue* key,
              HValue* dependency,
-             ElementsKind elements_kind)
+             ElementsKind elements_kind,
+             LoadKeyedHoleMode mode = NEVER_RETURN_HOLE)
       : bit_field_(0) {
-    bit_field_ = ElementsKindField::encode(elements_kind);
+    bit_field_ = ElementsKindField::encode(elements_kind) |
+        HoleModeField::encode(mode);

     SetOperandAt(0, obj);
     SetOperandAt(1, key);
@@ -4523,8 +4638,7 @@ class HLoadKeyed
         IsFastDoubleElementsKind(elements_kind));

     if (IsFastSmiOrObjectElementsKind(elements_kind)) {
-      if (IsFastSmiElementsKind(elements_kind) &&
-          IsFastPackedElementsKind(elements_kind)) {
+      if (IsFastSmiElementsKind(elements_kind)) {
         set_type(HType::Smi());
       }

@@ -4573,6 +4687,9 @@ class HLoadKeyed
   ElementsKind elements_kind() const {
     return ElementsKindField::decode(bit_field_);
   }
+  LoadKeyedHoleMode hole_mode() const {
+    return HoleModeField::decode(bit_field_);
+  }

   virtual Representation RequiredInputRepresentation(int index) {
     // kind_fast:       tagged[int32] (none)
@@ -4595,6 +4712,7 @@ class HLoadKeyed

   virtual void PrintDataTo(StringStream* stream);

+  bool UsesMustHandleHole() const;
   bool RequiresHoleCheck() const;

   virtual Range* InferRange(Zone* zone);
@@ -4619,11 +4737,13 @@ class HLoadKeyed
   // Establish some checks around our packed fields
   enum LoadKeyedBits {
     kBitsForElementsKind = 5,
-    kBitsForIndexOffset = 26,
+    kBitsForHoleMode = 1,
+    kBitsForIndexOffset = 25,
     kBitsForIsDehoisted = 1,

     kStartElementsKind = 0,
-    kStartIndexOffset = kStartElementsKind + kBitsForElementsKind,
+    kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
+    kStartIndexOffset = kStartHoleMode + kBitsForHoleMode,
     kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
   };

@@ -4633,6 +4753,9 @@ class HLoadKeyed
   class ElementsKindField:
     public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
     {};  // NOLINT
+  class HoleModeField:
+    public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
+    {};  // NOLINT
   class IndexOffsetField:
     public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
     {};  // NOLINT
@@ -4771,11 +4894,18 @@ class HStoreKeyed
  public:
   HStoreKeyed(HValue* obj, HValue* key, HValue* val,
               ElementsKind elements_kind)
-      : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
+      : elements_kind_(elements_kind),
+        index_offset_(0),
+        is_dehoisted_(false),
+        new_space_dominator_(NULL) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);

+    if (IsFastObjectElementsKind(elements_kind)) {
+      SetFlag(kTrackSideEffectDominators);
+      SetGVNFlag(kDependsOnNewSpacePromotion);
+    }
     if (is_external()) {
       SetGVNFlag(kChangesSpecializedArrayElements);
     } else if (IsFastDoubleElementsKind(elements_kind)) {
@@ -4843,11 +4973,19 @@ class HStoreKeyed
   bool IsDehoisted() { return is_dehoisted_; }
   void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }

+  virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+    ASSERT(side_effect == kChangesNewSpacePromotion);
+    new_space_dominator_ = dominator;
+  }
+
+  HValue* new_space_dominator() const { return new_space_dominator_; }
+
   bool NeedsWriteBarrier() {
     if (value_is_smi()) {
       return false;
     } else {
-      return StoringValueNeedsWriteBarrier(value());
+      return StoringValueNeedsWriteBarrier(value()) &&
+          ReceiverObjectNeedsWriteBarrier(elements(), new_space_dominator());
     }
   }

@@ -4861,6 +4999,7 @@ class HStoreKeyed
   ElementsKind elements_kind_;
   uint32_t index_offset_;
   bool is_dehoisted_;
+  HValue* new_space_dominator_;
 };


@@ -4899,9 +5038,10 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
 };


-class HTransitionElementsKind: public HTemplateInstruction<1> {
+class HTransitionElementsKind: public HTemplateInstruction<2> {
  public:
-  HTransitionElementsKind(HValue* object,
+  HTransitionElementsKind(HValue* context,
+                          HValue* object,
                           Handle<Map> original_map,
                           Handle<Map> transitioned_map)
       : original_map_(original_map),
@@ -4909,6 +5049,7 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
         from_kind_(original_map->elements_kind()),
         to_kind_(transitioned_map->elements_kind()) {
     SetOperandAt(0, object);
+    SetOperandAt(1, context);
     SetFlag(kUseGVN);
     SetGVNFlag(kChangesElementsKind);
     if (original_map->has_fast_double_elements()) {
@@ -4927,6 +5068,7 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
   }

   HValue* object() { return OperandAt(0); }
+  HValue* context() { return OperandAt(1); }
   Handle<Map> original_map() { return original_map_; }
   Handle<Map> transitioned_map() { return transitioned_map_; }
   ElementsKind from_kind() { return from_kind_; }
@ -5079,40 +5221,6 @@ class HStringLength: public HUnaryOperation {
|
||||
};
|
||||
|
||||
|
||||
class HAllocateObject: public HTemplateInstruction<1> {
|
||||
public:
|
||||
HAllocateObject(HValue* context, Handle<JSFunction> constructor)
|
||||
: constructor_(constructor) {
|
||||
SetOperandAt(0, context);
|
||||
set_representation(Representation::Tagged());
|
||||
SetGVNFlag(kChangesNewSpacePromotion);
|
||||
}
|
||||
|
||||
// Maximum instance size for which allocations will be inlined.
|
||||
static const int kMaxSize = 64 * kPointerSize;
|
||||
|
||||
HValue* context() { return OperandAt(0); }
|
||||
Handle<JSFunction> constructor() { return constructor_; }
|
||||
|
||||
virtual Representation RequiredInputRepresentation(int index) {
|
||||
return Representation::Tagged();
|
||||
}
|
||||
virtual Handle<Map> GetMonomorphicJSObjectMap() {
|
||||
ASSERT(constructor()->has_initial_map());
|
||||
return Handle<Map>(constructor()->initial_map());
|
||||
}
|
||||
virtual HType CalculateInferredType();
|
||||
|
||||
DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
|
||||
|
||||
private:
|
||||
// TODO(svenpanne) Might be safe, but leave it out until we know for sure.
|
||||
// virtual bool IsDeletable() const { return true; }
|
||||
|
||||
Handle<JSFunction> constructor_;
|
||||
};
|
||||
|
||||
|
||||
template <int V>
|
||||
class HMaterializedLiteral: public HTemplateInstruction<V> {
|
||||
public:
|
||||
@ -5346,6 +5454,22 @@ class HTypeof: public HTemplateInstruction<2> {
|
||||
};
|
||||
|
||||
|
||||
class HTrapAllocationMemento : public HTemplateInstruction<1> {
|
||||
public:
|
||||
explicit HTrapAllocationMemento(HValue* obj) {
|
||||
SetOperandAt(0, obj);
|
||||
}
|
||||
|
||||
virtual Representation RequiredInputRepresentation(int index) {
|
||||
return Representation::Tagged();
|
||||
}
|
||||
|
||||
HValue* object() { return OperandAt(0); }
|
||||
|
||||
DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento)
|
||||
};
|
||||
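A minimal sketch of how a stub's graph builder might emit the new instruction (hypothetical usage; GetParameter is an assumed accessor, not part of this patch):

// Guard a transitioning stub against allocation mementos: if an
// AllocationSiteInfo trails the array in new space, deoptimize to the
// full runtime transition (see DoTrapAllocationMemento further down).
HValue* js_array = GetParameter(0);  // assumed stub-input accessor
AddInstruction(new(zone()) HTrapAllocationMemento(js_array));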


class HToFastProperties: public HUnaryOperation {
public:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {

src/hydrogen.cc

@ -221,8 +221,9 @@ void HBasicBlock::SetJoinId(BailoutId ast_id) {
HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
// We only need to verify the ID once.
ASSERT(i != 0 ||
predecessor->last_environment()->closure()->shared()
->VerifyBailoutId(ast_id));
(predecessor->last_environment()->closure().is_null() ||
predecessor->last_environment()->closure()->shared()
->VerifyBailoutId(ast_id)));
simulate->set_ast_id(ast_id);
}
}
@ -602,6 +603,11 @@ HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer,
}


HConstant* HGraph::GetConstant0() {
return GetConstantInt32(&constant_0_, 0);
}


HConstant* HGraph::GetConstant1() {
return GetConstantInt32(&constant_1_, 1);
}
@ -627,6 +633,128 @@ HConstant* HGraph::GetConstantHole() {
}


HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, BailoutId id)
: builder_(builder),
finished_(false),
id_(id) {
HEnvironment* env = builder->environment();
HEnvironment* true_env = env->Copy();
HEnvironment* false_env = env->Copy();
HEnvironment* merge_env = env->Copy();
true_block_ = builder->CreateBasicBlock(true_env);
false_block_ = builder->CreateBasicBlock(false_env);
merge_block_ = builder->CreateBasicBlock(merge_env);
}


void HGraphBuilder::IfBuilder::BeginTrue(HValue* left,
HValue* right,
Token::Value token) {
HCompareIDAndBranch* compare =
new(zone()) HCompareIDAndBranch(left, right, token);
compare->ChangeRepresentation(Representation::Integer32());
compare->SetSuccessorAt(0, true_block_);
compare->SetSuccessorAt(1, false_block_);
builder_->current_block()->Finish(compare);
builder_->set_current_block(true_block_);
}


void HGraphBuilder::IfBuilder::BeginFalse() {
builder_->current_block()->Goto(merge_block_);
builder_->set_current_block(false_block_);
}


void HGraphBuilder::IfBuilder::End() {
ASSERT(!finished_);
builder_->current_block()->Goto(merge_block_);
builder_->set_current_block(merge_block_);
merge_block_->SetJoinId(id_);
finished_ = true;
}


HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
HValue* context,
LoopBuilder::Direction direction,
BailoutId id)
: builder_(builder),
context_(context),
direction_(direction),
id_(id),
finished_(false) {
HEnvironment* env = builder_->environment();
HEnvironment* body_env = env->Copy();
HEnvironment* exit_env = env->Copy();
header_block_ = builder->CreateLoopHeaderBlock();
body_block_ = builder->CreateBasicBlock(body_env);
exit_block_ = builder->CreateBasicBlock(exit_env);
}


HValue* HGraphBuilder::LoopBuilder::BeginBody(HValue* initial,
HValue* terminating,
Token::Value token) {
phi_ = new(zone()) HPhi(0, zone());
header_block_->AddPhi(phi_);
phi_->AddInput(initial);
phi_->ChangeRepresentation(Representation::Integer32());
HEnvironment* env = builder_->environment();
env->Push(initial);
builder_->current_block()->Goto(header_block_);
builder_->set_current_block(header_block_);

builder_->set_current_block(header_block_);
HCompareIDAndBranch* compare =
new(zone()) HCompareIDAndBranch(phi_, terminating, token);
compare->ChangeRepresentation(Representation::Integer32());
compare->SetSuccessorAt(0, body_block_);
compare->SetSuccessorAt(1, exit_block_);
builder_->current_block()->Finish(compare);

builder_->set_current_block(body_block_);
if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
HValue* one = builder_->graph()->GetConstant1();
if (direction_ == kPreIncrement) {
increment_ = new(zone()) HAdd(context_, phi_, one);
} else {
increment_ = new(zone()) HSub(context_, phi_, one);
}
increment_->ClearFlag(HValue::kCanOverflow);
increment_->ChangeRepresentation(Representation::Integer32());
builder_->AddInstruction(increment_);
return increment_;
} else {
return phi_;
}
}


void HGraphBuilder::LoopBuilder::EndBody() {
ASSERT(!finished_);

if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
HValue* one = builder_->graph()->GetConstant1();
if (direction_ == kPostIncrement) {
increment_ = new(zone()) HAdd(context_, phi_, one);
} else {
increment_ = new(zone()) HSub(context_, phi_, one);
}
increment_->ClearFlag(HValue::kCanOverflow);
increment_->ChangeRepresentation(Representation::Integer32());
builder_->AddInstruction(increment_);
}

builder_->environment()->Push(increment_);
builder_->current_block()->Goto(header_block_);
header_block_->loop_information()->RegisterBackEdge(body_block_);
header_block_->SetJoinId(BailoutId::StubEntry());
builder_->set_current_block(exit_block_);
finished_ = true;
}

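Taken together, the two builders let a stub spell out its control flow by hand. A sketch of the intended nesting (illustrative only; context, length and limit stand in for values a real stub would have in scope):

// Hypothetical stub-builder fragment using the utilities defined above.
LoopBuilder loop(this, context, LoopBuilder::kPostIncrement);
HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
{
  IfBuilder cond(this);
  cond.BeginTrue(index, limit, Token::LT);  // then-branch code goes here
  cond.BeginFalse();                        // else-branch code goes here
  cond.End();                               // branches merge, join id set
}
loop.EndBody();                             // back edge + move to exit block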
HGraph* HGraphBuilder::CreateGraph() {
graph_ = new(zone()) HGraph(info_);
if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info_);
@ -651,6 +779,22 @@ void HGraphBuilder::AddSimulate(BailoutId id,
}


HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
HBasicBlock* b = graph()->CreateBasicBlock();
b->SetInitialEnvironment(env);
return b;
}


HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
HBasicBlock* header = graph()->CreateBasicBlock();
HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
header->SetInitialEnvironment(entry_env);
header->AttachLoopInformation();
return header;
}


HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
@ -799,6 +943,109 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}


HValue* HGraphBuilder::BuildAllocateElements(HContext* context,
ElementsKind kind,
HValue* capacity) {
Zone* zone = this->zone();

int elements_size = IsFastDoubleElementsKind(kind)
? kDoubleSize : kPointerSize;
HConstant* elements_size_value =
new(zone) HConstant(elements_size, Representation::Integer32());
AddInstruction(elements_size_value);
HValue* mul = AddInstruction(
new(zone) HMul(context, capacity, elements_size_value));
mul->ChangeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);

HConstant* header_size =
new(zone) HConstant(FixedArray::kHeaderSize, Representation::Integer32());
AddInstruction(header_size);
HValue* total_size = AddInstruction(
new(zone) HAdd(context, mul, header_size));
total_size->ChangeRepresentation(Representation::Integer32());
total_size->ClearFlag(HValue::kCanOverflow);

HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
if (IsFastDoubleElementsKind(kind)) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
}

HValue* elements =
AddInstruction(new(zone) HAllocate(context, total_size,
HType::JSArray(), flags));
Isolate* isolate = graph()->isolate();

Factory* factory = isolate->factory();
Handle<Map> map = IsFastDoubleElementsKind(kind)
? factory->fixed_double_array_map()
: factory->fixed_array_map();
BuildStoreMap(elements, map, BailoutId::StubEntry());

Handle<String> fixed_array_length_field_name =
isolate->factory()->length_field_symbol();
HInstruction* store_length =
new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
capacity, true, FixedArray::kLengthOffset);
AddInstruction(store_length);
AddSimulate(BailoutId::StubEntry(), FIXED_SIMULATE);

return elements;
}
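For a concrete sense of the size computation above, a worked example under assumed ia32 constants (kPointerSize == 4, kDoubleSize == 8, FixedArray::kHeaderSize == 8):

// capacity * elements_size + header, as built with HMul/HAdd above:
//   FAST_ELEMENTS,        capacity 16: 16 * 4 + 8 =  72 bytes
//   FAST_DOUBLE_ELEMENTS, capacity 16: 16 * 8 + 8 = 136 bytes, requested
//   with ALLOCATE_DOUBLE_ALIGNED so the payload ends up 8-byte aligned.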


HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
HValue* map,
BailoutId id) {
Zone* zone = this->zone();
Isolate* isolate = graph()->isolate();
Factory* factory = isolate->factory();
Handle<String> map_field_name = factory->map_field_symbol();
HInstruction* store_map =
new(zone) HStoreNamedField(object, map_field_name, map,
true, JSObject::kMapOffset);
store_map->SetGVNFlag(kChangesMaps);
AddInstruction(store_map);
AddSimulate(id, FIXED_SIMULATE);
return store_map;
}


HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
Handle<Map> map,
BailoutId id) {
Zone* zone = this->zone();
HValue* map_constant =
AddInstruction(new(zone) HConstant(map, Representation::Tagged()));
return BuildStoreMap(object, map_constant, id);
}


void HGraphBuilder::BuildCopyElements(HContext* context,
HValue* from_elements,
ElementsKind from_elements_kind,
HValue* to_elements,
ElementsKind to_elements_kind,
HValue* length) {
LoopBuilder builder(this, context, LoopBuilder::kPostIncrement);

HValue* key = builder.BeginBody(graph()->GetConstant0(),
length, Token::LT);

HValue* element =
AddInstruction(new(zone()) HLoadKeyed(from_elements, key, NULL,
from_elements_kind,
ALLOW_RETURN_HOLE));

AddInstruction(new(zone()) HStoreKeyed(to_elements, key, element,
to_elements_kind));
AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);

builder.EndBody();
}

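The loop built above behaves like the following pseudo-code; ALLOW_RETURN_HOLE means holes are copied through as-is instead of triggering a deopt:

// Pseudo-equivalent of the hydrogen loop (illustrative, not real code):
// for (int key = 0; key < length; key++) {
//   to_elements[key] = from_elements[key];  // the hole passes through
// }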
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
TypeFeedbackOracle* oracle)
: HGraphBuilder(info),
@ -2258,7 +2505,7 @@ void HGlobalValueNumberer::ProcessLoopBlock(


bool HGlobalValueNumberer::AllowCodeMotion() {
return info()->opt_count() + 1 < FLAG_max_opt_count;
return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
}


@ -3510,6 +3757,23 @@ bool HOptimizedGraphBuilder::BuildGraph() {
}


void HGraph::GlobalValueNumbering() {
// Perform common subexpression elimination and loop-invariant code motion.
if (FLAG_use_gvn) {
HPhase phase("H_Global value numbering", this);
HGlobalValueNumberer gvn(this, info());
bool removed_side_effects = gvn.Analyze();
// Trigger a second analysis pass to further eliminate duplicate values that
// could only be discovered by removing side-effect-generating instructions
// during the first pass.
if (FLAG_smi_only_arrays && removed_side_effects) {
removed_side_effects = gvn.Analyze();
ASSERT(!removed_side_effects);
}
}
}


bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
*bailout_reason = SmartArrayPointer<char>();
OrderBlocks();
@ -3563,19 +3827,7 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {

Canonicalize();

// Perform common subexpression elimination and loop-invariant code motion.
if (FLAG_use_gvn) {
HPhase phase("H_Global value numbering", this);
HGlobalValueNumberer gvn(this, info());
bool removed_side_effects = gvn.Analyze();
// Trigger a second analysis pass to further eliminate duplicate values that
// could only be discovered by removing side-effect-generating instructions
// during the first pass.
if (FLAG_smi_only_arrays && removed_side_effects) {
removed_side_effects = gvn.Analyze();
ASSERT(!removed_side_effects);
}
}
GlobalValueNumbering();

if (FLAG_use_range) {
HRangeAnalysis rangeAnalysis(this);
@ -4201,22 +4453,6 @@ void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
}


HBasicBlock* HOptimizedGraphBuilder::CreateBasicBlock(HEnvironment* env) {
HBasicBlock* b = graph()->CreateBasicBlock();
b->SetInitialEnvironment(env);
return b;
}


HBasicBlock* HOptimizedGraphBuilder::CreateLoopHeaderBlock() {
HBasicBlock* header = graph()->CreateBasicBlock();
HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
header->SetInitialEnvironment(entry_env);
header->AttachLoopInformation();
return header;
}


void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@ -6538,8 +6774,9 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
ASSERT(Map::IsValidElementsTransition(
map->elements_kind(),
transition_target.at(i)->elements_kind()));
HValue* context = environment()->LookupContext();
transition = new(zone()) HTransitionElementsKind(
object, map, transition_target.at(i));
context, object, map, transition_target.at(i));
AddInstruction(transition);
} else {
type_todo[map->elements_kind()] = true;
@ -9791,7 +10028,7 @@ HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
: values_(0, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
specials_count_(0),
local_count_(0),
outer_(NULL),
entry_(NULL),
@ -9846,6 +10083,7 @@ void HEnvironment::Initialize(const HEnvironment* other) {
entry_ = other->entry_;
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
specials_count_ = other->specials_count_;
ast_id_ = other->ast_id_;
}

@ -263,6 +263,7 @@ class HGraph: public ZoneObject {
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
void ComputeSafeUint32Operations();
void GlobalValueNumbering();
bool ProcessArgumentsObject();
void EliminateRedundantPhis();
void EliminateUnreachablePhis();
@ -291,6 +292,7 @@ class HGraph: public ZoneObject {
undefined_constant_.set(constant);
}
HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
HConstant* GetConstant0();
HConstant* GetConstant1();
HConstant* GetConstantMinus1();
HConstant* GetConstantTrue();
@ -411,6 +413,7 @@ class HGraph: public ZoneObject {
ZoneList<HPhi*>* phi_list_;
ZoneList<HInstruction*>* uint32_instructions_;
SetOncePointer<HConstant> undefined_constant_;
SetOncePointer<HConstant> constant_0_;
SetOncePointer<HConstant> constant_1_;
SetOncePointer<HConstant> constant_minus1_;
SetOncePointer<HConstant> constant_true_;
@ -876,6 +879,9 @@ class HGraphBuilder {
protected:
virtual bool BuildGraph() = 0;

HBasicBlock* CreateBasicBlock(HEnvironment* env);
HBasicBlock* CreateLoopHeaderBlock();

// Building common constructs
HInstruction* BuildExternalArrayElementAccess(
HValue* external_elements,
@ -903,6 +909,78 @@ class HGraphBuilder {
bool is_store,
Representation checked_index_representation = Representation::None());

HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map, BailoutId id);

class IfBuilder {
public:
IfBuilder(HGraphBuilder* builder,
BailoutId id = BailoutId::StubEntry());
~IfBuilder() {
if (!finished_) End();
}

void BeginTrue(HValue* left, HValue* right, Token::Value token);
void BeginFalse();
void End();

private:
HGraphBuilder* builder_;
bool finished_;
HBasicBlock* true_block_;
HBasicBlock* false_block_;
HBasicBlock* merge_block_;
BailoutId id_;

Zone* zone() { return builder_->zone(); }
};

class LoopBuilder {
public:
enum Direction {
kPreIncrement,
kPostIncrement,
kPreDecrement,
kPostDecrement
};

LoopBuilder(HGraphBuilder* builder,
HValue* context,
Direction direction,
BailoutId id = BailoutId::StubEntry());
~LoopBuilder() {
ASSERT(finished_);
}

HValue* BeginBody(HValue* initial, HValue* terminating, Token::Value token);
void EndBody();

private:
HGraphBuilder* builder_;
HValue* context_;
HInstruction* increment_;
HPhi* phi_;
HBasicBlock* header_block_;
HBasicBlock* body_block_;
HBasicBlock* exit_block_;
Direction direction_;
BailoutId id_;
bool finished_;

Zone* zone() { return builder_->zone(); }
};

HValue* BuildAllocateElements(HContext* context,
ElementsKind kind,
HValue* capacity);

void BuildCopyElements(HContext* context,
HValue* from_elements,
ElementsKind from_elements_kind,
HValue* to_elements,
ElementsKind to_elements_kind,
HValue* length);

private:
HGraphBuilder();
CompilationInfo* info_;
@ -1135,9 +1213,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

HBasicBlock* CreateBasicBlock(HEnvironment* env);
HBasicBlock* CreateLoopHeaderBlock();

// Helpers for flow graph construction.
enum GlobalPropertyAccess {
kUseCell,

@ -142,6 +142,7 @@ inline Register Register::FromAllocationIndex(int index) {


struct IntelDoubleRegister {
static const int kMaxNumRegisters = 8;
static const int kMaxNumAllocatableRegisters = 7;
static int NumAllocatableRegisters();
static int NumRegisters();

@ -34,6 +34,7 @@
#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
#include "runtime.h"
#include "stub-cache.h"
#include "codegen.h"

@ -52,6 +53,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax, ebx };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
}


#define __ ACCESS_MASM(masm)

void ToNumberStub::Generate(MacroAssembler* masm) {

@ -401,8 +401,8 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_site_info_found != NULL);
masm->TestJSArrayForAllocationSiteInfo(edx, edi,
allocation_site_info_found);
__ TestJSArrayForAllocationSiteInfo(edx, edi);
__ j(equal, allocation_site_info_found);
}

// Set transitioned map.
@ -429,7 +429,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map;

if (mode == TRACK_ALLOCATION_SITE) {
masm->TestJSArrayForAllocationSiteInfo(edx, edi, fail);
__ TestJSArrayForAllocationSiteInfo(edx, edi);
__ j(equal, fail);
}

// Check for empty arrays, which only require a map transition and no changes
@ -568,7 +569,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map, success;

if (mode == TRACK_ALLOCATION_SITE) {
masm->TestJSArrayForAllocationSiteInfo(edx, edi, fail);
__ TestJSArrayForAllocationSiteInfo(edx, edi);
__ j(equal, fail);
}

// Check for empty arrays, which only require a map transition and no changes

@ -644,6 +644,11 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
output_frame->SetRegister(ebp.code(), value);
output_frame->SetFp(value);

for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}

intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
output_frame->SetRegister(eax.code(), descriptor->register_param_count_);
@ -1263,15 +1268,13 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(eax, edx);
__ j(below, &outer_push_loop);

// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movdbl(xmm_reg, Operand(ebx, src_offset));
}
// In case of OSR or a failed STUB, we have to restore the XMM registers.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope scope(SSE2);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movdbl(xmm_reg, Operand(ebx, src_offset));
}
}

@ -77,6 +77,8 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);

support_aligned_spilled_doubles_ = info()->IsOptimizing();

dynamic_frame_alignment_ = info()->IsOptimizing() &&
((chunk()->num_double_slots() > 2 &&
!chunk()->graph()->is_recursive()) ||
@ -153,7 +155,7 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}

if (dynamic_frame_alignment_) {
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ mov(edx, Immediate(kNoAlignmentPadding));

@ -212,12 +214,16 @@ bool LCodeGen::GeneratePrologue() {
}
} else {
if (FLAG_debug_code) {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
__ push(eax);
__ mov(Operand(eax), Immediate(slots));
Label loop;
__ bind(&loop);
__ push(Immediate(kSlotsZapValue));
__ mov(MemOperand(esp, eax, times_4, 0),
Immediate(kSlotsZapValue));
__ dec(eax);
__ j(not_zero, &loop);
__ pop(eax);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
@ -233,15 +239,29 @@ bool LCodeGen::GeneratePrologue() {
#endif
}

// Store dynamic frame alignment state in the first local.
if (dynamic_frame_alignment_) {
__ mov(Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
edx);
} else {
__ mov(Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
Immediate(kNoAlignmentPadding));
if (support_aligned_spilled_doubles_) {
Comment(";;; Store dynamic frame alignment tag for spilled doubles");
// Store dynamic frame alignment state in the first local.
int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
if (dynamic_frame_alignment_) {
__ mov(Operand(ebp, offset), edx);
} else {
__ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
}
}
}

if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
Comment(";;; Save clobbered callee double registers");
CpuFeatures::Scope scope(SSE2);
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
__ movdbl(MemOperand(esp, count * kDoubleSize),
XMMRegister::FromAllocationIndex(save_iterator.Current()));
save_iterator.Advance();
count++;
}
}
}
@ -740,10 +760,7 @@ void LCodeGen::CallRuntime(const Runtime::Function* fun,
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
if (!ToRegister(context).is(esi)) {
__ mov(esi, ToRegister(context));
@ -757,6 +774,13 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
} else {
UNREACHABLE();
}
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
LoadContextFromDeferred(context);

__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
@ -2644,6 +2668,19 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
ASSERT(NeedsEagerFrame());
CpuFeatures::Scope scope(SSE2);
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
__ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
MemOperand(esp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
}
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
__ mov(edx, Operand(ebp,
@ -4305,9 +4342,16 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
__ TestJSArrayForAllocationSiteInfo(object, temp);
DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register new_map_reg = ToRegister(instr->new_map_temp());

Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
@ -4322,7 +4366,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
Register object_reg = ToRegister(instr->object());
Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> map = instr->hydrogen()->transitioned_map();
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
Immediate(map));
@ -4331,8 +4375,23 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
ToRegister(instr->temp()),
kDontSaveFPRegs);
} else if (FLAG_compiled_transitions) {
PushSafepointRegistersScope scope(this);
if (!object_reg.is(eax)) {
__ push(object_reg);
}
LoadContextFromDeferred(instr->context());
if (!object_reg.is(eax)) {
__ pop(eax);
}
__ mov(ebx, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ mov(new_map_reg, to_map);
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(edx));
@ -4342,6 +4401,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ mov(new_map_reg, to_map);
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(edx));
@ -4638,6 +4698,62 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {

Register reg = ToRegister(instr->result());

bool convert_hole = false;
HValue* change_input = instr->hydrogen()->value();
if (change_input->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(change_input);
convert_hole = load->UsesMustHandleHole();
}

Label no_special_nan_handling;
Label done;
if (convert_hole) {
bool use_sse2 = CpuFeatures::IsSupported(SSE2);
if (use_sse2) {
CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ ucomisd(input_reg, input_reg);
} else {
if (!IsX87TopOfStack(instr->value())) {
__ fld_d(ToOperand(instr->value()));
}
__ fld(0);
__ fld(0);
__ FCmp();
}

__ j(parity_odd, &no_special_nan_handling);
__ sub(esp, Immediate(kDoubleSize));
if (use_sse2) {
CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(MemOperand(esp, 0), input_reg);
} else {
__ fld(0);
__ fstp_d(MemOperand(esp, 0));
}
__ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
Label canonicalize;
__ j(not_equal, &canonicalize);
__ add(esp, Immediate(kDoubleSize));
__ mov(reg, factory()->the_hole_value());
__ jmp(&done);
__ bind(&canonicalize);
__ add(esp, Immediate(kDoubleSize));
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
if (use_sse2) {
CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(input_reg, Operand::StaticVariable(nan));
} else {
__ fstp(0);
__ fld_d(Operand::StaticVariable(nan));
}
}

__ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
Register tmp = ToRegister(instr->temp());
@ -4656,6 +4772,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
}
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ bind(&done);
}


@ -4706,44 +4823,59 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env) {
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;

// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);

// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (deoptimize_on_undefined) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number;
__ j(equal, &heap_number, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (deoptimize_on_undefined) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number;
__ j(equal, &heap_number, Label::kNear);

__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);

// Convert undefined to NaN.
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(nan));
// Convert undefined to NaN.
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);

__ bind(&heap_number);
}
// Heap number to XMM conversion.
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = xmm0;
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, 1);
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);

__ bind(&heap_number);
} else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
__ test(input_reg, Immediate(kSmiTagMask));
DeoptimizeIf(not_equal, env);
} else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
__ test(input_reg, Immediate(kSmiTagMask));
__ j(zero, &load_smi);
ExternalReference hole_nan_reference =
ExternalReference::address_of_the_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference));
__ jmp(&done, Label::kNear);
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
// Heap number to XMM conversion.
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = xmm0;
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, 1);
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);

// Smi to XMM conversion
__ bind(&load_smi);
@ -4889,12 +5021,30 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;

NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->type().IsSmi()) {
if (value->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(value);
if (load->UsesMustHandleHole()) {
if (load->hole_mode() == ALLOW_RETURN_HOLE) {
mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
} else {
mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
}
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
}
}

EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
deoptimize_on_minus_zero,
instr->environment());
instr->environment(),
mode);
} else {
UNIMPLEMENTED();
}
@ -5287,6 +5437,60 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
}


void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LAllocate* instr_;
};

DeferredAllocate* deferred =
new(zone()) DeferredAllocate(this, instr);

Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());

HAllocate* original_instr = instr->hydrogen();
if (original_instr->size()->IsConstant()) {
UNREACHABLE();
} else {
// Allocate memory for the object.
AllocationFlags flags = TAG_OBJECT;
if (original_instr->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
__ AllocateInNewSpace(size, result, temp, no_reg,
deferred->entry(), flags);
}

__ bind(deferred->exit());
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());

__ SmiTag(size);
PushSafepointRegistersScope scope(this);
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
if (!size.is(result)) {
__ StoreToSafepointRegisterSlot(result, size);
}
__ push(size);
CallRuntimeFromDeferred(
Runtime::kAllocateInNewSpace, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Handle<FixedArray> literals(instr->environment()->closure()->literals());

@ -63,6 +63,7 @@ class LCodeGen BASE_EMBEDDED {
translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
@ -133,6 +134,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);

@ -232,6 +234,8 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
LOperand* context);

void LoadContextFromDeferred(LOperand* context);

enum EDIState {
EDI_UNINITIALIZED,
EDI_CONTAINS_TARGET
@ -301,12 +305,14 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
Register temp,
XMMRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env);
void EmitNumberUntagD(
Register input,
Register temp,
XMMRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);

void DeoptIfTaggedButNotSmi(LEnvironment* environment,
HValue* value,
@ -394,6 +400,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;

@ -2158,26 +2158,44 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {

LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
return DefineSameAsFirst(result);
new(zone()) LTransitionElementsKind(object, NULL,
new_map_reg, temp_reg);
return result;
} else if (FLAG_compiled_transitions) {
LOperand* context = UseRegister(instr->context());
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
return AssignPointerMap(result);
} else {
LOperand* object = UseFixed(instr->object(), eax);
LOperand* fixed_object_reg = FixedTemp(edx);
LOperand* new_map_reg = FixedTemp(ebx);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object,
NULL,
new_map_reg,
fixed_object_reg);
return MarkAsCall(DefineFixed(result, eax), instr);
return MarkAsCall(result, instr);
}
}


LInstruction* LChunkBuilder::DoTrapAllocationMemento(
HTrapAllocationMemento* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* temp = TempRegister();
LTrapAllocationMemento* result =
new(zone()) LTrapAllocationMemento(object, temp);
return AssignEnvironment(result);
}


LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@ -2256,13 +2274,24 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {


LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LOperand* context = UseFixed(instr->context(), esi);
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* temp = TempRegister();
LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
return AssignPointerMap(DefineAsRegister(result));
}


LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
}


LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(

@ -43,6 +43,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
@ -168,6 +169,7 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@ -1466,7 +1468,8 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
}

virtual bool ClobbersDoubleRegisters() const {
return !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
return !CpuFeatures::IsSupported(SSE2) &&
!IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
}

DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
@ -1642,6 +1645,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};


@ -1906,6 +1910,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }

DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
DECLARE_HYDROGEN_ACCESSOR(Change)
};


@ -2091,16 +2096,19 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
};


class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
LOperand* new_map_temp,
LOperand* temp) {
inputs_[0] = object;
inputs_[1] = context;
temps_[0] = new_map_temp;
temps_[1] = temp;
}

LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
LOperand* temp() { return temps_[1]; }
@ -2118,6 +2126,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
};


class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
inputs_[0] = object;
temps_[0] = temp;
}

LOperand* object() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }

DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
"trap-allocation-memento")
};


class LStringAdd: public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
@ -2315,6 +2339,23 @@ class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
};


class LAllocate: public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
inputs_[0] = context;
inputs_[1] = size;
temps_[0] = temp;
}

LOperand* context() { return inputs_[0]; }
LOperand* size() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }

DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};


class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFastLiteral(LOperand* context) {

@ -1386,8 +1386,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (DOUBLE_ALIGNMENT | RESULT_CONTAINS_TOP |
SIZE_IN_WORDS)) == 0);
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -1406,6 +1405,19 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);

// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
bind(&aligned);
}
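The fix-up above relies on new-space pointers being word aligned, so top mod 8 is either 0 or 4; a worked example under those assumptions (kDoubleAlignment == 8, kPointerSize == 4):

// top == 0x1000: already 8-byte aligned, fall through to &aligned.
// top == 0x1004: store one_pointer_filler_map at 0x1004 as a stray filler
//                word, then add kDoubleSize / 2 == 4 so top == 0x1008.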

// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
@ -1419,7 +1431,8 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,

// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
lea(result, Operand(result, kHeapObjectTag));
ASSERT(kHeapObjectTag == 1);
inc(result);
}

// Update allocation top.
@ -3052,8 +3065,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {

void MacroAssembler::TestJSArrayForAllocationSiteInfo(
Register receiver_reg,
Register scratch_reg,
Label* allocation_info_present) {
Register scratch_reg) {
Label no_info_available;

ExternalReference new_space_start =
@ -3069,7 +3081,6 @@ void MacroAssembler::TestJSArrayForAllocationSiteInfo(
j(greater, &no_info_available);
cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
j(equal, allocation_info_present);
bind(&no_info_available);
}

@ -864,10 +864,9 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, jump to allocation_info_present
// If allocation info is present, conditional code is set to equal
void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
Register scratch_reg,
Label* allocation_info_present);
Register scratch_reg);

private:
bool generating_stub_;

@ -144,6 +144,21 @@ void UseIterator::Advance() {
: input_iterator_.Advance();
}


void LAllocator::SetLiveRangeAssignedRegister(
LiveRange* range,
int reg,
RegisterKind register_kind,
Zone* zone) {
if (register_kind == DOUBLE_REGISTERS) {
assigned_double_registers_->Add(reg);
} else {
assigned_registers_->Add(reg);
}
range->set_assigned_register(reg, register_kind, zone);
}


} } // namespace v8::internal

#endif // V8_LITHIUM_ALLOCATOR_INL_H_

@ -643,7 +643,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
if (result == NULL) {
result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
ASSERT(result->IsFixed());
result->set_assigned_register(index, GENERAL_REGISTERS, zone_);
SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS, zone_);
fixed_live_ranges_[index] = result;
}
return result;
@ -656,7 +656,7 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
if (result == NULL) {
result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
ASSERT(result->IsFixed());
result->set_assigned_register(index, DOUBLE_REGISTERS, zone_);
SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS, zone_);
fixed_double_live_ranges_[index] = result;
}
return result;
@ -1066,6 +1066,13 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
bool LAllocator::Allocate(LChunk* chunk) {
ASSERT(chunk_ == NULL);
chunk_ = static_cast<LPlatformChunk*>(chunk);
assigned_registers_ =
new(zone()) BitVector(Register::NumAllocatableRegisters(), zone());
assigned_registers_->Clear();
assigned_double_registers_ =
new(zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
zone());
assigned_double_registers_->Clear();
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@ -1808,7 +1815,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning preferred reg %s to live range %d\n",
RegisterName(register_index),
current->id());
current->set_assigned_register(register_index, mode_, zone_);
SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
return true;
}
}
@ -1844,7 +1851,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning free reg %s to live range %d\n",
RegisterName(reg),
current->id());
current->set_assigned_register(reg, mode_, zone_);
SetLiveRangeAssignedRegister(current, reg, mode_, zone_);

return true;
}
@ -1934,7 +1941,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
TraceAlloc("Assigning blocked reg %s to live range %d\n",
RegisterName(reg),
current->id());
current->set_assigned_register(reg, mode_, zone_);
SetLiveRangeAssignedRegister(current, reg, mode_, zone_);

// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register

@ -445,6 +445,13 @@ class LAllocator BASE_EMBEDDED {
void Verify() const;
#endif

BitVector* assigned_registers() {
return assigned_registers_;
}
BitVector* assigned_double_registers() {
return assigned_double_registers_;
}

private:
void MeetRegisterConstraints();
void ResolvePhis();
@ -537,6 +544,11 @@ class LAllocator BASE_EMBEDDED {
HBasicBlock* block,
HBasicBlock* pred);

inline void SetLiveRangeAssignedRegister(LiveRange* range,
int reg,
RegisterKind register_kind,
Zone* zone);

// Return parallel move that should be used to connect ranges split at the
// given position.
LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
@ -591,6 +603,9 @@ class LAllocator BASE_EMBEDDED {
RegisterKind mode_;
int num_registers_;

BitVector* assigned_registers_;
BitVector* assigned_double_registers_;

HGraph* graph_;

bool has_osr_entry_;

@ -257,6 +257,16 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
|
||||
}
|
||||
|
||||
|
||||
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
|
||||
: spill_slot_count_(0),
|
||||
info_(info),
|
||||
graph_(graph),
|
||||
instructions_(32, graph->zone()),
|
||||
pointer_maps_(8, graph->zone()),
|
||||
inlined_closures_(1, graph->zone()) {
|
||||
}
|
||||
|
||||
|
||||
LLabel* LChunk::GetLabel(int block_id) const {
|
||||
HBasicBlock* block = graph_->blocks()->at(block_id);
|
||||
int first_instruction = block->first_instruction_index();
|
||||
@ -410,6 +420,9 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
chunk->set_allocated_double_registers(
|
||||
allocator.assigned_double_registers());
|
||||
|
||||
return chunk;
|
||||
}
|
||||
|
||||
@ -463,4 +476,22 @@ void LChunk::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
|
||||
allocated_double_registers_ = allocated_registers;
|
||||
BitVector* doubles = allocated_double_registers();
|
||||
BitVector::Iterator iterator(doubles);
|
||||
while (!iterator.Done()) {
|
||||
if (info()->saves_caller_doubles()) {
|
||||
if (kDoubleSize == kPointerSize * 2) {
|
||||
spill_slot_count_ += 2;
|
||||
} else {
|
||||
spill_slot_count_++;
|
||||
}
|
||||
}
|
||||
iterator.Advance();
|
||||
}
|
||||
}
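(A worked example of the slot accounting above, assuming a 32-bit target: with kPointerSize == 4 and kDoubleSize == 8, kDoubleSize == kPointerSize * 2 holds, so each callee-clobbered double register that must be preserved costs 8 / 4 = 2 spill slots; on a 64-bit target a single slot per double suffices.)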


} }  // namespace v8::internal

@ -686,14 +686,13 @@ class LChunk: public ZoneObject {

Handle<Code> Codegen(Code::Kind kind);

void set_allocated_double_registers(BitVector* allocated_registers);
BitVector* allocated_double_registers() {
return allocated_double_registers_;
}

protected:
LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
graph_(graph),
instructions_(32, graph->zone()),
pointer_maps_(8, graph->zone()),
inlined_closures_(1, graph->zone()) { }
LChunk(CompilationInfo* info, HGraph* graph);

void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);

@ -702,6 +701,7 @@ class LChunk: public ZoneObject {
private:
CompilationInfo* info_;
HGraph* const graph_;
BitVector* allocated_double_registers_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
ZoneList<Handle<JSFunction> > inlined_closures_;
@ -710,6 +710,13 @@ class LChunk: public ZoneObject {

int ElementsKindToShiftSize(ElementsKind elements_kind);

enum NumberUntagDMode {
NUMBER_CANDIDATE_IS_SMI,
NUMBER_CANDIDATE_IS_SMI_OR_HOLE,
NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE,
NUMBER_CANDIDATE_IS_ANY_TAGGED
};
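(The enum values encode how much the untag code may assume about its input: a known smi, a smi that may also be the hole and must deopt, a smi whose hole should be converted, or any tagged value. A minimal JavaScript sketch of the situation that motivates the two hole-aware modes; variable and function names here are illustrative, not from the patch:

// holey[1] is the hole. A keyed load that must handle the hole either
// deoptimizes (NUMBER_CANDIDATE_IS_SMI_OR_HOLE) or converts the hole
// (NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE), depending on the load's hole mode.
var holey = [1.5, , 3.5];
function sum(a) {
  var total = 0;
  for (var i = 0; i < a.length; i++) {
    total += a[i];  // reading a[1] yields undefined at the language level
  }
  return total;
}
sum(holey);  // NaN, because undefined taints the addition
)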


} }  // namespace v8::internal

@ -4377,6 +4377,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
}


RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
CONVERT_ARG_HANDLE_CHECKED(Map, map, 1);
JSObject::TransitionElementsKind(array, map->elements_kind());
return *array;
}
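(This runtime entry doubles as the new stub's deoptimization handler: when the generated transition code bails out, for example on arrays too large to copy inline, execution falls back here. A small JavaScript sketch of the stores that drive such transitions, for illustration only:

var a = [1, 2, 3];  // SMI elements
a[0] = 1.5;         // transitions SMI -> DOUBLE elements
a[1] = "x";         // transitions DOUBLE -> OBJECT elements
)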


RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 1);

@ -409,6 +409,7 @@ namespace internal {
F(HasExternalFloatElements, 1, 1) \
F(HasExternalDoubleElements, 1, 1) \
F(HasFastProperties, 1, 1) \
F(TransitionElementsKind, 2, 1) \
F(TransitionElementsSmiToDouble, 1, 1) \
F(TransitionElementsDoubleToObject, 1, 1) \
F(HaveSameMap, 2, 1) \

@ -532,6 +532,18 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
52,
"cpu_features");
Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
UNCLASSIFIED,
53,
"Heap::NewSpaceAllocationTopAddress");
Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
UNCLASSIFIED,
54,
"Heap::NewSpaceAllocationLimitAddress");
Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
UNCLASSIFIED,
55,
"Runtime::AllocateInNewSpace");

// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
@ -541,7 +553,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
Add(address, LAZY_DEOPTIMIZATION, 53 + entry, "lazy_deopt");
Add(address, LAZY_DEOPTIMIZATION, 56 + entry, "lazy_deopt");
}
}


@ -202,7 +202,7 @@ const Register no_reg = { kRegister_no_reg_Code };


struct XMMRegister {
static const int kNumRegisters = 16;
static const int kMaxNumRegisters = 16;
static const int kMaxNumAllocatableRegisters = 15;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
@ -243,11 +243,11 @@ struct XMMRegister {

static XMMRegister from_code(int code) {
ASSERT(code >= 0);
ASSERT(code < kNumRegisters);
ASSERT(code < kMaxNumRegisters);
XMMRegister r = { code };
return r;
}
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());

@ -33,6 +33,7 @@
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
#include "runtime.h"

namespace v8 {
namespace internal {
@ -49,6 +50,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
}


#define __ ACCESS_MASM(masm)

void ToNumberStub::Generate(MacroAssembler* masm) {

@ -262,8 +262,8 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_site_info_found != NULL);
masm->TestJSArrayForAllocationSiteInfo(rdx, rdi,
allocation_site_info_found);
__ TestJSArrayForAllocationSiteInfo(rdx, rdi);
__ j(equal, allocation_site_info_found);
}

// Set transitioned map.
@ -291,7 +291,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label allocated, new_backing_store, only_change_map, done;

if (mode == TRACK_ALLOCATION_SITE) {
masm->TestJSArrayForAllocationSiteInfo(rdx, rdi, fail);
__ TestJSArrayForAllocationSiteInfo(rdx, rdi);
__ j(equal, fail);
}

// Check for empty arrays, which only require a map transition and no changes
@ -416,7 +417,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map;

if (mode == TRACK_ALLOCATION_SITE) {
masm->TestJSArrayForAllocationSiteInfo(rdx, rdi, fail);
__ TestJSArrayForAllocationSiteInfo(rdx, rdi);
__ j(equal, fail);
}

// Check for empty arrays, which only require a map transition and no changes

@ -533,6 +533,11 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
output_frame->SetRegister(rbp.code(), value);
output_frame->SetFp(value);

for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}

intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
output_frame->SetRegister(rax.code(), descriptor->register_param_count_);
@ -1139,13 +1144,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);

// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(rbx, src_offset));
}
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(rbx, src_offset));
}

// Push state, pc, and continuation from the last output frame.

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -163,13 +163,17 @@ bool LCodeGen::GeneratePrologue() {
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
__ subq(rsp, Immediate(slots * kPointerSize));
__ push(rax);
__ Set(rax, slots);
__ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
Label loop;
__ bind(&loop);
__ push(kScratchRegister);
__ movq(MemOperand(rsp, rax, times_pointer_size, 0),
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
__ pop(rax);
} else {
__ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
@ -184,6 +188,19 @@
}
#endif
}

if (info()->saves_caller_doubles()) {
Comment(";;; Save clobbered callee double registers");
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
__ movsd(MemOperand(rsp, count * kDoubleSize),
XMMRegister::FromAllocationIndex(save_iterator.Current()));
save_iterator.Advance();
count++;
}
}
}

// Possibly allocate a local context.
@ -2465,6 +2482,18 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
ASSERT(NeedsEagerFrame());
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
__ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
MemOperand(rsp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
}
if (NeedsEagerFrame()) {
__ movq(rsp, rbp);
__ pop(rbp);
@ -4135,7 +4164,6 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {

void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register new_map_reg = ToRegister(instr->new_map_temp());

Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
@ -4145,18 +4173,31 @@
Label not_applicable;
__ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
__ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
__ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp()), kDontSaveFPRegs);
} else if (FLAG_compiled_transitions) {
PushSafepointRegistersScope scope(this);
if (!object_reg.is(rax)) {
__ movq(rax, object_reg);
}
__ Move(rbx, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(rdx));
Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(rbx));
__ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
__ movq(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
@ -4164,7 +4205,9 @@
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(rdx));
Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(rbx));
__ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
__ movq(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
@ -4175,6 +4218,14 @@
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
__ TestJSArrayForAllocationSiteInfo(object, temp);
DeoptimizeIf(equal, instr->environment());
}
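(The trap deoptimizes when the array is still followed by an AllocationSiteInfo memento, letting the runtime update the remembered elements kind for that allocation site. A hypothetical JavaScript trigger, names illustrative:

function makeAndTransition() {
  var a = [1, 2, 3];  // fresh literal; a memento may sit right after it
  a[0] = 1.5;         // transitioning a tracked array hits the memento check
  return a;
}
makeAndTransition();
)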


void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
@ -4401,6 +4452,36 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());

bool convert_hole = false;
HValue* change_input = instr->hydrogen()->value();
if (change_input->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(change_input);
convert_hole = load->UsesMustHandleHole();
}

Label no_special_nan_handling;
Label done;
if (convert_hole) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ ucomisd(input_reg, input_reg);
__ j(parity_odd, &no_special_nan_handling);
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
__ cmpl(MemOperand(rsp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
Label canonicalize;
__ j(not_equal, &canonicalize);
__ addq(rsp, Immediate(kDoubleSize));
__ Move(reg, factory()->the_hole_value());
__ jmp(&done);
__ bind(&canonicalize);
__ addq(rsp, Immediate(kDoubleSize));
__ Set(kScratchRegister, BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
__ movq(input_reg, kScratchRegister);
}

__ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
@ -4409,6 +4490,8 @@
}
__ bind(deferred->exit());
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);

__ bind(&done);
}


@ -4454,43 +4537,58 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env) {
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;

// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);

// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
if (deoptimize_on_undefined) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number;
__ j(equal, &heap_number, Label::kNear);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
if (deoptimize_on_undefined) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number;
__ j(equal, &heap_number, Label::kNear);

__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, env);
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, env);

// Convert undefined to NaN. Compute NaN as 0/0.
__ xorps(result_reg, result_reg);
__ divsd(result_reg, result_reg);
// Convert undefined to NaN. Compute NaN as 0/0.
__ xorps(result_reg, result_reg);
__ divsd(result_reg, result_reg);
__ jmp(&done, Label::kNear);

__ bind(&heap_number);
}
// Heap number to XMM conversion.
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = xmm0;
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
__ testq(kScratchRegister, Immediate(1));
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);

__ bind(&heap_number);
} else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
__ testq(input_reg, Immediate(kSmiTagMask));
DeoptimizeIf(not_equal, env);
} else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
__ testq(input_reg, Immediate(kSmiTagMask));
__ j(zero, &load_smi);
__ Set(kScratchRegister, BitCast<uint64_t>(
FixedDoubleArray::hole_nan_as_double()));
__ movq(result_reg, kScratchRegister);
__ jmp(&done, Label::kNear);
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
// Heap number to XMM conversion.
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = xmm0;
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
__ testq(kScratchRegister, Immediate(1));
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);

// Smi to XMM conversion
__ bind(&load_smi);
@ -4579,10 +4677,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);

NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
if (value->type().IsSmi()) {
if (value->IsLoadKeyed()) {
HLoadKeyed* load = HLoadKeyed::cast(value);
if (load->UsesMustHandleHole()) {
if (load->hole_mode() == ALLOW_RETURN_HOLE) {
mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
} else {
mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
}
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
}
}

EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment());
instr->environment(),
mode);
}


@ -4894,6 +5010,58 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
}


void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LAllocate* instr_;
};

DeferredAllocate* deferred =
new(zone()) DeferredAllocate(this, instr);

Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());

HAllocate* original_instr = instr->hydrogen();
if (original_instr->size()->IsConstant()) {
UNREACHABLE();
} else {
// Allocate memory for the object.
AllocationFlags flags = TAG_OBJECT;
if (original_instr->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
__ AllocateInNewSpace(size, result, temp, no_reg,
deferred->entry(), flags);
}

__ bind(deferred->exit());
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());

// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Set(result, 0);

PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(size, size);
__ push(size);
CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
__ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =

@ -116,6 +116,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);

@ -282,11 +283,13 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
XMMRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env);
void EmitNumberUntagD(
Register input,
XMMRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);


void DeoptIfTaggedButNotSmi(LEnvironment* environment,

@ -2039,13 +2039,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {

LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
return DefineSameAsFirst(result);
return result;
} else if (FLAG_compiled_transitions) {
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, NULL, NULL);
return AssignPointerMap(result);
} else {
LOperand* object = UseFixed(instr->object(), rax);
LOperand* fixed_object_reg = FixedTemp(rdx);
@ -2054,11 +2059,21 @@
new(zone()) LTransitionElementsKind(object,
new_map_reg,
fixed_object_reg);
return MarkAsCall(DefineFixed(result, rax), instr);
return MarkAsCall(result, instr);
}
}


LInstruction* LChunkBuilder::DoTrapAllocationMemento(
HTrapAllocationMemento* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* temp = TempRegister();
LTrapAllocationMemento* result =
new(zone()) LTrapAllocationMemento(object, temp);
return AssignEnvironment(result);
}


LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@ -2127,11 +2142,21 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {


LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
info()->MarkAsDeferredCalling();
LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}


LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp);
return AssignPointerMap(DefineAsRegister(result));
}


LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
}

@ -49,6 +49,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
@ -173,6 +174,7 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@ -1577,6 +1579,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};


@ -1804,6 +1807,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }

DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
DECLARE_HYDROGEN_ACCESSOR(Change)
};


@ -1998,6 +2002,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
};


class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
inputs_[0] = object;
temps_[0] = temp;
}

LOperand* object() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }

DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
"trap-allocation-memento")
};


class LStringAdd: public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
@ -2187,6 +2207,21 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
};


class LAllocate: public LTemplateInstruction<1, 1, 1> {
public:
LAllocate(LOperand* size, LOperand* temp) {
inputs_[0] = size;
temps_[0] = temp;
}

LOperand* size() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }

DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};


class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")

@ -899,8 +899,8 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
}
@ -914,11 +914,11 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
@ -3423,7 +3423,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
int space = XMMRegister::kNumRegisters * kDoubleSize +
int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
@ -3877,8 +3877,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (DOUBLE_ALIGNMENT | RESULT_CONTAINS_TOP |
SIZE_IN_WORDS)) == 0);
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -3912,6 +3911,13 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);

// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
testq(result, Immediate(kDoubleAlignmentMask));
Check(zero, "Allocation is not double aligned");
}

// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
addq(result, Immediate(kHeapObjectTag));
@ -4606,8 +4612,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {

void MacroAssembler::TestJSArrayForAllocationSiteInfo(
Register receiver_reg,
Register scratch_reg,
Label* allocation_info_present) {
Register scratch_reg) {
Label no_info_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
@ -4623,7 +4628,6 @@ void MacroAssembler::TestJSArrayForAllocationSiteInfo(
j(greater, &no_info_available);
CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
Heap::kAllocationSiteInfoMapRootIndex);
j(equal, allocation_info_present);
bind(&no_info_available);
}


@ -1320,10 +1320,9 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, jump to allocation_info_present
// If allocation info is present, condition flags are set to equal
void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
Register scratch_reg,
Label* allocation_info_present);
Register scratch_reg);

private:
// Order general registers are pushed by Pushad.
test/mjsunit/generated-transition-stub.js (new file, 218 lines)
@ -0,0 +1,218 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --allow-natives-syntax --compiled_transitions

try {} catch (e) {}

var iteration_count = 1;

function transition1(a, i, v) {
a[i] = v;
}

//
// Test PACKED SMI -> PACKED DOUBLE
//

var a1 = [0, 1, 2, 3, 4];
transition1(a1, 0, 2.5);
var a2 = [0, 1, 2, 3, 4];
transition1(a2, 0, 2.5);
assertFalse(%HasFastHoleyElements(a2));
%OptimizeFunctionOnNextCall(transition1);

var a3 = [0, 1, 2, 3, 4];
assertTrue(%HasFastSmiElements(a3));
transition1(a3, 0, 2.5);
assertFalse(%HasFastHoleyElements(a3));
assertEquals(4, a3[4]);
assertEquals(2.5, a3[0]);

// Test handling of hole.
var a4 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
a4.length = 7;
assertTrue(%HasFastSmiElements(a4));
transition1(a4, 0, 2.5);
assertFalse(%HasFastHoleyElements(a4));
assertEquals(2.5, a4[0]);
assertEquals(undefined, a4[8]);

// Large array should deopt to runtime
for (j = 0; j < iteration_count; ++j) {
a5 = new Array();
for (i = 0; i < 0x40000; ++i) {
a5[i] = 0;
}
assertTrue(%HasFastSmiElements(a5));
transition1(a5, 0, 2.5);
assertEquals(2.5, a5[0]);
}

//
// Test HOLEY SMI -> HOLEY DOUBLE
//

function transition2(a, i, v) {
a[i] = v;
}

var b1 = [0, 1, 2, , 4];
transition2(b1, 0, 2.5);
var b2 = [0, 1, 2, , 4];
transition2(b2, 0, 2.5);
assertTrue(%HasFastHoleyElements(b2));
%OptimizeFunctionOnNextCall(transition2);

var b3 = [0, 1, 2, , 4];
assertTrue(%HasFastSmiElements(b3));
assertTrue(%HasFastHoleyElements(b3));
transition2(b3, 0, 2.5);
assertTrue(%HasFastHoleyElements(b3));
assertEquals(4, b3[4]);
assertEquals(2.5, b3[0]);

// Large array should deopt to runtime
for (j = 0; j < iteration_count; ++j) {
b4 = [0, ,0];
for (i = 3; i < 0x40000; ++i) {
b4[i] = 0;
}
assertTrue(%HasFastSmiElements(b4));
transition2(b4, 0, 2.5);
assertEquals(2.5, b4[0]);
}

//
// Test PACKED DOUBLE -> PACKED OBJECT
//

function transition3(a, i, v) {
a[i] = v;
}

var c1 = [0, 1, 2, 3.5, 4];
transition3(c1, 0, new Object());
var c2 = [0, 1, 2, 3.5, 4];
transition3(c2, 0, new Object());
assertTrue(%HasFastObjectElements(c2));
assertTrue(!%HasFastHoleyElements(c2));
%OptimizeFunctionOnNextCall(transition3);

var c3 = [0, 1, 2, 3.5, 4];
assertTrue(%HasFastDoubleElements(c3));
assertTrue(!%HasFastHoleyElements(c3));
transition3(c3, 0, new Array());
assertTrue(!%HasFastHoleyElements(c3));
assertTrue(%HasFastObjectElements(c3));
assertEquals(4, c3[4]);
assertEquals(0, c3[0].length);

// Large array under the deopt threshold should be able to trigger GC without
// causing crashes.
for (j = 0; j < iteration_count; ++j) {
c4 = [0, 2.5, 0];
for (i = 3; i < 0xa000; ++i) {
c4[i] = 0;
}
assertTrue(%HasFastDoubleElements(c4));
assertTrue(!%HasFastHoleyElements(c4));
transition3(c4, 0, new Array(5));
assertTrue(!%HasFastHoleyElements(c4));
assertTrue(%HasFastObjectElements(c4));
assertEquals(5, c4[0].length);
}

// Large array should deopt to runtime
for (j = 0; j < iteration_count; ++j) {
c5 = [0, 2.5, 0];
for (i = 3; i < 0x40000; ++i) {
c5[i] = 0;
}
assertTrue(%HasFastDoubleElements(c5));
assertTrue(!%HasFastHoleyElements(c5));
transition3(c5, 0, new Array(5));
assertTrue(!%HasFastHoleyElements(c5));
assertTrue(%HasFastObjectElements(c5));
assertEquals(5, c5[0].length);
}

//
// Test HOLEY DOUBLE -> HOLEY OBJECT
//

function transition4(a, i, v) {
a[i] = v;
}

var d1 = [0, 1, , 3.5, 4];
transition4(d1, 0, new Object());
var d2 = [0, 1, , 3.5, 4];
transition4(d2, 0, new Object());
assertTrue(%HasFastObjectElements(d2));
assertTrue(%HasFastHoleyElements(d2));
%OptimizeFunctionOnNextCall(transition4);

var d3 = [0, 1, , 3.5, 4];
assertTrue(%HasFastDoubleElements(d3));
assertTrue(%HasFastHoleyElements(d3));
transition4(d3, 0, new Array());
assertTrue(%HasFastHoleyElements(d3));
assertTrue(%HasFastObjectElements(d3));
assertEquals(4, d3[4]);
assertEquals(0, d3[0].length);

// Large array under the deopt threshold should be able to trigger GC without
// causing crashes.
for (j = 0; j < iteration_count; ++j) {
d4 = [, 2.5, ,];
for (i = 3; i < 0xa000; ++i) {
d4[i] = 0;
}
assertTrue(%HasFastDoubleElements(d4));
assertTrue(%HasFastHoleyElements(d4));
transition4(d4, 0, new Array(5));
assertTrue(%HasFastHoleyElements(d4));
assertTrue(%HasFastObjectElements(d4));
assertEquals(5, d4[0].length);
assertEquals(undefined, d4[2]);
}

// Large array should deopt to runtime
for (j = 0; j < iteration_count; ++j) {
d5 = [, 2.5, ,];
for (i = 3; i < 0x40000; ++i) {
d5[i] = 0;
}
assertTrue(%HasFastDoubleElements(d5));
assertTrue(%HasFastHoleyElements(d5));
transition4(d5, 0, new Array(5));
assertTrue(%HasFastHoleyElements(d5));
assertTrue(%HasFastObjectElements(d5));
assertEquals(5, d5[0].length);
assertEquals(undefined, d5[2]);
}

@ -71,6 +71,7 @@ json-recursive: PASS, (PASS || FAIL) if $mode == debug
##############################################################################
# Skip long running test that times out in debug mode.
regress/regress-crbug-160010: PASS, SKIP if $mode == debug
generated-transition-stub: PASS, SKIP if $mode == debug

##############################################################################
# This test sets the umask on a per-process basis and hence cannot be