MIPS: Generate the TransitionElementsStub using Crankshaft

Port r13585 (4174b9d2)

Original commit message:
This includes:
* Adding support for saving callee-clobbered double registers in Crankshaft code.
* Adding a new "HTrapAllocationMemento" hydrogen instruction to handle AllocationSiteInfo data in crankshafted stubs.
* Adding a new "HAllocate" hydrogen instruction that can allocate raw memory from the GC in crankshafted code.
* Support for manipulation of the hole in HChange instructions for Crankshafted stubs.
* Utility routines to manually build loops and if statements containing hydrogen code.

BUG=
TEST=

Review URL: https://chromiumcodereview.appspot.com/12212080

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13655 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author:  palfia@homejinni.com
Date:    2013-02-13 14:01:22 +00:00
Parent:  1d33c7721d
Commit:  eb573e1122

11 changed files with 369 additions and 71 deletions

src/mips/assembler-mips.cc:

@@ -91,7 +91,7 @@ int Register::NumAllocatableRegisters() {
 int DoubleRegister::NumRegisters() {
   if (CpuFeatures::IsSupported(FPU)) {
-    return FPURegister::kNumRegisters;
+    return FPURegister::kMaxNumRegisters;
   } else {
     return 1;
   }

src/mips/assembler-mips.h:

@@ -189,7 +189,7 @@ Register ToRegister(int num);
 // Coprocessor register.
 struct FPURegister {
-  static const int kNumRegisters = v8::internal::kNumFPURegisters;
+  static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
 
   // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
   // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
@@ -200,7 +200,7 @@ struct FPURegister {
   // f28: 0.0
   // f30: scratch register.
   static const int kNumReservedRegisters = 2;
-  static const int kMaxNumAllocatableRegisters = kNumRegisters / 2 -
+  static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
       kNumReservedRegisters;
 
   inline static int NumRegisters();
@@ -218,7 +218,7 @@ struct FPURegister {
     return r;
   }
-  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
+  bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
   bool is(FPURegister creg) const { return code_ == creg.code_; }
   FPURegister low() const {
     // Find low reg of a Double-reg pair, which is the reg itself.

src/mips/code-stubs-mips.cc:

@@ -50,6 +50,18 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
 }
 
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0, a1 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  Address entry =
+      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
 
 #define __ ACCESS_MASM(masm)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
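For orientation: the descriptor above fixes the stub's register calling convention, and the Crankshaft call site added later in this commit (in lithium-codegen-mips.cc) matches it. Condensed from that hunk, not an extra line of this diff:

    // The stub expects the object in a0 and the target map in a1,
    // exactly the registers listed in the descriptor.
    __ mov(a0, object_reg);
    __ li(a1, Operand(to_map));
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);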

src/mips/codegen-mips.cc:

@@ -155,7 +155,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
   //  -----------------------------------
   if (mode == TRACK_ALLOCATION_SITE) {
     ASSERT(allocation_site_info_found != NULL);
-    masm->TestJSArrayForAllocationSiteInfo(a2, t0,
+    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq,
                                            allocation_site_info_found);
   }
@@ -188,7 +188,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   Register scratch = t6;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    masm->TestJSArrayForAllocationSiteInfo(a2, t0, fail);
+    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
@@ -332,7 +332,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   Label entry, loop, convert_hole, gc_required, only_change_map;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    masm->TestJSArrayForAllocationSiteInfo(a2, t0, fail);
+    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes

src/mips/deoptimizer-mips.cc:

@@ -526,6 +526,11 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
     DoTranslateCommand(iterator, 0, output_frame_offset);
   }
 
+  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
+
   value = input_->GetRegister(fp.code());
   output_frame->SetRegister(fp.code(), value);
   output_frame->SetFp(value);
@@ -1076,11 +1081,11 @@ void Deoptimizer::EntryGenerator::Generate() {
     }
   }
 
+  int double_regs_offset = FrameDescription::double_registers_offset();
   if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // Copy FPU registers to
     // double_registers_[DoubleRegister::kNumAllocatableRegisters]
-    int double_regs_offset = FrameDescription::double_registers_offset();
     for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
       int dst_offset = i * kDoubleSize + double_regs_offset;
       int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
@@ -1131,16 +1136,16 @@ void Deoptimizer::EntryGenerator::Generate() {
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
-  // Outer loop state: a0 = current "FrameDescription** output_",
+  // Outer loop state: t0 = current "FrameDescription** output_",
   // a1 = one past the last FrameDescription**.
   __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
-  __ lw(a0, MemOperand(a0, Deoptimizer::output_offset()));  // a0 is output_.
+  __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));  // t0 is output_.
   __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
-  __ addu(a1, a0, a1);  // a1 = one past the last FrameDescription**.
+  __ addu(a1, t0, a1);  // a1 = one past the last FrameDescription**.
   __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
-  __ lw(a2, MemOperand(a0, 0));  // output_[ix]
+  __ lw(a2, MemOperand(t0, 0));  // output_[ix]
   __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
   __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
@@ -1151,10 +1156,20 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ bind(&inner_loop_header);
   __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
 
-  __ Addu(a0, a0, Operand(kPointerSize));
+  __ Addu(t0, t0, Operand(kPointerSize));
   __ bind(&outer_loop_header);
-  __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+  __ Branch(&outer_push_loop, lt, t0, Operand(a1));
 
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+    for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+      const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+      int src_offset = i * kDoubleSize + double_regs_offset;
+      __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+    }
+  }
 
   // Push state, pc, and continuation from the last output frame.
   if (type() != OSR) {

src/mips/lithium-codegen-mips.cc:

@@ -147,14 +147,21 @@ bool LCodeGen::GeneratePrologue() {
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    // The following three instructions must remain together and unmodified for
-    // code aging to work properly.
-    __ Push(ra, fp, cp, a1);
-    // Add unused load of ip to ensure prologue sequence is identical for
-    // full-codegen and lithium-codegen.
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    // Adj. FP to point to saved FP.
-    __ Addu(fp, sp, Operand(2 * kPointerSize));
+    if (info()->IsStub()) {
+      __ Push(ra, fp, cp);
+      __ Push(Smi::FromInt(StackFrame::STUB));
+      // Adjust FP to point to saved FP.
+      __ Addu(fp, sp, Operand(2 * kPointerSize));
+    } else {
+      // The following three instructions must remain together and unmodified
+      // for code aging to work properly.
+      __ Push(ra, fp, cp, a1);
+      // Add unused load of ip to ensure prologue sequence is identical for
+      // full-codegen and lithium-codegen.
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      // Adj. FP to point to saved FP.
+      __ Addu(fp, sp, Operand(2 * kPointerSize));
+    }
     frame_is_built_ = true;
   }
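The stub branch above produces the following frame layout, a sketch derived from the three pushes and the fp adjustment (offsets assume kPointerSize == 4):

    // fp + 4 : saved ra
    // fp + 0 : caller's fp    <- fp = sp + 2 * kPointerSize
    // fp - 4 : cp (context)
    // fp - 8 : Smi-tagged StackFrame::STUB marker    <- sp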
@@ -162,18 +169,37 @@ bool LCodeGen::GeneratePrologue() {
   int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
-      __ li(a0, Operand(slots));
-      __ li(a2, Operand(kSlotsZapValue));
+      __ Subu(sp, sp, Operand(slots * kPointerSize));
+      __ push(a0);
+      __ push(a1);
+      __ Addu(a0, sp, Operand(slots * kPointerSize));
+      __ li(a1, Operand(kSlotsZapValue));
       Label loop;
       __ bind(&loop);
-      __ push(a2);
-      __ Subu(a0, a0, 1);
-      __ Branch(&loop, ne, a0, Operand(zero_reg));
+      __ Subu(a0, a0, Operand(kPointerSize));
+      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
+      __ Branch(&loop, ne, a0, Operand(sp));
+      __ pop(a1);
+      __ pop(a0);
     } else {
       __ Subu(sp, sp, Operand(slots * kPointerSize));
     }
   }
 
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    Comment(";;; Save clobbered callee double registers");
+    int count = 0;
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    while (!save_iterator.Done()) {
+      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
+
   // Possibly allocate a local context.
   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
@@ -2464,11 +2490,26 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ push(v0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    ASSERT(NeedsEagerFrame());
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    int count = 0;
+    while (!save_iterator.Done()) {
+      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
   if (NeedsEagerFrame()) {
     int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
     __ mov(sp, fp);
     __ Pop(ra, fp);
-    __ Addu(sp, sp, Operand(sp_delta));
+    if (!info()->IsStub()) {
+      __ Addu(sp, sp, Operand(sp_delta));
+    }
   }
   __ Jump(ra);
 }
@@ -3245,8 +3286,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
 void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
   Register result = ToRegister(instr->result());
-  __ mov(result, cp);
+  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsReturn()) {
+      __ mov(result, cp);
+      return;
+    }
+  }
 }
@@ -4173,7 +4220,6 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_temp());
   Register scratch = scratch0();
 
   Handle<Map> from_map = instr->original_map();
@@ -4181,23 +4227,32 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   ElementsKind from_kind = instr->from_kind();
   ElementsKind to_kind = instr->to_kind();
 
+  __ mov(ToRegister(instr->result()), object_reg);
+
   Label not_applicable;
   __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
 
-  __ li(new_map_reg, Operand(to_map));
   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ li(new_map_reg, Operand(to_map));
     __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+  } else if (FLAG_compiled_transitions) {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    __ mov(a0, object_reg);
+    __ li(a1, Operand(to_map));
+    TransitionElementsKindStub stub(from_kind, to_kind);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   } else if (IsFastSmiElementsKind(from_kind) &&
             IsFastDoubleElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(a2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     ASSERT(new_map_reg.is(a3));
+    __ li(new_map_reg, Operand(to_map));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
@@ -4205,7 +4260,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
              IsFastObjectElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(a2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     ASSERT(new_map_reg.is(a3));
+    __ li(new_map_reg, Operand(to_map));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
              RelocInfo::CODE_TARGET, instr);
@@ -4216,6 +4273,16 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
 }
 
 
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label fail;
+  __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
+  DeoptimizeIf(al, instr->environment());
+  __ bind(&fail);
+}
+
+
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
@@ -4552,6 +4619,52 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   Register temp1 = ToRegister(instr->temp());
   Register temp2 = ToRegister(instr->temp2());
 
+  bool convert_hole = false;
+  HValue* change_input = instr->hydrogen()->value();
+  if (change_input->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(change_input);
+    convert_hole = load->UsesMustHandleHole();
+  }
+
+  Label no_special_nan_handling;
+  Label done;
+  if (convert_hole) {
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      DoubleRegister input_reg = ToDoubleRegister(instr->value());
+      __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
+      __ Move(reg, scratch0(), input_reg);
+      Label canonicalize;
+      __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
+      __ li(reg, factory()->the_hole_value());
+      __ Branch(&done);
+      __ bind(&canonicalize);
+      __ Move(input_reg,
+              FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+    } else {
+      Label not_hole;
+      __ Branch(&not_hole, ne, sfpd_hi, Operand(kHoleNanUpper32));
+      __ li(reg, factory()->the_hole_value());
+      __ Branch(&done);
+      __ bind(&not_hole);
+      __ And(scratch, sfpd_hi, Operand(0x7ff00000));
+      __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000));
+      Label special_nan_handling;
+      __ And(at, sfpd_hi, Operand(0x000FFFFF));
+      __ Branch(&special_nan_handling, ne, at, Operand(zero_reg));
+      __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg));
+      __ bind(&special_nan_handling);
+      double canonical_nan =
+          FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+      uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+      __ li(sfpd_lo,
+            Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+      __ li(sfpd_hi,
+            Operand(static_cast<uint32_t>(casted_nan >> 32)));
+    }
+  }
+  __ bind(&no_special_nan_handling);
+
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4571,6 +4684,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   }
 
   // Now that we have finished with the object's real address tag it
   __ Addu(reg, reg, kHeapObjectTag);
+  __ bind(&done);
 }
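The hole tests above look only at the upper 32 bits of the double. As a plain C++ sketch of the same predicate (IsHoleNan is a hypothetical helper; kHoleNanUpper32 is the sentinel high word defined in V8's headers):

    #include <cstdint>
    #include <cstring>

    extern const uint32_t kHoleNanUpper32;  // V8's hole-NaN high word

    // Mirrors the Branch on scratch0() / sfpd_hi above: an element is
    // "the hole" iff the high word of its bit pattern matches the sentinel.
    inline bool IsHoleNan(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }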
@@ -4614,43 +4728,57 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 DoubleRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
-                                LEnvironment* env) {
+                                LEnvironment* env,
+                                NumberUntagDMode mode) {
   Register scratch = scratch0();
   CpuFeatures::Scope scope(FPU);
 
   Label load_smi, heap_number, done;
 
-  // Smi check.
-  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
 
-  // Heap number map check.
-  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  if (deoptimize_on_undefined) {
-    DeoptimizeIf(ne, env, scratch, Operand(at));
-  } else {
-    Label heap_number;
-    __ Branch(&heap_number, eq, scratch, Operand(at));
+    // Heap number map check.
+    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    if (deoptimize_on_undefined) {
+      DeoptimizeIf(ne, env, scratch, Operand(at));
+    } else {
+      Label heap_number;
+      __ Branch(&heap_number, eq, scratch, Operand(at));
 
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    DeoptimizeIf(ne, env, input_reg, Operand(at));
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, env, input_reg, Operand(at));
 
-    // Convert undefined to NaN.
-    __ LoadRoot(at, Heap::kNanValueRootIndex);
-    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+      // Convert undefined to NaN.
+      __ LoadRoot(at, Heap::kNanValueRootIndex);
+      __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+      __ Branch(&done);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to double register conversion.
+    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      __ mfc1(at, result_reg.low());
+      __ Branch(&done, ne, at, Operand(zero_reg));
+      __ mfc1(scratch, result_reg.high());
+      DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+    }
     __ Branch(&done);
-
-    __ bind(&heap_number);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+    __ SmiUntag(scratch, input_reg);
+    DeoptimizeIf(Ugreater_equal, env, scratch, Operand(zero_reg));
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    __ Move(result_reg,
+            FixedDoubleArray::hole_nan_as_double());
+    __ Branch(&done);
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
-  // Heap number to double register conversion.
-  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-  if (deoptimize_on_minus_zero) {
-    __ mfc1(at, result_reg.low());
-    __ Branch(&done, ne, at, Operand(zero_reg));
-    __ mfc1(scratch, result_reg.high());
-    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
-  }
-  __ Branch(&done);
 
   // Smi to double register conversion
   __ bind(&load_smi);
@@ -4777,10 +4905,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   Register input_reg = ToRegister(input);
   DoubleRegister result_reg = ToDoubleRegister(result);
 
+  NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+  HValue* value = instr->hydrogen()->value();
+  if (value->type().IsSmi()) {
+    if (value->IsLoadKeyed()) {
+      HLoadKeyed* load = HLoadKeyed::cast(value);
+      if (load->UsesMustHandleHole()) {
+        if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+          mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+        } else {
+          mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+        }
+      } else {
+        mode = NUMBER_CANDIDATE_IS_SMI;
+      }
+    }
+  }
+
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
                    instr->hydrogen()->deoptimize_on_minus_zero(),
-                   instr->environment());
+                   instr->environment(),
+                   mode);
 }
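Restated as a table (a summary of the selection logic just shown, using names from this commit):

    // value not provably smi                           -> NUMBER_CANDIDATE_IS_ANY_TAGGED
    // smi-typed, but not a LoadKeyed                   -> NUMBER_CANDIDATE_IS_ANY_TAGGED
    // smi-typed LoadKeyed, hole_mode ALLOW_RETURN_HOLE -> NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE
    // smi-typed LoadKeyed, other must-handle-hole use  -> NUMBER_CANDIDATE_IS_SMI_OR_HOLE
    // smi-typed LoadKeyed, no hole handling            -> NUMBER_CANDIDATE_IS_SMI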
@@ -5074,6 +5220,63 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
 }
 
 
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  HAllocate* original_instr = instr->hydrogen();
+  if (original_instr->size()->IsConstant()) {
+    UNREACHABLE();
+  } else {
+    // Allocate memory for the object.
+    AllocationFlags flags = TAG_OBJECT;
+    if (original_instr->MustAllocateDoubleAligned()) {
+      flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+    }
+    __ AllocateInNewSpace(size,
+                          result,
+                          scratch,
+                          scratch2,
+                          deferred->entry(),
+                          TAG_OBJECT);
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ SmiTag(size, size);
+  __ push(size);
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   Handle<FixedArray> literals(instr->environment()->closure()->literals());
   ElementsKind boilerplate_elements_kind =
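DoAllocate and DoDeferredAllocate above follow Crankshaft's usual deferred-code split: an inline bump-pointer fast path with an out-of-line runtime fallback. A minimal self-contained C++ sketch of that shape (hypothetical names; the real fast path is the AllocateInNewSpace macro above):

    #include <cstddef>
    #include <cstdint>

    extern uint8_t* new_space_top;    // stands in for the heap's allocation top
    extern uint8_t* new_space_limit;  // stands in for the allocation limit
    extern void* RuntimeAllocateInNewSpace(std::size_t size);  // slow path

    inline void* AllocateRaw(std::size_t size) {
      if (new_space_top + size <= new_space_limit) {  // fast path: bump the top
        void* result = new_space_top;
        new_space_top += size;
        return result;                                // lands at deferred->exit()
      }
      return RuntimeAllocateInNewSpace(size);         // DoDeferredAllocate's job
    }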

src/mips/lithium-codegen-mips.h:

@@ -133,6 +133,7 @@ class LCodeGen BASE_EMBEDDED {
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredAllocateObject(LAllocateObject* instr);
+  void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
@@ -326,7 +327,8 @@ class LCodeGen BASE_EMBEDDED {
                         DoubleRegister result,
                         bool deoptimize_on_undefined,
                         bool deoptimize_on_minus_zero,
-                        LEnvironment* env);
+                        LEnvironment* env,
+                        NumberUntagDMode mode);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to

src/mips/lithium-mips.cc:

@@ -2011,12 +2011,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
+  LOperand* object = UseRegister(instr->object());
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
-    LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
     return DefineSameAsFirst(result);
+  } else if (FLAG_compiled_transitions) {
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL, NULL);
+    return AssignPointerMap(result);
   } else {
     LOperand* object = UseFixed(instr->object(), a0);
     LOperand* fixed_object_reg = FixedTemp(a2);
@@ -2025,11 +2029,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
         new(zone()) LTransitionElementsKind(object,
                                             new_map_reg,
                                             fixed_object_reg);
-    return MarkAsCall(DefineFixed(result, v0), instr);
+    return MarkAsCall(result, instr);
   }
 }
 
 
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
   bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@@ -2096,12 +2110,23 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
 
 LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
   info()->MarkAsDeferredCalling();
   LAllocateObject* result =
       new(zone()) LAllocateObject(TempRegister(), TempRegister());
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* size = UseTempRegister(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
 LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
   return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
 }

src/mips/lithium-mips.h:

@@ -50,6 +50,7 @@ class LCodeGen;
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
   V(AllocateObject)                             \
+  V(Allocate)                                   \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -173,6 +174,7 @@ class LCodeGen;
   V(Throw)                                      \
   V(ToFastProperties)                           \
   V(TransitionElementsKind)                     \
+  V(TrapAllocationMemento)                      \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
@@ -1583,6 +1585,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
 class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
 };
@@ -1816,6 +1819,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
   LOperand* temp2() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
 };
@@ -2000,10 +2004,10 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* new_map_temp,
-                          LOperand* temp) {
+                          LOperand* fixed_object_temp) {
     inputs_[0] = object;
     temps_[0] = new_map_temp;
-    temps_[1] = temp;
+    temps_[1] = fixed_object_temp;
   }
 
   LOperand* object() { return inputs_[0]; }
@@ -2023,6 +2027,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
 };
 
 
+class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
 class LStringAdd: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringAdd(LOperand* left, LOperand* right) {
@@ -2203,7 +2223,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
 };
 
 
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
  public:
   LAllocateObject(LOperand* temp, LOperand* temp2) {
     temps_[0] = temp;
@@ -2218,6 +2238,23 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
 };
 
 
+class LAllocate: public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
 class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")

src/mips/macro-assembler-mips.cc:

@@ -4632,16 +4632,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (save_doubles) {
+    CpuFeatures::Scope scope(FPU);
     // The stack must be allign to 0 modulo 8 for stores with sdc1.
     ASSERT(kDoubleSize == frame_alignment);
     if (frame_alignment > 0) {
       ASSERT(IsPowerOf2(frame_alignment));
       And(sp, sp, Operand(-frame_alignment));  // Align stack.
     }
-    int space = FPURegister::kNumRegisters * kDoubleSize;
+    int space = FPURegister::kMaxNumRegisters * kDoubleSize;
     Subu(sp, sp, Operand(space));
     // Remember: we only need to save every 2nd double FPU value.
-    for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+    for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
      FPURegister reg = FPURegister::from_code(i);
       sdc1(reg, MemOperand(sp, i * kDoubleSize));
     }
@@ -4669,9 +4670,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                     bool do_return) {
   // Optionally restore all double registers.
   if (save_doubles) {
+    CpuFeatures::Scope scope(FPU);
     // Remember: we only need to restore every 2nd double FPU value.
     lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
-    for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+    for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
       FPURegister reg = FPURegister::from_code(i);
       ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
     }
@@ -5448,6 +5450,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
 void MacroAssembler::TestJSArrayForAllocationSiteInfo(
     Register receiver_reg,
     Register scratch_reg,
+    Condition cond,
     Label* allocation_info_present) {
   Label no_info_available;
   ExternalReference new_space_start =
@@ -5461,7 +5464,7 @@ void MacroAssembler::TestJSArrayForAllocationSiteInfo(
   lw(at, MemOperand(at));
   Branch(&no_info_available, gt, scratch_reg, Operand(at));
   lw(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
-  Branch(allocation_info_present, eq, scratch_reg,
+  Branch(allocation_info_present, cond, scratch_reg,
          Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
   bind(&no_info_available);
 }
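Both polarities of the new cond parameter appear in this commit; condensed from the call sites in codegen-mips.cc and lithium-codegen-mips.cc above:

    // Transition generators jump out when a memento IS present:
    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);

    // DoTrapAllocationMemento jumps out when NO memento is present, so
    // falling through means "memento found" and unconditionally deopts:
    __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
    DeoptimizeIf(al, instr->environment());
    __ bind(&fail);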

src/mips/macro-assembler-mips.h:

@@ -1448,6 +1448,7 @@ class MacroAssembler: public Assembler {
   // If allocation info is present, jump to allocation_info_present
   void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
                                         Register scratch_reg,
+                                        Condition cond,
                                         Label* allocation_info_present);
 
  private: