PPC: Use platform specific stubs for vector-based Load/KeyedLoad.

Port 34a1a76ddf

Original commit message:
A hydrogen code stub is not the best approach because it builds a frame
and doesn't have the technology to discard roots at tail call exits.
Platform-specific stubs provide much better performance at this point.

R=verwaest@chromium.org, mbrandy@us.ibm.com

BUG=

Review URL: https://codereview.chromium.org/1019003002

Cr-Commit-Position: refs/heads/master@{#27365}
This commit is contained in:
michael_dawson 2015-03-23 06:25:15 -07:00 committed by Commit bot
parent fdc1745e33
commit 8977d6c973

View File

@@ -12,6 +12,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -4549,15 +4550,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
// Trampoline for the vector-based Load IC: loads the type feedback vector
// into its descriptor register, then emits the platform-specific raw load
// stub body inline.  (Previously this constructed a hydrogen VectorLoadStub
// and tail-jumped to it; the raw stub avoids building a frame.)
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
  VectorRawLoadStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}
// Trampoline for the vector-based KeyedLoad IC: loads the type feedback
// vector into its descriptor register, then emits the platform-specific raw
// keyed load stub body inline.  (Previously this constructed a hydrogen
// VectorKeyedLoadStub and tail-jumped to it.)
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
  VectorRawKeyedLoadStub stub(isolate());
  stub.GenerateForTrampoline(masm);
}
@@ -4575,6 +4576,250 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
// Direct entry point (not reached through the trampoline).
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, /*in_frame=*/false);
}
// Entry point used when the body is emitted inline by the trampoline stub.
void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, /*in_frame=*/true);
}
// Emits the polymorphic probe: |feedback| holds a FixedArray of
// (WeakCell(map), handler-Code) pairs.  Each cached map is compared against
// the receiver's map; on a match control tail-jumps into the handler code,
// otherwise execution branches to |miss|.  |feedback| is clobbered (reused
// first as the handler register, then as the scan pointer); |scratch1|
// receives the receiver map, |scratch2| the cached map under test,
// |scratch3| the array length / end-of-array pointer.  When |is_polymorphic|
// is false the array may still be monomorphic (length 2) and that case is
// routed to |miss| rather than being re-dispatched here.
// NOTE(review): |key|, |vector| and |slot| are unused in this body —
// presumably kept for signature parity with other ports; confirm.
static void HandleArrayCases(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register feedback, Register scratch1,
Register scratch2, Register scratch3,
bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
Register receiver_map = scratch1;
Register cached_map = scratch2;
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
__ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&compare_map);
// Fast path: test the first (map, handler) pair inline before falling into
// the general scan loop.
__ LoadP(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ bne(&start_polymorphic);
// found, now call handler.
Register handler = feedback;
__ LoadP(handler,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
// Skip the Code object header to reach the handler's first instruction.
__ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
Register length = scratch3;
__ bind(&start_polymorphic);
__ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
// If the IC could be monomorphic we have to make sure we don't go past the
// end of the feedback array.
__ CmpSmiLiteral(length, Smi::FromInt(2), r0);
__ beq(miss);
}
Register too_far = length;
Register pointer_reg = feedback;
// +-----+------+------+-----+-----+ ... ----+
// | map | len | wm0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch3
// also need receiver_map (aka scratch1)
// use cached_map (scratch2) to look in the weak map values.
// too_far = one past the last element: feedback + header + len elements.
__ SmiToPtrArrayOffset(r0, length);
__ add(too_far, feedback, r0);
__ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Start scanning at element 2 (the first pair was tested above).
__ addi(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
__ bind(&next_loop);
__ LoadP(cached_map, MemOperand(pointer_reg));
__ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ bne(&prepare_next);
// Match: the handler sits one word after the weak cell.
__ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
__ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
__ bind(&prepare_next);
// Advance past one (weak cell, handler) pair.
__ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
__ cmp(pointer_reg, too_far);
__ blt(&next_loop);
// We exhausted our array of map handler pairs.
__ b(miss);
__ bind(&load_smi_map);
// Smi receivers are keyed on the heap-number map in the feedback (the same
// root HandleMonomorphicCase compares against).
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ b(&compare_map);
}
// Emits the monomorphic probe: |weak_cell| holds a WeakCell wrapping the
// expected map.  If the receiver's map matches, the handler is loaded from
// vector[slot + 1] and control tail-jumps into it; otherwise execution
// branches to |miss|.  |weak_cell| is clobbered (reused for the cached map
// and then the handler); |scratch| receives the receiver map.
// NOTE(review): |key| is unused in this body — presumably kept for
// signature parity with other ports; confirm.
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register weak_cell, Register scratch,
Label* miss) {
// feedback initially contains the feedback array
Label compare_smi_map;
Register receiver_map = scratch;
Register cached_map = weak_cell;
// Move the weak map into the weak_cell register.
__ LoadP(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &compare_smi_map);
__ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ cmp(cached_map, receiver_map);
__ bne(miss);
Register handler = weak_cell;
// handler = vector[slot + 1]; kHeaderSize + kPointerSize skips to the slot
// after the weak cell.
__ SmiToPtrArrayOffset(r0, slot);
__ add(handler, vector, r0);
__ LoadP(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
// Skip the Code object header to reach the handler's first instruction.
__ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
// In microbenchmarks, it made sense to unroll this code so that the call to
// the handler is duplicated for a HeapObject receiver and a Smi receiver.
__ bind(&compare_smi_map);
// Smi receivers match when the cached map is the heap-number map.
__ CompareRoot(weak_cell, Heap::kHeapNumberMapRootIndex);
__ bne(miss);
__ SmiToPtrArrayOffset(r0, slot);
__ add(handler, vector, r0);
__ LoadP(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
}
// Platform-specific vector Load IC dispatcher.  Loads the feedback entry at
// vector[slot] and dispatches on its type:
//   WeakCell           -> monomorphic case (HandleMonomorphicCase),
//   FixedArray         -> polymorphic case (HandleArrayCases),
//   megamorphic symbol -> probe the stub cache,
//   anything else      -> miss.
// NOTE(review): |in_frame| is not referenced in this body — presumably
// reserved for trampoline framing; confirm against other ports.
void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r4
Register name = VectorLoadICDescriptor::NameRegister(); // r5
Register vector = VectorLoadICDescriptor::VectorRegister(); // r6
Register slot = VectorLoadICDescriptor::SlotRegister(); // r3
Register feedback = r7;
Register scratch1 = r8;
// feedback = vector[slot] (slot is a Smi; scale it to a pointer offset).
__ SmiToPtrArrayOffset(r0, slot);
__ add(feedback, vector, r0);
__ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
// NOTE(review): smi_key and key_okay are declared but never bound/used.
Label not_array, smi_key, key_okay, miss;
__ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
__ bne(&try_array);
HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
&miss);
// Is it a fixed array?
__ bind(&try_array);
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ bne(&not_array);
HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, r9,
r10, true, &miss);
__ bind(&not_array);
// Megamorphic: fall back to the global stub cache; any other feedback
// value goes to miss.
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&miss);
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
false, receiver, name, feedback,
scratch1, r9, r10);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
}
// Direct entry point (not reached through the trampoline).
void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, /*in_frame=*/false);
}
// Entry point used when the body is emitted inline by the trampoline stub.
void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, /*in_frame=*/true);
}
// Platform-specific vector KeyedLoad IC dispatcher.  Loads the feedback
// entry at vector[slot] and dispatches on its type:
//   WeakCell           -> monomorphic element load (Smi keys only),
//   FixedArray         -> polymorphic element load (Smi keys only),
//   megamorphic symbol -> tail-jump to the generic megamorphic stub,
//   a Name equal to the key -> map/handler array in the NEXT vector slot,
//   anything else      -> miss.
// NOTE(review): |in_frame| is not referenced in this body — presumably
// reserved for trampoline framing; confirm against other ports.
void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r4
Register key = VectorLoadICDescriptor::NameRegister(); // r5
Register vector = VectorLoadICDescriptor::VectorRegister(); // r6
Register slot = VectorLoadICDescriptor::SlotRegister(); // r3
Register feedback = r7;
Register scratch1 = r8;
// feedback = vector[slot] (slot is a Smi; scale it to a pointer offset).
__ SmiToPtrArrayOffset(r0, slot);
__ add(feedback, vector, r0);
__ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
// NOTE(review): smi_key and key_okay are declared but never bound/used.
Label not_array, smi_key, key_okay, miss;
__ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
__ bne(&try_array);
// Element handlers only cover Smi keys; anything else misses.
__ JumpIfNotSmi(key, &miss);
HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
&miss);
__ bind(&try_array);
// Is it a fixed array?
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ bne(&not_array);
// We have a polymorphic element handler.
__ JumpIfNotSmi(key, &miss);
// NOTE(review): the polymorphic label is bound but never branched to.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r9,
r10, true, &miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ bne(&miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
// Load vector[slot + 1], the map/handler array for this named key.
__ SmiToPtrArrayOffset(r0, slot);
__ add(feedback, vector, r0);
__ LoadP(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
// is_polymorphic=false: the array here may have only a single pair.
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r9,
r10, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,