Hydrogen code stubs for vector-based ICs.

This patch finally allows running and passing tests with vector-based Load
and KeyedLoad ICs.

BUG=
R=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/767743002

Cr-Commit-Position: refs/heads/master@{#25800}

parent c39eae9aac
commit 22302b5179
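For readers skimming the diff below: the new VectorLoadStub/VectorKeyedLoadStub graphs implement a feedback-vector dispatch — load the slot's feedback, handle the monomorphic/polymorphic array case, probe the megamorphic stub cache, otherwise miss. The following is a minimal, standalone C++ sketch of that decision tree only; every type and function name in it is an illustrative stand-in, not a V8 API.

// Illustrative model of the feedback-slot dispatch built by the stubs below.
// Plain C++ stand-ins; none of these names exist in V8.
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct Handler { std::string description; };

// A feedback slot is either empty (miss), megamorphic, or a list of
// (map id, handler) pairs covering the monomorphic/polymorphic cases.
struct FeedbackSlot {
  bool megamorphic = false;
  std::vector<std::pair<int, Handler>> map_handler_pairs;
};

enum class Outcome { kHandler, kMegamorphicProbe, kMiss };

// Mirrors HandleArrayCases + the megamorphic check + TailCallMiss.
Outcome Dispatch(const FeedbackSlot& slot, int receiver_map,
                 Handler* out_handler) {
  if (!slot.map_handler_pairs.empty()) {
    for (const auto& entry : slot.map_handler_pairs) {
      if (entry.first == receiver_map) {  // monomorphic or polymorphic hit
        *out_handler = entry.second;
        return Outcome::kHandler;         // tail-call the handler
      }
    }
    return Outcome::kMiss;                // known maps, none matched
  }
  if (slot.megamorphic) return Outcome::kMegamorphicProbe;  // stub cache probe
  return Outcome::kMiss;                  // uninitialized or premonomorphic
}

int main() {
  FeedbackSlot slot;
  slot.map_handler_pairs = {{7, {"load field #2"}}};
  Handler h;
  Outcome o = Dispatch(slot, 7, &h);
  std::printf("outcome=%d handler=%s\n", static_cast<int>(o),
              o == Outcome::kHandler ? h.description.c_str() : "n/a");
  return 0;
}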
@@ -1438,9 +1438,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));

NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
r4, &miss);
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
r5, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -1453,10 +1458,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {

Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = r3;
Register scratch = r5;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));

// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1098,9 +1098,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}

// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}


@@ -2119,7 +2127,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -2178,7 +2186,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@@ -2245,7 +2253,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@@ -476,25 +476,30 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {


class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name, LOperand* slot,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}

LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }

DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};


class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
@@ -1893,9 +1898,10 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {

const CallInterfaceDescriptor descriptor() { return descriptor_; }

DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

void PrintDataTo(StringStream* stream) OVERRIDE;
@@ -2996,13 +2996,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(r0));

AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ mov(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Operand(Smi::FromInt(index)));
}


@@ -3984,54 +3988,91 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
Register scratch = r4;
Register extra = r5;
Register extra2 = r6;
Register extra3 = r9;

Register scratch = r3;
Register extra = r4;
Register extra2 = r5;
Register extra3 = r6;
#ifdef DEBUG
Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
DCHECK(!FLAG_vector_ics ||
!AreAliased(slot, vector, scratch, extra, extra2, extra3));
#endif

// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, extra, extra2, extra3);
if (!instr->hydrogen()->is_just_miss()) {
DCHECK(!instr->hydrogen()->is_keyed_load());

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra, extra2, extra3);
}

// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r0));

LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
PlatformInterfaceDescriptor* call_descriptor =
instr->descriptor().platform_specific_descriptor();
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
// Make sure we don't emit any additional entries in the constant pool
// before the call to ensure that the CallCodeSize() calculated the correct
// number of instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
// Make sure we don't emit any additional entries in the constant pool
// before the call to ensure that the CallCodeSize() calculated the
// correct
// number of instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
}
__ Jump(target);
}
__ Call(target);
} else {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
PlatformInterfaceDescriptor* call_descriptor =
instr->descriptor().platform_specific_descriptor();
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
// Make sure we don't emit any additional entries in the constant pool
// before the call to ensure that the CallCodeSize() calculated the
// correct
// number of instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
}
__ Call(target);
}
generator.AfterCall();
}
generator.AfterCall();
}
@@ -1412,6 +1412,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));

NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@@ -1429,9 +1434,15 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register result = x0;
Register scratch = x3;
Register scratch = x10;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));

// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1564,9 +1564,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}

// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}


@@ -1675,7 +1683,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@@ -1738,7 +1746,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@@ -1760,7 +1768,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@@ -320,19 +320,23 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {


class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name, LOperand* slot,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}

LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }

DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
@@ -1539,9 +1543,10 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {

CallInterfaceDescriptor descriptor() { return descriptor_; }

DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

void PrintDataTo(StringStream* stream) OVERRIDE;
@@ -2042,23 +2042,33 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(x1));
DCHECK(name.is(x2));

Register scratch = x3;
Register extra = x4;
Register extra2 = x5;
Register extra3 = x6;
Register scratch = x4;
Register extra = x5;
Register extra2 = x6;
Register extra3 = x7;
DCHECK(!FLAG_vector_ics ||
!AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
scratch, extra, extra2, extra3));

// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, extra, extra2, extra3);
if (!instr->hydrogen()->is_just_miss()) {
DCHECK(!instr->hydrogen()->is_keyed_load());

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra, extra2, extra3);
}

// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}


@@ -2066,25 +2076,44 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));

LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
// TODO(all): on ARM we use a call descriptor to specify a storage mode
// but on ARM64 we only have one storage mode so it isn't necessary. Check
// this understanding is correct.
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
// TODO(all): on ARM we use a call descriptor to specify a storage mode
// but on ARM64 we only have one storage mode so it isn't necessary. Check
// this understanding is correct.
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Br(target);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
// TODO(all): on ARM we use a call descriptor to specify a storage mode
// but on ARM64 we only have one storage mode so it isn't necessary. Check
// this understanding is correct.
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
generator.AfterCall();
}
generator.AfterCall();

after_push_argument_ = false;
}

@@ -3367,13 +3396,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(x0));

AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ Mov(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Mov(slot_register, Smi::FromInt(index));
}
@@ -8,6 +8,7 @@
#include "src/code-stubs.h"
#include "src/field-index.h"
#include "src/hydrogen.h"
#include "src/ic/ic.h"
#include "src/lithium.h"

namespace v8 {
@@ -99,6 +100,21 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* shared_info,
HValue* native_context);

// Tail calls handler found at array[map_index + 1].
void TailCallHandler(HValue* receiver, HValue* name, HValue* array,
HValue* map_index, HValue* slot, HValue* vector);

// Tail calls handler_code.
void TailCallHandler(HValue* receiver, HValue* name, HValue* slot,
HValue* vector, HValue* handler_code);

void TailCallMiss(HValue* receiver, HValue* name, HValue* slot,
HValue* vector, bool keyed_load);

// Handle MONOMORPHIC and POLYMORPHIC LoadIC and KeyedLoadIC cases.
void HandleArrayCases(HValue* array, HValue* receiver, HValue* name,
HValue* slot, HValue* vector, bool keyed_load);

private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -2004,11 +2020,126 @@ Handle<Code> KeyedLoadGenericStub::GenerateCode() {
}


void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
HValue* array, HValue* map_index,
HValue* slot, HValue* vector) {
// The handler is at array[map_index + 1]. Compute this with a custom offset
// to HLoadKeyed.
int offset =
GetDefaultHeaderSizeForElementsKind(FAST_ELEMENTS) + kPointerSize;
HValue* handler_code =
Add<HLoadKeyed>(array, map_index, static_cast<HValue*>(NULL),
FAST_ELEMENTS, NEVER_RETURN_HOLE, offset);
TailCallHandler(receiver, name, slot, vector, handler_code);
}


void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
HValue* slot, HValue* vector,
HValue* handler_code) {
VectorLoadICDescriptor descriptor(isolate());
HValue* op_vals[] = {context(), receiver, name, slot, vector};
Add<HCallWithDescriptor>(handler_code, 0, descriptor,
Vector<HValue*>(op_vals, 5), TAIL_CALL);
// We never return here, it is a tail call.
}


void CodeStubGraphBuilderBase::TailCallMiss(HValue* receiver, HValue* name,
HValue* slot, HValue* vector,
bool keyed_load) {
DCHECK(FLAG_vector_ics);
Add<HTailCallThroughMegamorphicCache>(
receiver, name, slot, vector,
HTailCallThroughMegamorphicCache::ComputeFlags(keyed_load, true));
// We never return here, it is a tail call.
}


void CodeStubGraphBuilderBase::HandleArrayCases(HValue* array, HValue* receiver,
HValue* name, HValue* slot,
HValue* vector,
bool keyed_load) {
IfBuilder if_receiver_heap_object(this);
if_receiver_heap_object.IfNot<HIsSmiAndBranch>(receiver);
if_receiver_heap_object.Then();
{
HConstant* constant_two = Add<HConstant>(2);
HConstant* constant_three = Add<HConstant>(3);

HValue* receiver_map = AddLoadMap(receiver, static_cast<HValue*>(NULL));
HValue* start =
keyed_load ? graph()->GetConstant1() : graph()->GetConstant0();
HValue* array_map =
Add<HLoadKeyed>(array, start, static_cast<HValue*>(NULL), FAST_ELEMENTS,
ALLOW_RETURN_HOLE);
IfBuilder if_correct_map(this);
if_correct_map.If<HCompareObjectEqAndBranch>(receiver_map, array_map);
if_correct_map.Then();
{ TailCallHandler(receiver, name, array, start, slot, vector); }
if_correct_map.Else();
{
// If our array has more elements, the ic is polymorphic. Look for the
// receiver map in the rest of the array.
HValue* length =
AddLoadFixedArrayLength(array, static_cast<HValue*>(NULL));
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
constant_two);
start = keyed_load ? constant_three : constant_two;
HValue* key = builder.BeginBody(start, length, Token::LT);
{
HValue* array_map =
Add<HLoadKeyed>(array, key, static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder if_correct_poly_map(this);
if_correct_poly_map.If<HCompareObjectEqAndBranch>(receiver_map,
array_map);
if_correct_poly_map.Then();
{ TailCallHandler(receiver, name, array, key, slot, vector); }
}
builder.EndBody();
}
if_correct_map.End();
}
}


template <>
HValue* CodeStubGraphBuilder<VectorLoadStub>::BuildCodeStub() {
HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
return receiver;
HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);

// If the feedback is an array, then the IC is in the monomorphic or
// polymorphic state.
HValue* feedback = Add<HLoadKeyed>(vector, slot, static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder array_checker(this);
array_checker.If<HCompareMap>(feedback,
isolate()->factory()->fixed_array_map());
array_checker.Then();
{ HandleArrayCases(feedback, receiver, name, slot, vector, false); }
array_checker.Else();
{
// Is the IC megamorphic?
IfBuilder mega_checker(this);
HConstant* megamorphic_symbol =
Add<HConstant>(isolate()->factory()->megamorphic_symbol());
mega_checker.If<HCompareObjectEqAndBranch>(feedback, megamorphic_symbol);
mega_checker.Then();
{
// Probe the stub cache.
Add<HTailCallThroughMegamorphicCache>(
receiver, name, slot, vector,
HTailCallThroughMegamorphicCache::ComputeFlags(false, false));
}
mega_checker.End();
}
array_checker.End();

TailCallMiss(receiver, name, slot, vector, false);
return graph()->GetConstant0();
}


@@ -2018,8 +2149,66 @@ Handle<Code> VectorLoadStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
return receiver;
HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
HConstant* zero = graph()->GetConstant0();

// If the feedback is an array, then the IC is in the monomorphic or
// polymorphic state.
HValue* feedback = Add<HLoadKeyed>(vector, slot, static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder array_checker(this);
array_checker.If<HCompareMap>(feedback,
isolate()->factory()->fixed_array_map());
array_checker.Then();
{
// If feedback[0] is 0, then the IC has element handlers and name should be
// a smi. If feedback[0] is a string, verify that it matches name.
HValue* recorded_name =
Add<HLoadKeyed>(feedback, zero, static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);

IfBuilder recorded_name_is_zero(this);
recorded_name_is_zero.If<HCompareObjectEqAndBranch>(recorded_name, zero);
recorded_name_is_zero.Then();
{ Add<HCheckSmi>(name); }
recorded_name_is_zero.Else();
{
IfBuilder strings_match(this);
strings_match.IfNot<HCompareObjectEqAndBranch>(name, recorded_name);
strings_match.Then();
TailCallMiss(receiver, name, slot, vector, true);
strings_match.End();
}
recorded_name_is_zero.End();

HandleArrayCases(feedback, receiver, name, slot, vector, true);
}
array_checker.Else();
{
// Check if the IC is in generic state.
IfBuilder generic_checker(this);
HConstant* generic_symbol =
Add<HConstant>(isolate()->factory()->generic_symbol());
generic_checker.If<HCompareObjectEqAndBranch>(feedback, generic_symbol);
generic_checker.Then();
{
// Tail-call to the generic KeyedLoadIC, treating it like a handler.
Handle<Code> stub = KeyedLoadIC::generic_stub(isolate());
HValue* constant_stub = Add<HConstant>(stub);
LoadDescriptor descriptor(isolate());
HValue* op_vals[] = {context(), receiver, name};
Add<HCallWithDescriptor>(constant_stub, 0, descriptor,
Vector<HValue*>(op_vals, 3), TAIL_CALL);
// We never return here, it is a tail call.
}
generic_checker.End();
}
array_checker.End();

TailCallMiss(receiver, name, slot, vector, true);
return zero;
}


@@ -2035,14 +2224,15 @@ Handle<Code> MegamorphicLoadStub::GenerateCode() {

template <>
HValue* CodeStubGraphBuilder<MegamorphicLoadStub>::BuildCodeStub() {
// The return address is on the stack.
HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
HValue* name = GetParameter(LoadDescriptor::kNameIndex);

// We shouldn't generate this when FLAG_vector_ics is true because the
// megamorphic case is handled as part of the default stub.
DCHECK(!FLAG_vector_ics);

// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
Add<HTailCallThroughMegamorphicCache>(receiver, name, flags);
Add<HTailCallThroughMegamorphicCache>(receiver, name);

// We never continue.
return graph()->GetConstant0();
@@ -796,7 +796,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kCallNew:
case HValue::kCallNewArray:
case HValue::kCallStub:
case HValue::kCallWithDescriptor:
case HValue::kCapturedObject:
case HValue::kClassOfTestAndBranch:
case HValue::kCompareGeneric:
@@ -863,6 +862,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kBranch:
case HValue::kCallJSFunction:
case HValue::kCallRuntime:
case HValue::kCallWithDescriptor:
case HValue::kChange:
case HValue::kCheckHeapObject:
case HValue::kCheckInstanceType:
@@ -1716,6 +1716,13 @@ std::ostream& HCallStub::PrintDataTo(std::ostream& os) const { // NOLINT
}


Code::Flags HTailCallThroughMegamorphicCache::flags() const {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
return code_flags;
}


std::ostream& HTailCallThroughMegamorphicCache::PrintDataTo(
std::ostream& os) const { // NOLINT
for (int i = 0; i < OperandCount(); i++) {
@@ -2314,15 +2314,19 @@ class HCallJSFunction FINAL : public HCall<1> {
};


enum CallMode { NORMAL_CALL, TAIL_CALL };


class HCallWithDescriptor FINAL : public HInstruction {
public:
static HCallWithDescriptor* New(Zone* zone, HValue* context, HValue* target,
int argument_count,
CallInterfaceDescriptor descriptor,
const Vector<HValue*>& operands) {
const Vector<HValue*>& operands,
CallMode call_mode = NORMAL_CALL) {
DCHECK(operands.length() == descriptor.GetEnvironmentLength());
HCallWithDescriptor* res = new (zone)
HCallWithDescriptor(target, argument_count, descriptor, operands, zone);
HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
target, argument_count, descriptor, operands, call_mode, zone);
return res;
}

@@ -2343,6 +2347,8 @@ class HCallWithDescriptor FINAL : public HInstruction {

HType CalculateInferredType() FINAL { return HType::Tagged(); }

bool IsTailCall() const { return call_mode_ == TAIL_CALL; }

virtual int argument_count() const {
return argument_count_;
}
@@ -2361,10 +2367,14 @@ class HCallWithDescriptor FINAL : public HInstruction {
// The argument count includes the receiver.
HCallWithDescriptor(HValue* target, int argument_count,
CallInterfaceDescriptor descriptor,
const Vector<HValue*>& operands, Zone* zone)
const Vector<HValue*>& operands, CallMode call_mode,
Zone* zone)
: descriptor_(descriptor),
values_(descriptor.GetEnvironmentLength() + 1, zone) {
argument_count_ = argument_count;
values_(descriptor.GetEnvironmentLength() + 1, zone),
argument_count_(argument_count),
call_mode_(call_mode) {
// We can only tail call without any stack arguments.
DCHECK(call_mode != TAIL_CALL || argument_count == 0);
AddOperand(target, zone);
for (int i = 0; i < operands.length(); i++) {
AddOperand(operands[i], zone);
@@ -2385,6 +2395,7 @@ class HCallWithDescriptor FINAL : public HInstruction {
CallInterfaceDescriptor descriptor_;
ZoneList<HValue*> values_;
int argument_count_;
CallMode call_mode_;
};


@@ -5354,34 +5365,90 @@ class HCallStub FINAL : public HUnaryCall {
};


class HTailCallThroughMegamorphicCache FINAL : public HTemplateInstruction<3> {
class HTailCallThroughMegamorphicCache FINAL : public HInstruction {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HTailCallThroughMegamorphicCache,
HValue*, HValue*, Code::Flags);
enum Flags {
NONE = 0,
CALLED_FROM_KEYED_LOAD = 1 << 0,
PERFORM_MISS_ONLY = 1 << 1
};

static Flags ComputeFlags(bool called_from_keyed_load,
bool perform_miss_only) {
Flags flags = NONE;
if (called_from_keyed_load) {
flags = static_cast<Flags>(flags | CALLED_FROM_KEYED_LOAD);
}
if (perform_miss_only) {
flags = static_cast<Flags>(flags | PERFORM_MISS_ONLY);
}
return flags;
}

DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(
HTailCallThroughMegamorphicCache, HValue*, HValue*, HValue*, HValue*,
HTailCallThroughMegamorphicCache::Flags);

DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HTailCallThroughMegamorphicCache,
HValue*, HValue*);

Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::Tagged();
}

virtual int OperandCount() const FINAL OVERRIDE {
return FLAG_vector_ics ? 5 : 3;
}
virtual HValue* OperandAt(int i) const FINAL OVERRIDE { return inputs_[i]; }

HValue* context() const { return OperandAt(0); }
HValue* receiver() const { return OperandAt(1); }
HValue* name() const { return OperandAt(2); }
Code::Flags flags() const { return flags_; }
HValue* slot() const {
DCHECK(FLAG_vector_ics);
return OperandAt(3);
}
HValue* vector() const {
DCHECK(FLAG_vector_ics);
return OperandAt(4);
}
Code::Flags flags() const;

bool is_keyed_load() const { return flags_ & CALLED_FROM_KEYED_LOAD; }
bool is_just_miss() const { return flags_ & PERFORM_MISS_ONLY; }

std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE; // NOLINT

DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache)

protected:
virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
inputs_[i] = value;
}

private:
HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
HValue* name, Code::Flags flags)
HValue* name, HValue* slot, HValue* vector,
Flags flags)
: flags_(flags) {
DCHECK(FLAG_vector_ics);
SetOperandAt(0, context);
SetOperandAt(1, receiver);
SetOperandAt(2, name);
SetOperandAt(3, slot);
SetOperandAt(4, vector);
}

HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
HValue* name)
: flags_(NONE) {
SetOperandAt(0, context);
SetOperandAt(1, receiver);
SetOperandAt(2, name);
}

Code::Flags flags_;
EmbeddedContainer<HValue*, 5> inputs_;
Flags flags_;
};


@@ -5470,13 +5537,11 @@ class HLoadGlobalGeneric FINAL : public HTemplateInstruction<2> {
HValue* global_object() { return OperandAt(1); }
Handle<String> name() const { return name_; }
bool for_typeof() const { return for_typeof_; }
FeedbackVectorICSlot slot() const {
DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
return slot_;
}
FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return FLAG_vector_ics; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
DCHECK(FLAG_vector_ics);
@@ -6529,13 +6594,11 @@ class HLoadNamedGeneric FINAL : public HTemplateInstruction<2> {
HValue* object() const { return OperandAt(1); }
Handle<Object> name() const { return name_; }

FeedbackVectorICSlot slot() const {
DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
return slot_;
}
FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return FLAG_vector_ics; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
DCHECK(FLAG_vector_ics);
@@ -6694,8 +6757,7 @@ class HLoadKeyed FINAL
if (!other->IsLoadKeyed()) return false;
HLoadKeyed* other_load = HLoadKeyed::cast(other);

if (IsDehoisted() && base_offset() != other_load->base_offset())
return false;
if (base_offset() != other_load->base_offset()) return false;
return elements_kind() == other_load->elements_kind();
}

@@ -6808,13 +6870,11 @@ class HLoadKeyedGeneric FINAL : public HTemplateInstruction<3> {
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
FeedbackVectorICSlot slot() const {
DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
return slot_;
}
FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return FLAG_vector_ics; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
DCHECK(FLAG_vector_ics);
@@ -6907,9 +6907,10 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
if (FLAG_vector_ics) {
Handle<SharedFunctionInfo> current_shared =
function_state()->compilation_info()->shared_info();
result->SetVectorAndSlot(
handle(current_shared->feedback_vector(), isolate()),
expr->AsProperty()->PropertyFeedbackSlot());
Handle<TypeFeedbackVector> vector =
handle(current_shared->feedback_vector(), isolate());
FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
result->SetVectorAndSlot(vector, slot);
}
return result;
} else {
@@ -6930,9 +6931,10 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
if (FLAG_vector_ics) {
Handle<SharedFunctionInfo> current_shared =
function_state()->compilation_info()->shared_info();
result->SetVectorAndSlot(
handle(current_shared->feedback_vector(), isolate()),
expr->AsProperty()->PropertyFeedbackSlot());
Handle<TypeFeedbackVector> vector =
handle(current_shared->feedback_vector(), isolate());
FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
result->SetVectorAndSlot(vector, slot);
}
return result;
} else {
@@ -7208,7 +7210,9 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId ast_id,
BailoutId return_id, PropertyAccessType access_type,
bool* has_side_effects) {
if (key->ActualValue()->IsConstant()) {
// TODO(mvstanton): This optimization causes trouble for vector-based
// KeyedLoadICs, turn it off for now.
if (!FLAG_vector_ics && key->ActualValue()->IsConstant()) {
Handle<Object> constant =
HConstant::cast(key->ActualValue())->handle(isolate());
uint32_t array_index;
@@ -652,9 +652,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
if (FLAG_vector_ics) {
// With careful management, we won't have to save slot and vector on
// the stack. Simply handle the possibly missing case first.
// TODO(mvstanton): this code can be more efficient.
__ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
Immediate(isolate()->factory()->the_hole_value()));
__ j(equal, &miss);
__ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
__ ret(0);
} else {
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
ebx, &miss);
}

NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
ebx, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -697,11 +708,17 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {

Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = ebx;
Register scratch = edi;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
Register result = eax;
DCHECK(!result.is(scratch));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));

// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@@ -2831,14 +2831,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(eax));

AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(index)));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}


@@ -3452,45 +3455,81 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;

Register scratch = ebx;
Register extra = eax;
Register extra = edi;
DCHECK(!extra.is(slot) && !extra.is(vector));
DCHECK(!scratch.is(receiver) && !scratch.is(name));
DCHECK(!extra.is(receiver) && !extra.is(name));

// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, extra);
if (!instr->hydrogen()->is_just_miss()) {
if (FLAG_vector_ics) {
__ push(slot);
__ push(vector);
}

// The probe will tail call to a handler if found.
// If --vector-ics is on, then it knows to pop the two args first.
DCHECK(!instr->hydrogen()->is_keyed_load());
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra);

if (FLAG_vector_ics) {
__ pop(vector);
__ pop(slot);
}
}

// Tail call to miss if we ended up here.
if (must_teardown_frame) __ leave();
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(eax));

LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ leave();

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
__ call(code, RelocInfo::CODE_TARGET);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
__ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(target);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(Operand(target)));
__ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
__ call(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(Operand(target)));
__ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
}
generator.AfterCall();
}
generator.AfterCall();
}
@@ -1138,9 +1138,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}

// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}


@@ -2115,7 +2123,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@@ -2176,7 +2184,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
@@ -2241,7 +2249,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
@@ -471,19 +471,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {


class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name, LOperand* slot,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}

LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }

DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
@@ -54,6 +54,14 @@ class PropertyAccessCompiler BASE_EMBEDDED {

Register receiver() const { return registers_[0]; }
Register name() const { return registers_[1]; }
Register slot() const {
DCHECK(FLAG_vector_ics);
return VectorLoadICDescriptor::SlotRegister();
}
Register vector() const {
DCHECK(FLAG_vector_ics);
return VectorLoadICDescriptor::VectorRegister();
}
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
Register scratch3() const { return registers_[4]; }
@@ -92,6 +92,28 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}


void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
__ push(vector);
__ push(slot);
}


void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ pop(slot);
__ pop(vector);
}


void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ add(sp, sp, Operand(2 * kPointerSize));
}


void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -484,6 +506,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
__ bind(miss);
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -582,6 +608,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
@@ -599,6 +626,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();

__ bind(&interceptor_failed);
InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
@@ -628,7 +656,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
Register holder_reg = Frontend(receiver(), name);
Register holder_reg = Frontend(name);

__ push(receiver()); // receiver
__ push(holder_reg);
@@ -669,6 +697,9 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}
FrontendHeader(receiver(), name, &miss);

// Get the value from the cell.
@@ -685,6 +716,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(

Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
__ Ret();

FrontendFooter(name, &miss);
@ -283,7 +283,10 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();

__ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
DCHECK(!FLAG_vector_ics ||
!AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, r4, r5);

LoadIC_PushArgs(masm);

@ -417,7 +420,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();

__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
DCHECK(!FLAG_vector_ics ||
!AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r4, r5);

LoadIC_PushArgs(masm);

@ -818,8 +824,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(r4, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, r3, r4, r5, r6);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, r3, r4, r5, r6);
// Cache miss.
__ b(&miss);

@ -880,8 +886,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));

masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
name, r3, r4, r5, r6);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, name, r3, r4, r5, r6);

// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -41,9 +41,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,

if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
// In case we are compiling an IC for dictionary loads and stores, just
// In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
// Keyed loads with dictionaries shouldn't be here, they go generic.
// The DCHECK is to protect assumptions when --vector-ics is on.
DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

@ -16,7 +18,7 @@ namespace internal {

static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, bool leave_frame,
Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,

@ -94,10 +96,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}

void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;

@ -109,15 +112,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);

// Make sure that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
DCHECK(!extra.is(receiver));
DCHECK(!extra.is(name));
DCHECK(!extra.is(scratch));
DCHECK(!extra2.is(receiver));
DCHECK(!extra2.is(name));
DCHECK(!extra2.is(scratch));
DCHECK(!extra2.is(extra));
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

// Check scratch, extra and extra2 registers are valid.
DCHECK(!scratch.is(no_reg));

@ -125,6 +120,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
Register vector = VectorLoadICDescriptor::VectorRegister();
Register slot = VectorLoadICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif

Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);

@ -147,8 +153,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ and_(scratch, scratch, Operand(mask));

// Probe the primary table.
ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
name, scratch, extra, extra2, extra3);

// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));

@ -157,8 +163,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ and_(scratch, scratch, Operand(mask2));

// Probe the secondary table.
ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
name, scratch, extra, extra2, extra3);

// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
@ -15,6 +15,27 @@ namespace internal {

#define __ ACCESS_MASM(masm)

void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
__ Push(vector);
__ Push(slot);
}

void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ Pop(slot);
__ Pop(vector);
}

void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ Drop(2);
}

void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,

@ -306,6 +327,9 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}
FrontendHeader(receiver(), name, &miss);

// Get the value from the cell.

@ -320,6 +344,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
__ Ret();

FrontendFooter(name, &miss);

@ -530,6 +557,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ B(&success);

__ Bind(miss);
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));

__ Bind(&success);

@ -642,6 +673,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)

@ -658,6 +690,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();

__ Bind(&interceptor_failed);
InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(this->name(), holder_reg, receiver());
} else {

@ -688,7 +721,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
Register holder_reg = Frontend(receiver(), name);
Register holder_reg = Frontend(name);

// Stub never generated for non-global objects that require access checks.
DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
@ -354,7 +354,10 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ASM_LOCATION("LoadIC::GenerateMiss");

__ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
DCHECK(!FLAG_vector_ics ||
!AreAliased(x4, x5, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);

// Perform tail call to the entry.
if (FLAG_vector_ics) {

@ -428,6 +431,9 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();

DCHECK(!FLAG_vector_ics ||
!AreAliased(x10, x11, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

if (FLAG_vector_ics) {

@ -852,8 +858,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(x10, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, x3, x4, x5, x6);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, x3, x4, x5, x6);
// Cache miss.
__ B(&miss);

@ -912,8 +918,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
name, x3, x4, x5, x6);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, name, x3, x4, x5, x6);

// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -42,9 +42,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,

if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
// In case we are compiling an IC for dictionary loads and stores, just
// In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
// Keyed loads with dictionaries shouldn't be here, they go generic.
// The DCHECK is to protect assumptions when --vector-ics is on.
DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

@ -23,7 +25,7 @@ namespace internal {
//
// 'receiver', 'name' and 'offset' registers are preserved on miss.
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, bool leave_frame,
Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
Register offset, Register scratch, Register scratch2,
Register scratch3) {

@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}

void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;

@ -108,6 +111,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
Register vector = VectorLoadICDescriptor::VectorRegister();
Register slot = VectorLoadICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif

Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);

@ -125,8 +139,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
CountTrailingZeros(kPrimaryTableSize, 64));

// Probe the primary table.
ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
name, scratch, extra, extra2, extra3);

// Primary miss: Compute hash for secondary table.
__ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));

@ -134,8 +148,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, kSecondaryTableSize - 1);

// Probe the secondary table.
ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
name, scratch, extra, extra2, extra3);

// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
@ -129,11 +129,17 @@ Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
}

Register PropertyHandlerCompiler::Frontend(Register object_reg,
Handle<Name> name) {
Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
Label miss;
Register reg = FrontendHeader(object_reg, name, &miss);
if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}
Register reg = FrontendHeader(receiver(), name, &miss);
FrontendFooter(name, &miss);
// The footer consumes the vector and slot from the stack if miss occurs.
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
return reg;
}

@ -179,7 +185,7 @@ void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,

Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
FieldIndex field) {
Register reg = Frontend(receiver(), name);
Register reg = Frontend(name);
__ Move(receiver(), reg);
LoadFieldStub stub(isolate(), field);
GenerateTailCall(masm(), stub.GetCode());

@ -189,7 +195,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,

Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
int constant_index) {
Register reg = Frontend(receiver(), name);
Register reg = Frontend(name);
__ Move(receiver(), reg);
LoadConstantStub stub(isolate(), constant_index);
GenerateTailCall(masm(), stub.GetCode());

@ -200,7 +206,14 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
Handle<Name> name) {
Label miss;
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PushVectorAndSlot();
}
NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
GenerateLoadConstant(isolate()->factory()->undefined_value());
FrontendFooter(name, &miss);
return GetCode(kind(), Code::FAST, name);

@ -209,7 +222,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(

Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
Register reg = Frontend(receiver(), name);
Register reg = Frontend(name);
GenerateLoadCallback(reg, callback);
return GetCode(kind(), Code::FAST, name);
}

@ -218,7 +231,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, const CallOptimization& call_optimization) {
DCHECK(call_optimization.is_simple_api_call());
Frontend(receiver(), name);
Frontend(name);
Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(),
scratch1(), false, 0, NULL);

@ -226,6 +239,35 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
}

void NamedLoadHandlerCompiler::InterceptorVectorSlotPush(Register holder_reg) {
if (IC::ICUseVector(kind())) {
if (holder_reg.is(receiver())) {
PushVectorAndSlot();
} else {
DCHECK(holder_reg.is(scratch1()));
PushVectorAndSlot(scratch2(), scratch3());
}
}
}

void NamedLoadHandlerCompiler::InterceptorVectorSlotPop(Register holder_reg,
PopMode mode) {
if (IC::ICUseVector(kind())) {
if (mode == DISCARD) {
DiscardVectorAndSlot();
} else {
if (holder_reg.is(receiver())) {
PopVectorAndSlot();
} else {
DCHECK(holder_reg.is(scratch1()));
PopVectorAndSlot(scratch2(), scratch3());
}
}
}
}

Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
LookupIterator* it) {
// So far the most popular follow ups for interceptor loads are FIELD and

@ -256,7 +298,12 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
}
}

Register reg = Frontend(receiver(), it->name());
Label miss;
InterceptorVectorSlotPush(receiver());
Register reg = FrontendHeader(receiver(), it->name(), &miss);
FrontendFooter(it->name(), &miss);
InterceptorVectorSlotPop(reg);

if (inline_followup) {
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.

@ -274,7 +321,13 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(

set_type_for_object(holder());
set_holder(real_named_property_holder);
Register reg = Frontend(interceptor_reg, it->name());

Label miss;
InterceptorVectorSlotPush(interceptor_reg);
Register reg = FrontendHeader(interceptor_reg, it->name(), &miss);
FrontendFooter(it->name(), &miss);
// We discard the vector and slot now because we don't miss below this point.
InterceptorVectorSlotPop(reg, DISCARD);

switch (it->state()) {
case LookupIterator::ACCESS_CHECK:

@ -301,7 +354,7 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(

Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
Handle<Name> name, Handle<JSFunction> getter) {
Frontend(receiver(), name);
Frontend(name);
GenerateLoadViaGetter(masm(), type(), receiver(), getter);
return GetCode(kind(), Code::FAST, name);
}

@ -389,7 +442,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {

Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
Frontend(receiver(), name);
Frontend(name);
GenerateStoreViaSetter(masm(), type(), receiver(), setter);

return GetCode(kind(), Code::FAST, name);

@ -399,7 +452,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
const CallOptimization& call_optimization) {
Frontend(receiver(), name);
Frontend(name);
Register values[] = {value()};
GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
receiver(), scratch1(), true, 1, values);
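The rewritten Frontend above pushes the vector and slot before the map checks, lets FrontendFooter consume them on the miss path, and discards them explicitly on success. A toy sketch of that save/consume-or-discard stack discipline (plain C++, not MacroAssembler code; all names here are made up for illustration):

#include <stack>

// Toy model: the "stack" holds the saved vector and slot while the frontend
// runs; a miss pops them for the miss handler, success simply drops them.
struct HandlerFrontendSketch {
  std::stack<int> stack_;

  void PushVectorAndSlot(int vector, int slot) {
    stack_.push(vector);
    stack_.push(slot);
  }
  void PopVectorAndSlot() {      // miss path: hand vector/slot back
    stack_.pop();
    stack_.pop();
  }
  void DiscardVectorAndSlot() {  // success path: drop the two saved words
    stack_.pop();
    stack_.pop();
  }

  bool Frontend(bool map_check_succeeds) {
    PushVectorAndSlot(/*vector=*/1, /*slot=*/2);
    if (!map_check_succeeds) {
      PopVectorAndSlot();        // the footer consumes them before tail-calling
      return false;
    }
    DiscardVectorAndSlot();
    return true;
  }
};

int main() {
  HandlerFrontendSketch f;
  return f.Frontend(true) && !f.Frontend(false) ? 0 : 1;
}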
@ -38,10 +38,21 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {

virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }

Register Frontend(Register object_reg, Handle<Name> name);
// Frontend loads from receiver(), returns holder register which may be
// different.
Register Frontend(Handle<Name> name);
void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
Register scratch1, Register scratch2);

// When FLAG_vector_ics is true, handlers that have the possibility of missing
// will need to save and pass these to miss handlers.
void PushVectorAndSlot() { PushVectorAndSlot(vector(), slot()); }
void PushVectorAndSlot(Register vector, Register slot);
void PopVectorAndSlot() { PopVectorAndSlot(vector(), slot()); }
void PopVectorAndSlot(Register vector, Register slot);

void DiscardVectorAndSlot();

// TODO(verwaest): Make non-static.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,

@ -170,6 +181,12 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
Handle<ExecutableAccessorInfo> callback);
void GenerateLoadCallback(const CallOptimization& call_optimization,
Handle<Map> receiver_map);

// Helper emits no code if vector-ics are disabled.
void InterceptorVectorSlotPush(Register holder_reg);
enum PopMode { POP, DISCARD };
void InterceptorVectorSlotPop(Register holder_reg, PopMode mode = POP);

void GenerateLoadInterceptor(Register holder_reg);
void GenerateLoadInterceptorWithFollowup(LookupIterator* it,
Register holder_reg);
@ -47,6 +47,28 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}

void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
__ push(vector);
__ push(slot);
}

void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ pop(slot);
__ pop(vector);
}

void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ add(esp, Immediate(2 * kPointerSize));
}

void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {

@ -104,6 +126,7 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
DCHECK(!FLAG_vector_ics);
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(eax, scratch1);
__ ret(0);

@ -489,6 +512,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}

@ -588,7 +615,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
}
__ push(holder_reg);
__ push(this->name());

InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)

@ -612,6 +639,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
}

InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {

@ -644,7 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
Register holder_reg = Frontend(receiver(), name);
Register holder_reg = Frontend(name);

__ pop(scratch1());  // remove the return address
__ push(receiver());

@ -690,7 +718,9 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;

if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();

@ -713,6 +743,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
// The code above already loads the result into the return register.
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
__ ret(0);

FrontendFooter(name, &miss);
@ -44,10 +44,13 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Label miss;

if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
// In case we are compiling an IC for dictionary loads and stores, just
(kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
// In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
// Keyed loads with dictionaries shouldn't be here, they go generic.
// The DCHECK is to protect assumptions when --vector-ics is on.
DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
@ -692,8 +692,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, ebx, no_reg);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
// Cache miss.
__ jmp(&miss);

@ -854,7 +854,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, false, StoreDescriptor::ReceiverRegister(),
masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), ebx, no_reg);

// Cache miss: Jump to runtime.
@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

@ -16,7 +18,7 @@ namespace internal {

static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, bool leave_frame,
Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register name, Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {

@ -56,6 +58,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif

if (IC::ICUseVector(ic_kind)) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
__ pop(VectorLoadICDescriptor::VectorRegister());
__ pop(VectorLoadICDescriptor::SlotRegister());
}

if (leave_frame) __ leave();

// Jump to the first instruction in the code stub.

@ -100,6 +109,17 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));

if (IC::ICUseVector(ic_kind)) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
Register vector = VectorLoadICDescriptor::VectorRegister();
Register slot = VectorLoadICDescriptor::SlotRegister();
DCHECK(!offset.is(vector) && !offset.is(slot));

__ pop(vector);
__ pop(slot);
}

if (leave_frame) __ leave();

// Jump to the first instruction in the code stub.

@ -113,10 +133,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}

void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Label miss;

// Assert that code is valid. The multiplying code relies on the entry size

@ -159,8 +180,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(kCacheIndexShift == kPointerSizeLog2);

// Probe the primary table.
ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
offset, extra);
ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
receiver, offset, extra);

// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));

@ -172,8 +193,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);

// Probe the secondary table.
ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
offset, extra);
ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
receiver, offset, extra);

// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
47 src/ic/ic.cc

@ -665,8 +665,6 @@ void IC::ConfigureVectorState(IC::State new_state) {
nexus->ConfigureGeneric();
} else if (new_state == PREMONOMORPHIC) {
nexus->ConfigurePremonomorphic();
} else if (new_state == MEGAMORPHIC) {
nexus->ConfigureMegamorphic();
} else {
UNREACHABLE();
}

@ -971,7 +969,8 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
CopyICToMegamorphicCache(name);
}
if (UseVector()) {
ConfigureVectorState(MEGAMORPHIC);
ConfigureVectorState(kind() == Code::KEYED_LOAD_IC ? GENERIC
: MEGAMORPHIC);
} else {
set_target(*megamorphic_stub());
}

@ -2317,10 +2316,23 @@ RUNTIME_FUNCTION(LoadIC_Miss) {
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Load(receiver, key));
} else {
DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Load(receiver, key));
}
} else {
DCHECK(args.length() == 2);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate);

@ -2962,10 +2974,23 @@ RUNTIME_FUNCTION(LoadIC_MissFromStubFailure) {
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Load(receiver, key));
} else {
DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Load(receiver, key));
}
} else {
DCHECK(args.length() == 2);
LoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
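The two LoadIC_Miss hunks above make the runtime miss handler look at the feedback slot's kind before constructing a nexus, because a keyed load with a string key may fall back to the plain LoadIC miss path. A standalone sketch of that dispatch shape (hypothetical types, not the V8 runtime API):

#include <iostream>
#include <string>

// Hypothetical stand-ins for the feedback vector and its per-slot IC kind.
enum class SlotKind { kLoadIC, kKeyedLoadIC };

struct FeedbackVector {
  SlotKind GetKind(int slot) const {
    return slot == 0 ? SlotKind::kLoadIC : SlotKind::kKeyedLoadIC;
  }
};

// The miss handler picks the nexus/IC type that matches the slot that sent
// it here instead of assuming it always came from a plain LoadIC.
std::string HandleLoadMiss(const FeedbackVector& vector, int slot) {
  if (vector.GetKind(slot) == SlotKind::kLoadIC) {
    return "LoadIC miss";       // would build a LoadICNexus + LoadIC
  }
  return "KeyedLoadIC miss";    // would build a KeyedLoadICNexus + KeyedLoadIC
}

int main() {
  FeedbackVector v;
  std::cout << HandleLoadMiss(v, 0) << "\n";
  std::cout << HandleLoadMiss(v, 1) << "\n";
}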
12 src/ic/ic.h

@ -133,6 +133,12 @@ class IC {
static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
Isolate* isolate);

static bool ICUseVector(Code::Kind kind) {
return (FLAG_vector_ics &&
(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC)) ||
kind == Code::CALL_IC;
}

protected:
// Get the call-site target; used for determining the state.
Handle<Code> target() const { return target_; }

@ -152,12 +158,6 @@ class IC {
inline void set_target(Code* code);
bool is_target_set() { return target_set_; }

static bool ICUseVector(Code::Kind kind) {
return (FLAG_vector_ics &&
(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC)) ||
kind == Code::CALL_IC;
}

bool UseVector() const {
bool use = ICUseVector(kind());
// If we are supposed to use the nexus, verify the nexus is non-null.
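The ic.h hunks above move ICUseVector from the protected section to the public section so that the stub cache and handler compilers can query it. A minimal standalone sketch of the same predicate idea (plain C++; the enum and flag below are illustrative stand-ins, not V8's types):

#include <iostream>

// Hypothetical stand-ins for v8's Code::Kind and the --vector-ics flag.
enum class IcKind { kLoadIC, kKeyedLoadIC, kStoreIC, kCallIC };
static bool flag_vector_ics = true;

// Mirrors the shape of IC::ICUseVector: load ICs consult a feedback vector
// only when --vector-ics is on, while the call IC always does.
static bool IcUsesVector(IcKind kind) {
  return (flag_vector_ics &&
          (kind == IcKind::kLoadIC || kind == IcKind::kKeyedLoadIC)) ||
         kind == IcKind::kCallIC;
}

int main() {
  std::cout << IcUsesVector(IcKind::kLoadIC) << "\n";   // 1 while the flag is on
  std::cout << IcUsesVector(IcKind::kStoreIC) << "\n";  // 0
}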
@ -92,6 +92,26 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}

void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
__ Push(vector, slot);
}

void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ Pop(vector, slot);
}

void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ Addu(sp, sp, Operand(2 * kPointerSize));
}

void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {

@ -481,6 +501,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
__ bind(miss);
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}

@ -582,6 +606,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method).

@ -598,6 +623,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();

__ bind(&interceptor_failed);
InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(receiver(), holder_reg, this->name());
} else {

@ -627,7 +653,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
Register holder_reg = Frontend(receiver(), name);
Register holder_reg = Frontend(name);

__ Push(receiver(), holder_reg);  // Receiver.
__ li(at, Operand(callback));  // Callback info.

@ -667,6 +693,9 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}

FrontendHeader(receiver(), name, &miss);

@ -683,6 +712,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
__ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
@ -24,9 +24,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,

if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
// In case we are compiling an IC for dictionary loads and stores, just
// In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
// Keyed loads with dictionaries shouldn't be here, they go generic.
// The DCHECK is to protect assumptions when --vector-ics is on.
DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@ -290,7 +290,10 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();

__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
DCHECK(!FLAG_vector_ics ||
!AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);

LoadIC_PushArgs(masm);

@ -426,7 +429,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();

__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
DCHECK(!FLAG_vector_ics ||
!AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);

LoadIC_PushArgs(masm);

@ -816,8 +822,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(t0, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, a3, t0, t1, t2);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, a3, t0, t1, t2);
// Cache miss.
__ Branch(&miss);

@ -886,8 +892,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
name, a3, t0, t1, t2);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, name, a3, t0, t1, t2);

// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_MIPS

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

@ -16,7 +18,7 @@ namespace internal {

static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, bool leave_frame,
Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,

@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}

void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;

@ -105,15 +108,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);

// Make sure that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
DCHECK(!extra.is(receiver));
DCHECK(!extra.is(name));
DCHECK(!extra.is(scratch));
DCHECK(!extra2.is(receiver));
DCHECK(!extra2.is(name));
DCHECK(!extra2.is(scratch));
DCHECK(!extra2.is(extra));
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

// Check register validity.
DCHECK(!scratch.is(no_reg));

@ -121,6 +116,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
Register vector = VectorLoadICDescriptor::VectorRegister();
Register slot = VectorLoadICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif

Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);

@ -140,8 +146,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, Operand(mask));

// Probe the primary table.
ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
name, scratch, extra, extra2, extra3);

// Primary miss: Compute hash for secondary probe.
__ srl(at, name, kCacheIndexShift);

@ -151,8 +157,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, Operand(mask2));

// Probe the secondary table.
ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
name, scratch, extra, extra2, extra3);

// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
@ -92,6 +92,26 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}

void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
__ Push(vector, slot);
}

void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ Pop(vector, slot);
}

void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ Daddu(sp, sp, Operand(2 * kPointerSize));
}

void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {

@ -482,6 +502,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
__ bind(miss);
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}

@ -583,6 +607,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method).

@ -599,6 +624,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();

__ bind(&interceptor_failed);
InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(receiver(), holder_reg, this->name());
} else {

@ -628,7 +654,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
Register holder_reg = Frontend(receiver(), name);
Register holder_reg = Frontend(name);

__ Push(receiver(), holder_reg);  // Receiver.
__ li(at, Operand(callback));  // Callback info.

@ -668,6 +694,9 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}

FrontendHeader(receiver(), name, &miss);

@ -684,6 +713,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
__ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
@ -24,9 +24,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,

if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
// In case we are compiling an IC for dictionary loads and stores, just
// In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
// Keyed loads with dictionaries shouldn't be here, they go generic.
// The DCHECK is to protect assumptions when --vector-ics is on.
DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@ -288,7 +288,10 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is on the stack.
Isolate* isolate = masm->isolate();

__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
DCHECK(!FLAG_vector_ics ||
!AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);

LoadIC_PushArgs(masm);

@ -424,7 +427,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();

__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
DCHECK(!FLAG_vector_ics ||
!AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);

LoadIC_PushArgs(masm);

@ -825,8 +831,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(a4, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, a3, a4, a5, a6);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, a3, a4, a5, a6);
// Cache miss.
__ Branch(&miss);

@ -895,8 +901,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
name, a3, a4, a5, a6);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, name, a3, a4, a5, a6);

// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {
@ -16,7 +18,7 @@ namespace internal {

static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, bool leave_frame,
Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}

void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;

@ -106,15 +109,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);

// Make sure that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
DCHECK(!extra.is(receiver));
DCHECK(!extra.is(name));
DCHECK(!extra.is(scratch));
DCHECK(!extra2.is(receiver));
DCHECK(!extra2.is(name));
DCHECK(!extra2.is(scratch));
DCHECK(!extra2.is(extra));
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));

// Check register validity.
DCHECK(!scratch.is(no_reg));
@ -122,6 +117,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));

#ifdef DEBUG
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
Register vector = VectorLoadICDescriptor::VectorRegister();
Register slot = VectorLoadICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif

Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
@ -141,8 +147,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, Operand(mask));

// Probe the primary table.
ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
name, scratch, extra, extra2, extra3);

// Primary miss: Compute hash for secondary probe.
__ dsrl(at, name, kCacheIndexShift);
@ -152,8 +158,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, Operand(mask2));

// Probe the secondary table.
ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
name, scratch, extra, extra2, extra3);

// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.

@ -52,10 +52,10 @@ class StubCache {
// Arguments extra, extra2 and extra3 may be used to pass additional scratch
// registers. Set to no_reg if not needed.
// If leave_frame is true, then exit a frame before the tail call.
void GenerateProbe(MacroAssembler* masm, Code::Flags flags, bool leave_frame,
Register receiver, Register name, Register scratch,
Register extra, Register extra2 = no_reg,
Register extra3 = no_reg);
void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2 = no_reg, Register extra3 = no_reg);

enum Table { kPrimary, kSecondary };

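The declaration above now threads the IC kind into the probe, so the generated code can tell when the vector and slot registers must survive the lookup. For orientation, the code GenerateProbe emits is a two-level hash lookup over a primary and a secondary table. Below is a minimal, self-contained C++ sketch of that scheme; the types, table sizes, and hash mixing are illustrative assumptions, not V8's real constants.

// --- Illustrative sketch (not part of the patch) ---
#include <array>
#include <cstddef>
#include <cstdint>

struct Entry {
  uintptr_t key = 0;    // hash derived from (name, map)
  uintptr_t value = 0;  // handler code address, 0 means empty
};

constexpr size_t kPrimarySize = 2048;
constexpr size_t kSecondarySize = 512;

std::array<Entry, kPrimarySize> primary;
std::array<Entry, kSecondarySize> secondary;

size_t PrimaryIndex(uintptr_t name_hash, uintptr_t map_bits) {
  return (name_hash + map_bits) & (kPrimarySize - 1);
}

size_t SecondaryIndex(uintptr_t name_hash, size_t primary_index) {
  return (primary_index - name_hash) & (kSecondarySize - 1);
}

// Returns the cached handler or 0 on a miss; the generated assembly instead
// tail-calls the handler on a hit and falls through to the miss path.
uintptr_t Probe(uintptr_t name_hash, uintptr_t map_bits) {
  size_t p = PrimaryIndex(name_hash, map_bits);
  if (primary[p].key == name_hash + map_bits) return primary[p].value;
  size_t s = SecondaryIndex(name_hash, p);
  if (secondary[s].key == name_hash + map_bits) return secondary[s].value;
  return 0;  // cache miss: caller enters the runtime
}

// On a miss the runtime installs the freshly compiled handler, evicting the
// previous primary entry into the secondary table.
void Install(uintptr_t name_hash, uintptr_t map_bits, uintptr_t handler) {
  size_t p = PrimaryIndex(name_hash, map_bits);
  size_t s = SecondaryIndex(name_hash, p);
  secondary[s] = primary[p];
  primary[p] = {name_hash + map_bits, handler};
}
// --- end sketch ---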
@ -15,6 +15,28 @@ namespace internal {

#define __ ACCESS_MASM(masm)

void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
__ Push(vector);
__ Push(slot);
}

void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ Pop(slot);
__ Pop(vector);
}

void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ addp(rsp, Immediate(2 * kPointerSize));
}

void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
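These helpers encode a simple stack discipline: the vector and slot are pushed before the handler body runs, popped again on the miss path so the miss handler sees them back in registers, and merely discarded when the fast path succeeds. A rough stand-alone C++ model of that discipline follows; the toy stack and function names are hypothetical, not V8's MacroAssembler API.

// --- Illustrative sketch (not part of the patch) ---
#include <cstdint>
#include <vector>

struct FakeStack {
  std::vector<uintptr_t> slots;
  void Push(uintptr_t v) { slots.push_back(v); }
  uintptr_t Pop() { uintptr_t v = slots.back(); slots.pop_back(); return v; }
  void Drop(int n) { slots.resize(slots.size() - n); }  // DiscardVectorAndSlot
};

void LoadHandlerShape(FakeStack& stack, uintptr_t vector, uintptr_t slot,
                      bool miss) {
  stack.Push(vector);  // PushVectorAndSlot()
  stack.Push(slot);
  if (miss) {
    // Miss path: restore both values before tail-calling the miss handler.
    uintptr_t restored_slot = stack.Pop();    // PopVectorAndSlot()
    uintptr_t restored_vector = stack.Pop();
    (void)restored_slot;
    (void)restored_vector;
  } else {
    // Success path: the values are no longer needed, just unwind the stack.
    stack.Drop(2);
  }
}
// --- end sketch ---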
@ -484,6 +506,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@ -582,6 +608,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
}
__ Push(holder_reg);
__ Push(this->name());
InterceptorVectorSlotPush(holder_reg);

// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
@ -599,6 +626,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ ret(0);

__ bind(&interceptor_failed);
InterceptorVectorSlotPop(holder_reg);
__ Pop(this->name());
__ Pop(holder_reg);
if (must_preserve_receiver_reg) {
@ -631,7 +659,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
Register holder_reg = Frontend(receiver(), name);
Register holder_reg = Frontend(name);

__ PopReturnAddressTo(scratch1());
__ Push(receiver());
@ -677,6 +705,9 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}
FrontendHeader(receiver(), name, &miss);

// Get the value from the cell.
@ -695,6 +726,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(

Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
__ ret(0);

FrontendFooter(name, &miss);

@ -81,9 +81,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,

if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
// In case we are compiling an IC for dictionary loads and stores, just
// In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
// Keyed loads with dictionaries shouldn't be here, they go generic.
// The DCHECK is to protect assumptions when --vector-ics is on.
DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));

@ -589,8 +589,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, rbx, no_reg);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, rbx, no_reg);
// Cache miss.
__ jmp(&miss);

@ -858,7 +858,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, false, StoreDescriptor::ReceiverRegister(),
masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), rbx, no_reg);

// Cache miss: Jump to runtime.

@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {
@ -16,7 +18,7 @@ namespace internal {

static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, bool leave_frame,
Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
@ -82,10 +84,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}

void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
@ -107,6 +110,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(extra2.is(no_reg));
DCHECK(extra3.is(no_reg));

#ifdef DEBUG
// If vector-based ics are in use, ensure that scratch doesn't conflict with
// the vector and slot registers, which need to be preserved for a handler
// call or miss.
if (IC::ICUseVector(ic_kind)) {
Register vector = VectorLoadICDescriptor::VectorRegister();
Register slot = VectorLoadICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch));
}
#endif

Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);

@ -123,8 +137,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));

// Probe the primary table.
ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
scratch);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
name, scratch);

// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
@ -136,8 +150,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));

// Probe the secondary table.
ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
scratch);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
name, scratch);

// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.

@ -47,6 +47,28 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}

void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
__ push(vector);
__ push(slot);
}

void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ pop(slot);
__ pop(vector);
}

void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ add(esp, Immediate(2 * kPointerSize));
}

void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@ -104,6 +126,7 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
DCHECK(!FLAG_vector_ics);
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(eax, scratch1);
__ ret(0);
@ -489,6 +512,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@ -588,7 +615,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
}
__ push(holder_reg);
__ push(this->name());

InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
@ -612,6 +639,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
}

InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
@ -644,7 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
Register holder_reg = Frontend(receiver(), name);
Register holder_reg = Frontend(name);

__ pop(scratch1()); // remove the return address
__ push(receiver());
@ -690,7 +718,9 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;

if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
@ -713,6 +743,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
// The code above already loads the result into the return register.
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
__ ret(0);

FrontendFooter(name, &miss);

@ -44,10 +44,13 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Label miss;

if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
// In case we are compiling an IC for dictionary loads and stores, just
(kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
// In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
// Keyed loads with dictionaries shouldn't be here, they go generic.
// The DCHECK is to protect assumptions when --vector-ics is on.
DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));

@ -694,8 +694,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, ebx, no_reg);
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
// Cache miss.
__ jmp(&miss);

@ -856,7 +856,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, false, StoreDescriptor::ReceiverRegister(),
masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), ebx, no_reg);

// Cache miss: Jump to runtime.

@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_X87

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {
@ -16,7 +18,7 @@ namespace internal {

static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, bool leave_frame,
Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register name, Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {
@ -56,6 +58,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif

if (IC::ICUseVector(ic_kind)) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
__ pop(VectorLoadICDescriptor::VectorRegister());
__ pop(VectorLoadICDescriptor::SlotRegister());
}

if (leave_frame) __ leave();

// Jump to the first instruction in the code stub.
@ -100,6 +109,18 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));

if (IC::ICUseVector(ic_kind)) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
Register vector = VectorLoadICDescriptor::VectorRegister();
Register slot = VectorLoadICDescriptor::SlotRegister();
DCHECK(!offset.is(vector) && !offset.is(slot));

__ pop(vector);
__ pop(slot);
}

if (leave_frame) __ leave();

// Jump to the first instruction in the code stub.
@ -113,10 +134,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}

void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Label miss;

// Assert that code is valid. The multiplying code relies on the entry size
@ -159,8 +181,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(kCacheIndexShift == kPointerSizeLog2);

// Probe the primary table.
ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
offset, extra);
ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
receiver, offset, extra);

// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
@ -172,8 +194,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);

// Probe the secondary table.
ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
offset, extra);
ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
receiver, offset, extra);

// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.

@ -1347,10 +1347,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {

Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = a3;
Register scratch = t1;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));

// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@ -1564,8 +1570,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
t0, &miss);
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(t0, t1, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));

NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
t1, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));

@ -2889,13 +2889,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(a0));

AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ li(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}

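The rewritten helper binds the two registers the vector-IC calling convention expects: the feedback vector itself and the slot index encoded as a Smi. A small stand-alone C++ model of that bookkeeping is sketched below; the types and the Smi tagging shift are illustrative assumptions, not the real Lithium or Smi APIs.

// --- Illustrative sketch (not part of the patch) ---
#include <cstdint>
#include <vector>

// Hypothetical stand-ins for the feedback vector and the two fixed registers.
using ToyFeedbackVector = std::vector<uintptr_t>;
struct ToyRegisters {
  uintptr_t vector_reg;
  intptr_t slot_reg;
};

// Assumption: a Smi is a small integer tagged by a left shift of one bit
// (the common 32-bit encoding); the real shift is platform-dependent.
intptr_t ToySmiFromInt(int value) { return static_cast<intptr_t>(value) << 1; }

void EmitVectorLoadICRegistersModel(ToyRegisters& regs,
                                    const ToyFeedbackVector& vector,
                                    int slot_index) {
  // __ li(vector_register, vector): materialize the vector constant.
  regs.vector_reg = reinterpret_cast<uintptr_t>(vector.data());
  // __ li(slot_register, Operand(Smi::FromInt(index))): Smi-encoded slot.
  regs.slot_reg = ToySmiFromInt(slot_index);
}
// --- end sketch ---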
@ -3947,44 +3951,73 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));

Register scratch = a3;
Register extra = t0;
Register extra2 = t1;
Register extra3 = t2;
Register scratch = t0;
Register extra = t1;
Register extra2 = t2;
Register extra3 = t5;
#ifdef DEBUG
Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
DCHECK(!FLAG_vector_ics ||
!AreAliased(slot, vector, scratch, extra, extra2, extra3));
#endif

// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, extra, extra2, extra3);
if (!instr->hydrogen()->is_just_miss()) {
DCHECK(!instr->hydrogen()->is_keyed_load());

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra, extra2, extra3);
}

// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(v0));

LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
__ Call(code, RelocInfo::CODE_TARGET);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
__ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(target);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(target);
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
__ Call(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(target);
}
generator.AfterCall();
}
generator.AfterCall();
}

@ -1103,9 +1103,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}

// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}

@ -2066,7 +2074,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@ -2123,7 +2131,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@ -2192,7 +2200,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@ -473,19 +473,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {

class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name, LOperand* slot,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}

LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }

DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
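The instruction now carries five fixed inputs instead of three, so the register allocator pins the slot and vector operands alongside context, receiver, and name. A stripped-down model of that fixed-operand layout (a toy template, not the actual Lithium class hierarchy) is sketched below.

// --- Illustrative sketch (not part of the patch) ---
#include <cstddef>

struct LOperandLike;  // opaque placeholder in this sketch

// Toy analogue of LTemplateInstruction<0, N, 0>: N fixed input operands.
template <size_t kInputs>
struct ToyInstruction {
  LOperandLike* inputs_[kInputs] = {};
};

struct ToyTailCallThroughMegamorphicCache : ToyInstruction<5> {
  ToyTailCallThroughMegamorphicCache(LOperandLike* context,
                                     LOperandLike* receiver,
                                     LOperandLike* name, LOperandLike* slot,
                                     LOperandLike* vector) {
    inputs_[0] = context;
    inputs_[1] = receiver;
    inputs_[2] = name;
    inputs_[3] = slot;    // new: feedback slot operand
    inputs_[4] = vector;  // new: feedback vector operand
  }
  LOperandLike* slot() { return inputs_[3]; }
  LOperandLike* vector() { return inputs_[4]; }
};
// --- end sketch ---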
@ -1857,9 +1861,10 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {

const CallInterfaceDescriptor descriptor() { return descriptor_; }

DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

void PrintDataTo(StringStream* stream) OVERRIDE;

@ -1347,10 +1347,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {

Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = a3;
Register scratch = a4;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));

// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@ -1564,8 +1570,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
a4, &miss);
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(a4, a5, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));

NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
a5, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));

@ -2859,13 +2859,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(a0));

AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ li(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}

@ -3979,44 +3983,73 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));

Register scratch = a3;
Register extra = a4;
Register extra2 = a5;
Register extra3 = a6;
Register scratch = a4;
Register extra = a5;
Register extra2 = a6;
Register extra3 = t1;
#ifdef DEBUG
Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
DCHECK(!FLAG_vector_ics ||
!AreAliased(slot, vector, scratch, extra, extra2, extra3));
#endif

// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, extra, extra2, extra3);
if (!instr->hydrogen()->is_just_miss()) {
DCHECK(!instr->hydrogen()->is_keyed_load());

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra, extra2, extra3);
}

// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(v0));

LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
__ Call(code, RelocInfo::CODE_TARGET);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
__ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(target);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(target);
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
__ Call(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(target);
}
generator.AfterCall();
}
generator.AfterCall();
}

@ -1103,9 +1103,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}

// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}

@ -2064,7 +2072,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@ -2123,7 +2131,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@ -2191,7 +2199,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@ -472,19 +472,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {

class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name, LOperand* slot,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}

LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }

DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
@ -1856,9 +1860,10 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {

const CallInterfaceDescriptor descriptor() { return descriptor_; }

DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

void PrintDataTo(StringStream* stream) OVERRIDE;

@ -98,6 +98,10 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
}
array->set(kWithTypesIndex, Smi::FromInt(0));
array->set(kGenericCountIndex, Smi::FromInt(0));
// Fill the indexes with zeros.
for (int i = 0; i < index_count; i++) {
array->set(kReservedIndexCount + i, Smi::FromInt(0));
}

// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
@ -134,8 +138,7 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
static bool ClearLogic(Heap* heap, int ic_age, Code::Kind kind,
InlineCacheState state) {
if (FLAG_cleanup_code_caches_at_gc &&
(kind == Code::CALL_IC || state == MEGAMORPHIC || state == GENERIC ||
state == POLYMORPHIC || heap->flush_monomorphic_ics() ||
(kind == Code::CALL_IC || heap->flush_monomorphic_ics() ||
// TODO(mvstanton): is this ic_age granular enough? it comes from
// the SharedFunctionInfo which may change on a different schedule
// than ic targets.
@ -258,8 +261,6 @@ InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
return UNINITIALIZED;
} else if (feedback == *vector()->PremonomorphicSentinel(isolate)) {
return PREMONOMORPHIC;
} else if (feedback == *vector()->MegamorphicSentinel(isolate)) {
return MEGAMORPHIC;
} else if (feedback == *vector()->GenericSentinel(isolate)) {
return GENERIC;
} else if (feedback->IsFixedArray()) {
@ -322,11 +323,6 @@ void KeyedLoadICNexus::ConfigureGeneric() {
}

void KeyedLoadICNexus::ConfigureMegamorphic() {
SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
}

void LoadICNexus::ConfigureMegamorphic() {
SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
}

@ -348,7 +348,6 @@ class KeyedLoadICNexus : public FeedbackNexus {

void Clear(Code* host);

void ConfigureMegamorphic();
void ConfigureGeneric();
void ConfigurePremonomorphic();
// name can be a null handle for element loads.

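For context, the nexus code above derives the IC's state directly from the value stored in its feedback slot: dedicated sentinel objects mean uninitialized, premonomorphic, megamorphic, or (for keyed loads) generic, and structured feedback means map-based handlers. A compact stand-alone C++ model of that decision, with hypothetical sentinel constants in place of the real heap objects, is sketched below.

// --- Illustrative sketch (not part of the patch) ---
#include <cstdint>

enum class ToyICState {
  UNINITIALIZED,
  PREMONOMORPHIC,
  MONOMORPHIC,
  POLYMORPHIC,
  MEGAMORPHIC,
  GENERIC
};

// Hypothetical sentinel constants standing in for the vector's sentinel
// objects; the real code compares against heap-allocated sentinel symbols.
constexpr uintptr_t kUninitializedSentinel = 1;
constexpr uintptr_t kPremonomorphicSentinel = 2;
constexpr uintptr_t kMegamorphicSentinel = 3;
constexpr uintptr_t kGenericSentinel = 4;

ToyICState StateFromFeedbackModel(uintptr_t feedback, bool is_fixed_array) {
  if (feedback == kUninitializedSentinel) return ToyICState::UNINITIALIZED;
  if (feedback == kPremonomorphicSentinel) return ToyICState::PREMONOMORPHIC;
  if (feedback == kMegamorphicSentinel) return ToyICState::MEGAMORPHIC;
  if (feedback == kGenericSentinel) return ToyICState::GENERIC;
  // A fixed array of (map, handler) pairs means polymorphic feedback; a
  // single weak cell would mean monomorphic in the real implementation.
  return is_fixed_array ? ToyICState::POLYMORPHIC : ToyICState::MONOMORPHIC;
}
// --- end sketch ---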
@ -524,6 +524,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(r8, r9, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));

NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
r9, &miss);
@ -873,10 +878,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {

Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = rbx;
Register scratch = rdi;
Register result = rax;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));

// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.

@ -2852,13 +2852,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(rax));

AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(rax));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ Move(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Move(slot_register, Smi::FromInt(index));
}

@ -3539,43 +3543,67 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));

Register scratch = rbx;
Register scratch = rdi;
DCHECK(!scratch.is(receiver) && !scratch.is(name));
DCHECK(!FLAG_vector_ics ||
!AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
scratch));

// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, no_reg);
if (!instr->hydrogen()->is_just_miss()) {
// The probe will tail call to a handler if found.
DCHECK(!instr->hydrogen()->is_keyed_load());
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, no_reg);
}

// Tail call to miss if we ended up here.
if (must_teardown_frame) __ leave();
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(rax));

LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ leave();

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code));
__ call(code, RelocInfo::CODE_TARGET);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
__ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(target);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code));
__ call(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
}
generator.AfterCall();
}
generator.AfterCall();
}

@ -1118,9 +1118,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}

// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}

@ -2067,7 +2075,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@ -2140,7 +2148,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
@ -2233,7 +2241,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

@ -480,19 +480,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {

class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LOperand* receiver, LOperand* name,
LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}

LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }

DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")

@ -333,8 +333,19 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();

NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
ebx, &miss);
if (FLAG_vector_ics) {
// With careful management, we won't have to save slot and vector on
// the stack. Simply handle the possibly missing case first.
// TODO(mvstanton): this code can be more efficient.
__ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
Immediate(isolate()->factory()->the_hole_value()));
__ j(equal, &miss);
__ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
__ ret(0);
} else {
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
ebx, &miss);
}
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@ -377,10 +388,17 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {

Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = ebx;
Register scratch = edi;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
Register result = eax;
DCHECK(!result.is(scratch));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));

// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.

StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.

@ -3126,14 +3126,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(eax));

AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(Smi::FromInt(index)));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
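EmitVectorLoadICRegisters materializes two values before the IC call: the feedback vector itself in the vector register and the slot index, Smi-encoded, in the slot register. A small standalone sketch of the 32-bit Smi encoding relied on by Immediate(Smi::FromInt(index)) (assuming the usual one-tag-bit scheme on ia32):

#include <cstdint>

// A 32-bit Smi keeps the integer in the upper 31 bits with a zero tag bit.
int32_t SmiFromInt32(int32_t value) { return value << 1; }
int32_t SmiToInt32(int32_t smi) { return smi >> 1; }

// The slot register ends up holding SmiFromInt32(vector->GetIndex(slot)).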
@ -3738,45 +3741,81 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;

Register scratch = ebx;
Register extra = eax;
Register extra = edi;
DCHECK(!extra.is(slot) && !extra.is(vector));
DCHECK(!scratch.is(receiver) && !scratch.is(name));
DCHECK(!extra.is(receiver) && !extra.is(name));

// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();

// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, extra);
if (!instr->hydrogen()->is_just_miss()) {
if (FLAG_vector_ics) {
__ push(slot);
__ push(vector);
}

// The probe will tail call to a handler if found.
// If --vector-ics is on, then it knows to pop the two args first.
DCHECK(!instr->hydrogen()->is_keyed_load());
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra);

if (FLAG_vector_ics) {
__ pop(vector);
__ pop(slot);
}
}

// Tail call to miss if we ended up here.
if (must_teardown_frame) __ leave();
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}
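The rewritten DoTailCallThroughMegamorphicCache introduces a small protocol: when --vector-ics is on, slot and vector are pushed before the stub-cache probe (a handler found by the probe pops them itself) and popped again on the fall-through path before the keyed or non-keyed miss. A standalone model of that protocol with a plain stack (not the macro assembler):

#include <stack>

void ProbeWithSavedVectorAndSlot(std::stack<int>* stack, int slot, int vector,
                                 bool vector_ics, bool handler_found) {
  if (vector_ics) {
    stack->push(slot);    // saved: the probe may clobber these registers
    stack->push(vector);
  }
  if (handler_found) {
    // A tail-called handler consumes the two extra arguments itself.
    if (vector_ics) {
      stack->pop();
      stack->pop();
    }
    return;
  }
  if (vector_ics) {
    stack->pop();  // vector, popped in reverse push order
    stack->pop();  // slot
  }
  // Fall through to LoadIC::GenerateMiss or KeyedLoadIC::GenerateMiss.
}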

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(eax));

LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ leave();

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
__ call(code, RelocInfo::CODE_TARGET);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
__ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(target);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(Operand(target)));
__ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
__ call(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(Operand(target)));
__ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
}
generator.AfterCall();
}
generator.AfterCall();
}
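DoCallWithDescriptor now splits on IsTailCall(): the tail-call path tears down the eager frame and jumps, so no safepoint or AfterCall bookkeeping is recorded, while the regular path keeps the existing call-plus-safepoint sequence. A standalone sketch with a hypothetical emitter interface (not the V8 MacroAssembler):

struct Emitter {
  void Leave() {}                        // drop the current frame
  void Jump(const void* /*target*/) {}
  void Call(const void* /*target*/) {}
  void RecordSafepointAfterCall() {}
};

void EmitDescriptorCall(Emitter* masm, const void* target, bool is_tail_call,
                        bool needs_eager_frame) {
  if (is_tail_call) {
    if (needs_eager_frame) masm->Leave();  // tear down before jumping
    masm->Jump(target);                    // control does not return here
  } else {
    masm->Call(target);
    masm->RecordSafepointAfterCall();      // lazy-deopt safepoint, as before
  }
}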
@ -1157,9 +1157,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}

// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}
@ -2113,7 +2121,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@ -2174,7 +2182,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
@ -2239,7 +2247,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
@ -487,19 +487,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name, LOperand* slot,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}

LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }

DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
@ -19165,6 +19165,9 @@ TEST(ExternalInternalizedStringCollectedAtTearDown) {
TEST(ExternalInternalizedStringCollectedAtGC) {
// TODO(mvstanton): vector ics need weak support.
if (i::FLAG_vector_ics) return;
int destroyed = 0;
{ LocalContext env;
v8::HandleScope handle_scope(env->GetIsolate());
@ -40,11 +40,17 @@ TEST(VectorStructure) {
CHECK_EQ(0, vector->ICSlots());

FeedbackVectorSpec one_icslot(0, 1);
if (FLAG_vector_ics) {
one_icslot.SetKind(0, Code::CALL_IC);
}
vector = factory->NewTypeFeedbackVector(one_icslot);
CHECK_EQ(0, vector->Slots());
CHECK_EQ(1, vector->ICSlots());

FeedbackVectorSpec spec(3, 5);
if (FLAG_vector_ics) {
for (int i = 0; i < 5; i++) spec.SetKind(i, Code::CALL_IC);
}
vector = factory->NewTypeFeedbackVector(spec);
CHECK_EQ(3, vector->Slots());
CHECK_EQ(5, vector->ICSlots());
@ -295,8 +301,8 @@ TEST(VectorLoadICStates) {
CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
CHECK_EQ(NULL, nexus.FindFirstMap());

// After a collection, state should be reset to PREMONOMORPHIC.
// After a collection, state should not be reset to PREMONOMORPHIC.
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
}
}
@ -3346,6 +3346,8 @@ static void CheckVectorICCleared(Handle<JSFunction> f, int ic_slot_index) {
TEST(IncrementalMarkingPreservesMonomorphicIC) {
if (i::FLAG_always_opt) return;
// TODO(mvstanton): vector-ics need to treat maps weakly.
if (i::FLAG_vector_ics) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@ -3450,14 +3452,24 @@ TEST(IncrementalMarkingPreservesPolymorphicIC) {
*v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));

Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
CHECK(ic_before->ic_state() == POLYMORPHIC);
if (FLAG_vector_ics) {
CheckVectorIC(f, 0, POLYMORPHIC);
CHECK(ic_before->ic_state() == DEFAULT);
} else {
CHECK(ic_before->ic_state() == POLYMORPHIC);
}

// Fire context dispose notification.
SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);

Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
CHECK(ic_after->ic_state() == POLYMORPHIC);
if (FLAG_vector_ics) {
CheckVectorIC(f, 0, POLYMORPHIC);
CHECK(ic_after->ic_state() == DEFAULT);
} else {
CHECK(ic_after->ic_state() == POLYMORPHIC);
}
}
@ -3733,6 +3745,8 @@ TEST(Regress169209) {
i::FLAG_stress_compaction = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
// TODO(mvstanton): vector ics need weak support.
if (i::FLAG_vector_ics) return;

CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@ -4086,6 +4100,8 @@ static int AllocationSitesCount(Heap* heap) {
TEST(EnsureAllocationSiteDependentCodesProcessed) {
// TODO(mvstanton): vector ics need weak support!
if (FLAG_vector_ics) return;
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
@ -4148,6 +4164,8 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
TEST(CellsInOptimizedCodeAreWeak) {
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
// TODO(mvstanton): vector-ics need to treat maps weakly.
if (i::FLAG_vector_ics) return;
i::FLAG_weak_embedded_objects_in_optimized_code = true;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
@ -4190,6 +4208,8 @@ TEST(CellsInOptimizedCodeAreWeak) {
TEST(ObjectsInOptimizedCodeAreWeak) {
// TODO(mvstanton): vector ics need weak support!
if (FLAG_vector_ics) return;
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
i::FLAG_weak_embedded_objects_in_optimized_code = true;
i::FLAG_allow_natives_syntax = true;
@ -4233,6 +4253,8 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
TEST(NoWeakHashTableLeakWithIncrementalMarking) {
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
if (!i::FLAG_incremental_marking) return;
// TODO(mvstanton): vector ics need weak support.
if (FLAG_vector_ics) return;
i::FLAG_weak_embedded_objects_in_optimized_code = true;
i::FLAG_allow_natives_syntax = true;
i::FLAG_compilation_cache = false;
@ -4256,8 +4278,9 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
"bar%d();"
"bar%d();"
"bar%d();"
"%%OptimizeFunctionOnNextCall(bar%d);"
"bar%d();", i, i, i, i, i, i, i, i);
"%%OptimizeFunctionOnNextCall(bar%d);"
"bar%d();",
i, i, i, i, i, i, i, i);
CompileRun(source.start());
}
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
@ -4419,6 +4442,8 @@ TEST(WeakMapInMonomorphicLoadIC) {
TEST(WeakMapInPolymorphicLoadIC) {
// TODO(mvstanton): vector-ics need to treat maps weakly.
if (i::FLAG_vector_ics) return;
CheckWeakness(
"function loadIC(obj) {"
" return obj.name;"