[ic] Properly integrate the CallIC into Ignition.

Drop the deprecated CallConstructStub and remove the use of the CallICStub
from fullcodegen: that feedback has been completely unused ever since
Crankshaft was removed, so we can safely unlink all the CallIC machinery
from fullcodegen and completely nuke the CallICStub and the
CallICTrampolineStub (which in turn lets us transitively nuke the unused
CreateAllocationSiteStub and CreateWeakCellStub).

Instead, the CallIC logic is now integrated directly into Ignition, as part
of the bytecode handlers for [[Call]] and [[Construct]]. Some follow-up
cleanup of the way the Array constructor feedback is integrated remains,
but that is much easier now.
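As a rough illustration of what those bytecode handlers now do inline, here
is a minimal, self-contained C++ model of the call feedback lattice (all
names in this sketch are invented; the real implementation lives in the
interpreter's CodeStubAssembler code and holds the cached target in a
WeakCell inside the FeedbackVector):

#include <cstdio>

struct JSFunctionLike { int id; };  // stand-in for a JSFunction

struct CallFeedbackSlot {
  enum class State { kUninitialized, kMonomorphic, kMegamorphic };
  State state = State::kUninitialized;
  const JSFunctionLike* target = nullptr;  // held weakly in the real thing
  int call_count = 0;

  void Record(const JSFunctionLike* callee) {
    ++call_count;  // the call count is bumped on every call
    switch (state) {
      case State::kUninitialized:
        state = State::kMonomorphic;  // first callee becomes the cached target
        target = callee;
        break;
      case State::kMonomorphic:
        if (target != callee) {  // a second distinct callee: give up
          state = State::kMegamorphic;
          target = nullptr;
        }
        break;
      case State::kMegamorphic:
        break;  // terminal state; nothing more to learn
    }
  }
};

int main() {
  JSFunctionLike f{1}, g{2};
  CallFeedbackSlot slot;
  slot.Record(&f);  // uninitialized -> monomorphic(f)
  slot.Record(&f);  // stays monomorphic
  slot.Record(&g);  // monomorphic(f) vs. g -> megamorphic
  std::printf("count=%d megamorphic=%d\n", slot.call_count,
              slot.state == CallFeedbackSlot::State::kMegamorphic);
}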

Bug: v8:5517, v8:6399, v8:6409, v8:6679
Change-Id: I0a6c6046faceca9b1606577bc9e63d9295e44619
Reviewed-on: https://chromium-review.googlesource.com/603609
Commit-Queue: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47196}
Author: Benedikt Meurer, 2017-08-07 14:39:11 +02:00 (committed by Commit Bot)
parent c39c6eba00
commit ee350c3149
44 changed files with 57 additions and 2615 deletions


@@ -761,8 +761,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -1122,163 +1120,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
__ Push(cp);
__ CallStub(stub);
__ Pop(cp);
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into r5.
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r5, FieldMemOperand(r5, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r6;
Register weak_value = r9;
__ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
__ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &done);
__ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ b(ne, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the slot either some other function or an
// AllocationSite.
__ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &megamorphic);
__ jmp(&done);
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ str(r4, FieldMemOperand(r5, FeedbackVector::kFeedbackSlotsOffset));
__ jmp(&done);
// An uninitialized cache is patched with the function
__ bind(&initialize);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &not_array_function);
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r5, r5, Operand(FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
__ ldr(r4, FieldMemOperand(r5, 0));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r5, 0));
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
GenerateRecordCallTarget(masm);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into r2, or undefined.
__ ldr(r2, FieldMemOperand(r5, FeedbackVector::kFeedbackSlotsOffset));
__ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(eq, &feedback_register_initialized);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(r2, r5);
// Pass function as new target.
__ mov(r3, r1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
__ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&non_function);
__ mov(r3, r1);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.

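For readers skimming the deleted assembly: GenerateRecordCallTarget above
encodes a small state machine over the feedback slot, with a special case
that gives the Array constructor an AllocationSite instead of a WeakCell
(and a cleared WeakCell lets the slot become monomorphic again). A hedged
C++ restatement of the transitions (the enum and helper are invented for
illustration, not V8 types):

#include <cassert>

enum class Feedback { kUninitialized, kWeakCell, kAllocationSite, kMegamorphic };

Feedback Transition(Feedback current, bool same_target, bool is_array_function) {
  switch (current) {
    case Feedback::kUninitialized:
      // First recorded call: the Array constructor gets an AllocationSite
      // (so elements-kind transitions can be tracked), others a WeakCell.
      return is_array_function ? Feedback::kAllocationSite : Feedback::kWeakCell;
    case Feedback::kWeakCell:
      // Monomorphic hit keeps the state; any other target goes megamorphic.
      return same_target ? current : Feedback::kMegamorphic;
    case Feedback::kAllocationSite:
      // Only repeated calls to the Array function keep the site.
      return is_array_function ? current : Feedback::kMegamorphic;
    case Feedback::kMegamorphic:
      return current;  // terminal state
  }
  return current;
}

int main() {
  Feedback f = Transition(Feedback::kUninitialized, false, /*array*/ true);
  assert(f == Feedback::kAllocationSite);
  f = Transition(f, false, /*array*/ false);  // some other constructor
  assert(f == Feedback::kMegamorphic);
}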

@@ -99,54 +99,12 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
// r4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r0, r1, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@@ -409,17 +367,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
r1, // target to call checked to be Array function
r2, // allocation site feedback if available, undefined otherwise
r3 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@@ -752,8 +752,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -1195,183 +1193,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
Register feedback_vector, Register index,
Register new_target) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
__ Push(argc, function, feedback_vector, index);
__ Push(cp);
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
__ Pop(cp);
__ Pop(index, feedback_vector, function, argc);
__ SmiUntag(argc);
}
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector, Register index,
Register new_target, Register scratch1,
Register scratch2, Register scratch3) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
feedback_vector, index, new_target));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
// function : the function to call
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
Register feedback = scratch1;
Register feedback_map = scratch2;
Register feedback_value = scratch3;
__ Add(feedback, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Ldr(feedback,
FieldMemOperand(feedback, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if feedback value is a WeakCell or a Symbol, but it's
// harmless to read at this position in a symbol (see static asserts in
// feedback-vector.h).
Label check_allocation_site;
__ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(function, feedback_value);
__ B(eq, &done);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ B(eq, &done);
__ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ B(ne, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(feedback_value, &initialize);
__ B(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the slot either some other function or an
// AllocationSite.
__ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &megamorphic);
__ B(&done);
__ Bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ Bind(&megamorphic);
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
__ Str(scratch2,
FieldMemOperand(scratch1, FeedbackVector::kFeedbackSlotsOffset));
__ B(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ Bind(&initialize);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &not_array_function);
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index, new_target);
__ B(&done);
__ Bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
feedback_vector, index, new_target);
__ Bind(&done);
// Increment the call count for all function calls.
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(scratch1, scratch1,
Operand(FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
__ Ldr(scratch2, FieldMemOperand(scratch1, 0));
__ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
__ Str(scratch2, FieldMemOperand(scratch1, 0));
}
void CallConstructStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallConstructStub::Generate");
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
Register function = x1;
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(function, &non_function);
// Check that the function is a JSFunction.
Register object_type = x10;
__ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
&non_function);
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into x2, or undefined.
__ Ldr(x2, FieldMemOperand(x5, FeedbackVector::kFeedbackSlotsOffset));
__ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
__ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
&feedback_register_initialized);
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(x2, x5);
__ Mov(x3, function);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
__ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
__ Br(x4);
__ Bind(&non_function);
__ Mov(x3, function);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
if (check_mode_ == RECEIVER_IS_UNKNOWN) {


@@ -112,26 +112,6 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
Register registers[] = {x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
// x1: tagged value to put in the weak cell
Register registers[] = {x2, x3, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 function the function to call
@@ -139,33 +119,6 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x0, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x0, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
// x4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {x0, x1, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
@@ -438,17 +391,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
x1, // target to call checked to be Array function
x2, // allocation site feedback if available, undefined otherwise
x3 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@@ -1391,40 +1391,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (not including receiver)
// -- r1 : target to call verified to be Array function
// -- r2 : allocation site feedback if available, undefined otherwise.
// -- r3 : address of the first argument
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ mov(r5, Operand::Zero());
__ push(r5);
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
// Push the arguments. r3, r5, r6 will be modified.
Generate_InterpreterPushArgs(masm, r0, r3, r5, r6);
// Array constructor expects constructor in r3. It is same as r1 here.
__ mov(r3, r1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1414,39 +1414,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (not including receiver)
// -- x1 : target to call verified to be Array function
// -- x2 : allocation site feedback if available, undefined otherwise.
// -- x3 : address of the first argument
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver.
__ Push(xzr);
// Add a stack check before pushing arguments.
Generate_StackOverflowCheck(masm, x0, x7, &stack_overflow);
// Push the arguments. x3, x5, x6, x7 will be modified.
Generate_InterpreterPushArgs(masm, x0, x3, x5, x6, x7);
// Array constructor expects constructor in x3. It is same as call target.
__ mov(x3, x1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Unreachable();
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -136,7 +136,6 @@ namespace internal {
ASM(InterpreterPushArgsThenCallWithFinalSpread) \
ASM(InterpreterPushArgsThenConstruct) \
ASM(InterpreterPushArgsThenConstructFunction) \
ASM(InterpreterPushArgsThenConstructArray) \
ASM(InterpreterPushArgsThenConstructWithFinalSpread) \
ASM(InterpreterEnterBytecodeAdvance) \
ASM(InterpreterEnterBytecodeDispatch) \


@@ -1179,49 +1179,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the target to call checked to be Array function.
// -- ebx : the allocation site feedback
// -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
Label stack_overflow;
// We need two scratch registers. Register edi is available, push edx onto
// stack.
__ Push(edx);
// Push arguments and move return address to the top of stack.
// The eax register is readonly. The ecx register will be modified. The edx
// and edi registers will be modified but restored to their original values.
Generate_InterpreterPushZeroAndArgsAndReturnAddress(masm, eax, ecx, edx, edi,
1, &stack_overflow);
// Restore edx.
__ Pop(edx);
// Array constructor expects constructor in edi. It is same as edx here.
__ Move(edi, edx);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
// Pop the temporary registers, so that return address is on top of stack.
__ Pop(edx);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// This should be unreachable.
__ int3();
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1372,41 +1372,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call checked to be Array function.
// -- a2 : allocation site feedback.
// -- a3 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver.
__ push(zero_reg);
Generate_StackOverflowCheck(masm, a0, t1, t4, &stack_overflow);
// This function modifies a3, t1, and t4.
Generate_InterpreterPushArgs(masm, a0, a3, t1, t4);
// ArrayConstructor stub expects constructor in a3. Set it here.
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ break_(0xCC);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1378,41 +1378,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call checked to be Array function.
// -- a2 : allocation site feedback.
// -- a3 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver.
__ push(zero_reg);
Generate_StackOverflowCheck(masm, a0, a5, a6, &stack_overflow);
// This function modifies a3, a5 and a6.
Generate_InterpreterPushArgs(masm, a0, a3, a5, a6);
// ArrayConstructor stub expects constructor in a3. Set it here.
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ break_(0xCC);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1419,40 +1419,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (not including receiver)
// -- r4 : target to call verified to be Array function
// -- r5 : allocation site feedback if available, undefined otherwise.
// -- r6 : address of the first argument
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ li(r0, Operand::Zero());
__ push(r0);
Generate_StackOverflowCheck(masm, r3, ip, &stack_overflow);
// Push the arguments. r6, r8, r3 will be modified.
Generate_InterpreterPushArgs(masm, r3, r6, r3, r8);
// Array constructor expects constructor in r6. It is same as r4 here.
__ mr(r6, r4);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1413,40 +1413,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (not including receiver)
// -- r3 : target to call verified to be Array function
// -- r4 : allocation site feedback if available, undefined otherwise.
// -- r5 : address of the first argument
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ LoadImmP(r0, Operand::Zero());
__ push(r0);
Generate_StackOverflowCheck(masm, r2, ip, &stack_overflow);
// Push the arguments. r6, r8, r3 will be modified.
Generate_InterpreterPushArgs(masm, r6, r5, r2, r7);
// Array constructor expects constructor in r5. It is same as r3 here.
__ LoadRR(r5, r3);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable Code.
__ bkpt(0);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1150,52 +1150,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdx : the target to call checked to be Array function.
// -- rbx : the allocation site feedback
// -- rcx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
Label stack_overflow;
// Number of values to be pushed.
__ Move(r8, rax);
// Add a stack check before pushing arguments.
Generate_StackOverflowCheck(masm, r8, rdi, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
// rcx and rdi will be modified.
Generate_InterpreterPushArgs(masm, r8, rcx, rdi);
// Push return address in preparation for the tail-call.
__ PushReturnAddressFrom(kScratchRegister);
// Array constructor expects constructor in rdi. It is same as rdx here.
__ Move(rdi, rdx);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
// Throw stack overflow exception.
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// This should be unreachable.
__ int3();
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -63,19 +63,6 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
LoadGlobalWithVectorDescriptor(isolate));
}
// static
Callable CodeFactory::CallIC(Isolate* isolate, ConvertReceiverMode mode) {
CallICStub stub(isolate, mode);
return make_callable(stub);
}
// static
Callable CodeFactory::CallICTrampoline(Isolate* isolate,
ConvertReceiverMode mode) {
CallICTrampolineStub stub(isolate, mode);
return make_callable(stub);
}
// static
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
return Callable(language_mode == STRICT
@@ -396,12 +383,6 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(
InterpreterPushArgsThenConstructDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterPushArgsThenConstructArray(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, InterpreterPushArgsThenConstructArray),
InterpreterPushArgsThenConstructArrayDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// Note: If we ever use fpregs in the interpreter then we will need to


@@ -28,10 +28,6 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode);
static Callable CallIC(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable CallICTrampoline(
Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable StoreGlobalIC(Isolate* isolate, LanguageMode mode);
static Callable StoreGlobalICInOptimizedCode(Isolate* isolate,
LanguageMode mode);
@@ -103,7 +99,6 @@
InterpreterPushArgsMode mode);
static Callable InterpreterPushArgsThenConstruct(
Isolate* isolate, InterpreterPushArgsMode mode);
static Callable InterpreterPushArgsThenConstructArray(Isolate* isolate);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
static Callable InterpreterOnStackReplacement(Isolate* isolate);


@@ -571,144 +571,6 @@ TF_STUB(LoadIndexedInterceptorStub, CodeStubAssembler) {
vector);
}
void CallICStub::PrintState(std::ostream& os) const { // NOLINT
os << convert_mode();
}
// TODO(ishell): Move to CallICAssembler.
TF_STUB(CallICStub, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* target = Parameter(Descriptor::kTarget);
Node* argc = Parameter(Descriptor::kActualArgumentsCount);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
// TODO(bmeurer): The slot should actually be an IntPtr, but TurboFan's
// SimplifiedLowering cannot deal with IntPtr machine type properly yet.
slot = ChangeInt32ToIntPtr(slot);
// Static checks to assert it is safe to examine the type feedback element.
// We don't know that we have a weak cell. We might have a private symbol
// or an AllocationSite, but the memory is safe to examine.
// AllocationSite::kTransitionInfoOrBoilerplateOffset - contains a Smi or
// pointer to FixedArray. WeakCell::kValueOffset - contains a JSFunction or
// Smi(0) Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
// computed, meaning that it can't appear to be a pointer. If the low bit is
// 0, then hash is computed, but the 0 bit prevents the field from appearing
// to be a pointer.
STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
STATIC_ASSERT(AllocationSite::kTransitionInfoOrBoilerplateOffset ==
WeakCell::kValueOffset &&
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
// Increment the call count.
// TODO(bmeurer): Would it be beneficial to use Int32Add on 64-bit?
Comment("increment call count");
Node* call_count = LoadFeedbackVectorSlot(vector, slot, 1 * kPointerSize);
Node* new_count = SmiAdd(call_count, SmiConstant(1));
// Count is Smi, so we don't need a write barrier.
StoreFeedbackVectorSlot(vector, slot, new_count, SKIP_WRITE_BARRIER,
1 * kPointerSize);
Label call_function(this), extra_checks(this), call(this);
// The checks. First, does function match the recorded monomorphic target?
Node* feedback_element = LoadFeedbackVectorSlot(vector, slot);
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
Node* is_monomorphic = WordEqual(target, feedback_value);
GotoIfNot(is_monomorphic, &extra_checks);
// The compare above could have been a SMI/SMI comparison. Guard against
// this convincing us that we have a monomorphic JSFunction.
Node* is_smi = TaggedIsSmi(target);
Branch(is_smi, &extra_checks, &call_function);
BIND(&call_function);
{
// Call using CallFunction builtin.
Callable callable =
CodeFactory::CallFunction(isolate(), stub->convert_mode());
TailCallStub(callable, context, target, argc);
}
BIND(&extra_checks);
{
Label mark_megamorphic(this), create_weak_cell(this, Label::kDeferred);
Comment("check if megamorphic");
// Check if it is a megamorphic target.
Node* is_megamorphic =
WordEqual(feedback_element,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
GotoIf(is_megamorphic, &call);
Comment("check if uninitialized");
// Check if it is uninitialized target first.
Node* is_uninitialized = WordEqual(
feedback_element,
HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
GotoIfNot(is_uninitialized, &mark_megamorphic);
Comment("handle unitinitialized");
// If it is not a JSFunction mark it as megamorphic.
Node* is_smi = TaggedIsSmi(target);
GotoIf(is_smi, &mark_megamorphic);
// Check if function is an object of JSFunction type.
Node* is_js_function = IsJSFunction(target);
GotoIfNot(is_js_function, &mark_megamorphic);
// Check if the function belongs to the same native context.
Node* native_context =
LoadNativeContext(LoadObjectField(target, JSFunction::kContextOffset));
Node* is_same_native_context =
WordEqual(native_context, LoadNativeContext(context));
Branch(is_same_native_context, &create_weak_cell, &mark_megamorphic);
BIND(&create_weak_cell);
{
// Wrap the {target} in a WeakCell and remember it.
Comment("create weak cell");
CreateWeakCellInFeedbackVector(vector, SmiTag(slot), target);
// Call using CallFunction builtin.
Goto(&call_function);
}
BIND(&mark_megamorphic);
{
// Mark it as a megamorphic.
// MegamorphicSentinel is created as a part of Heap::InitialObjects
// and will not move during a GC. So it is safe to skip write barrier.
DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
StoreFeedbackVectorSlot(
vector, slot,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
SKIP_WRITE_BARRIER);
Goto(&call);
}
}
BIND(&call);
{
// Call using call builtin.
Comment("call using Call builtin");
Callable callable_call = CodeFactory::Call(isolate(), stub->convert_mode());
TailCallStub(callable_call, context, target, argc);
}
}
TF_STUB(CallICTrampolineStub, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* target = Parameter(Descriptor::kTarget);
Node* argc = Parameter(Descriptor::kActualArgumentsCount);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = LoadFeedbackVectorForStub();
Callable callable = CodeFactory::CallIC(isolate(), stub->convert_mode());
TailCallStub(callable, context, target, argc, slot, vector);
}
void JSEntryStub::FinishCode(Handle<Code> code) {
Handle<FixedArray> handler_table =
code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
@@ -778,17 +640,6 @@ TF_STUB(GetPropertyStub, CodeStubAssembler) {
Return(var_result.value());
}
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
CreateAllocationSiteStub stub(isolate);
stub.GetCode();
}
void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
CreateWeakCellStub stub(isolate);
stub.GetCode();
}
// TODO(ishell): move to builtins-handler-gen.
TF_STUB(StoreSlowElementStub, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -853,19 +704,6 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
entry_hook(function, stack_pointer);
}
// TODO(ishell): move to builtins.
TF_STUB(CreateAllocationSiteStub, CodeStubAssembler) {
Return(CreateAllocationSiteInFeedbackVector(Parameter(Descriptor::kVector),
Parameter(Descriptor::kSlot)));
}
// TODO(ishell): move to builtins.
TF_STUB(CreateWeakCellStub, CodeStubAssembler) {
Return(CreateWeakCellInFeedbackVector(Parameter(Descriptor::kVector),
Parameter(Descriptor::kSlot),
Parameter(Descriptor::kValue)));
}
TF_STUB(ArrayNoArgumentConstructorStub, CodeStubAssembler) {
ElementsKind elements_kind = stub->elements_kind();
Node* native_context = LoadObjectField(Parameter(Descriptor::kFunction),

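The CallICStub removed above was the TurboFan/CSA half of the same
mechanism. Its key design point is the dispatch split: a monomorphic
feedback hit tail-calls the CallFunction builtin, which can skip the
generic callable checks because the cached target is known to be a
JSFunction, while every other state funnels through the generic Call
builtin; and only a JSFunction from the same native context is ever cached
in a WeakCell. A small self-contained C++ sketch of that dispatch decision
(all names invented for illustration):

#include <cstdio>

struct Fn { const char* name; };

// Stand-ins for the CallFunction and Call builtins (illustration only).
void CallFunctionBuiltin(const Fn& f) { std::printf("CallFunction %s\n", f.name); }
void CallBuiltin(const Fn& f) { std::printf("Call %s\n", f.name); }

void Dispatch(const Fn& target, const Fn* cached_target) {
  if (cached_target == &target) {
    // Monomorphic hit: the target is a known JSFunction, so the cheaper
    // CallFunction path can skip the generic callable checks.
    CallFunctionBuiltin(target);
  } else {
    // Miss, uninitialized, or megamorphic: generic [[Call]] path.
    CallBuiltin(target);
  }
}

int main() {
  Fn f{"f"}, g{"g"};
  Dispatch(f, &f);  // hit
  Dispatch(g, &f);  // miss
}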

@@ -34,8 +34,6 @@ class Node;
V(ArrayConstructor) \
V(CallApiCallback) \
V(CallApiGetter) \
V(CallConstruct) \
V(CallIC) \
V(CEntry) \
V(CompareIC) \
V(DoubleToI) \
@@ -48,18 +46,11 @@
V(StoreSlowElement) \
V(SubString) \
V(NameDictionaryLookup) \
/* These are only called from FCG */ \
/* They can be removed when only the TF */ \
/* version of the corresponding stub is */ \
/* used universally */ \
V(CallICTrampoline) \
/* --- TurboFanCodeStubs --- */ \
V(AllocateHeapNumber) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(StringLength) \
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
@@ -624,26 +615,6 @@ class NumberToStringStub final : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(NumberToString, TurboFanCodeStub);
};
class CreateAllocationSiteStub : public TurboFanCodeStub {
public:
explicit CreateAllocationSiteStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateAllocationSite);
DEFINE_TURBOFAN_CODE_STUB(CreateAllocationSite, TurboFanCodeStub);
};
class CreateWeakCellStub : public TurboFanCodeStub {
public:
explicit CreateWeakCellStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateWeakCell);
DEFINE_TURBOFAN_CODE_STUB(CreateWeakCell, TurboFanCodeStub);
};
class GrowArrayElementsStub : public TurboFanCodeStub {
public:
GrowArrayElementsStub(Isolate* isolate, ElementsKind kind)
@@ -725,27 +696,6 @@ class MathPowStub: public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(MathPow, PlatformCodeStub);
};
class CallICStub : public TurboFanCodeStub {
public:
CallICStub(Isolate* isolate, ConvertReceiverMode convert_mode)
: TurboFanCodeStub(isolate) {
minor_key_ = ConvertModeBits::encode(convert_mode);
}
ConvertReceiverMode convert_mode() const {
return ConvertModeBits::decode(minor_key_);
}
protected:
typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
private:
void PrintState(std::ostream& os) const final; // NOLINT
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallIC);
DEFINE_TURBOFAN_CODE_STUB(CallIC, TurboFanCodeStub);
};
class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
public:
explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
@@ -993,15 +943,6 @@ class JSEntryStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(JSEntry, PlatformCodeStub);
};
// TODO(bmeurer/mvstanton): Turn CallConstructStub into ConstructICStub.
class CallConstructStub final : public PlatformCodeStub {
public:
explicit CallConstructStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallConstruct);
DEFINE_PLATFORM_CODE_STUB(CallConstruct, PlatformCodeStub);
};
enum ReceiverCheckMode {
// We don't know anything about the receiver.
@@ -1077,15 +1018,6 @@ class StringCharCodeAtGenerator {
DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
};
class CallICTrampolineStub : public CallICStub {
public:
CallICTrampolineStub(Isolate* isolate, ConvertReceiverMode convert_mode)
: CallICStub(isolate, convert_mode) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallICTrampoline);
DEFINE_TURBOFAN_CODE_STUB(CallICTrampoline, CallICStub);
};
class DoubleToIStub : public PlatformCodeStub {
public:
DoubleToIStub(Isolate* isolate, Register source, Register destination,


@@ -1434,7 +1434,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// The feedback is an AllocationSite, which means we have called the
// Array function and collected transition (and pretenuring) feedback
// for the resulting arrays. This has to be kept in sync with the
// implementation of the CallConstructStub.
// implementation in Ignition.
Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
// Retrieve the Array function from the {node}.


@@ -235,6 +235,11 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
Object* const obj = Get(slot);
switch (kind) {
case FeedbackSlotKind::kCall:
// If we are not running interpreted code, we need to ignore the special
// IC slots for call/construct used by the interpreter.
// TODO(mvstanton): Remove code_is_interpreted when full code is retired
// from service.
if (!code_is_interpreted) break;
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:

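A note on the ComputeCounts hunk above: the added break only exits early
when the code is not interpreted; when running interpreted code, control
deliberately falls through from the kCall case into the counting logic
shared with the load/store slot kinds. A simplified, self-contained model
of that control flow (not V8 code; the real switch covers many more kinds):

#include <cstdio>

enum class SlotKind { kCall, kLoadProperty };

int CountTypeInfo(SlotKind kind, bool code_is_interpreted) {
  int with_type_info = 0;
  switch (kind) {
    case SlotKind::kCall:
      // Interpreter-only call/construct slots are ignored for full-codegen.
      if (!code_is_interpreted) break;
      [[fallthrough]];
    case SlotKind::kLoadProperty:
      ++with_type_info;  // shared handling for feedback-carrying slot kinds
      break;
  }
  return with_type_info;
}

int main() {
  std::printf("%d %d\n", CountTypeInfo(SlotKind::kCall, false),
              CountTypeInfo(SlotKind::kCall, true));  // prints "0 1"
}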

@@ -1662,8 +1662,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ mov(r3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ mov(r0, Operand(arg_count));
CallIC(code);
@@ -1695,17 +1694,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
// Load function and argument count into r1 and r0.
Handle<Code> code = CodeFactory::Construct(isolate()).code();
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(r2);
__ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
__ mov(r3, r1);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(r0);


@@ -1623,8 +1623,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Mov(x3, IntFromSlot(expr->CallFeedbackICSlot()));
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ Mov(x0, arg_count);
CallIC(code);
@@ -1656,17 +1655,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
// Load function and argument count into x1 and x0.
Handle<Code> code = CodeFactory::Construct(isolate()).code();
__ Mov(x0, arg_count);
__ Peek(x1, arg_count * kXRegSize);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(x2);
__ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
__ Mov(x3, x1);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(x0);


@@ -1556,8 +1556,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Move(edx, Immediate(IntFromSlot(expr->CallFeedbackICSlot())));
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Move(eax, Immediate(arg_count));
CallIC(code);
@@ -1589,17 +1588,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
// Load function and argument count into edi and eax.
Handle<Code> code = CodeFactory::Construct(isolate()).code();
__ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
__ mov(edx, edi);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(eax);


@@ -1664,8 +1664,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Record source position of the IC call.
SetCallPosition(expr);
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
@@ -1697,17 +1696,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
// Load function and argument count into a1 and a0.
Handle<Code> code = CodeFactory::Construct(isolate()).code();
__ li(a0, Operand(arg_count));
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
__ mov(a3, a1);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(v0);


@@ -1666,8 +1666,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Record source position of the IC call.
SetCallPosition(expr);
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
__ Ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
@@ -1699,17 +1698,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
// Load function and argument count into a1 and a0.
Handle<Code> code = CodeFactory::Construct(isolate()).code();
__ li(a0, Operand(arg_count));
__ Ld(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
__ mov(a3, a1);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(v0);


@@ -1629,8 +1629,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ mov(r6, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r3, Operand(arg_count));
CallIC(code);
@@ -1662,17 +1661,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
// Load function and argument count into r4 and r3.
Handle<Code> code = CodeFactory::Construct(isolate()).code();
__ mov(r3, Operand(arg_count));
__ LoadP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(r5);
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
__ mov(r6, r4);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(r3);


@@ -1587,8 +1587,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Load(r5, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
__ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r2, Operand(arg_count));
CallIC(code);
@@ -1620,17 +1619,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
// Load function and argument count into r3 and r2.
Handle<Code> code = CodeFactory::Construct(isolate()).code();
__ mov(r2, Operand(arg_count));
__ LoadP(r3, MemOperand(sp, arg_count * kPointerSize), r0);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(r4);
__ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
__ mov(r5, r3);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(r2);


@@ -1577,8 +1577,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Set(rdx, IntFromSlot(expr->CallFeedbackICSlot()));
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ Set(rax, arg_count);
CallIC(code);
@@ -1611,17 +1610,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
// Load function and argument count into rdi and rax.
Handle<Code> code = CodeFactory::Construct(isolate()).code();
__ Set(rax, arg_count);
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
__ EmitLoadFeedbackVector(rbx);
__ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
__ movp(rdx, rdi);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(rax);


@@ -759,165 +759,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
GenerateMiss(masm);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// eax : number of arguments to the construct function
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(eax);
__ push(eax);
__ push(edi);
__ push(edx);
__ push(ebx);
__ push(esi);
__ CallStub(stub);
__ pop(esi);
__ pop(ebx);
__ pop(edx);
__ pop(edi);
__ pop(eax);
__ SmiUntag(eax);
}
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if ecx is a WeakCell or a Symbol, but it's harmless to read
// at this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
__ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
__ j(equal, &done, Label::kFar);
__ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
Heap::kWeakCellMapRootIndex);
__ j(not_equal, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(FieldOperand(ecx, WeakCell::kValueOffset), &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the slot either some other function or an
// AllocationSite.
__ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
__ j(not_equal, &miss);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &megamorphic);
__ jmp(&done, Label::kFar);
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(ecx, Heap::kuninitialized_symbolRootIndex);
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
FeedbackVector::kFeedbackSlotsOffset),
Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
__ jmp(&done, Label::kFar);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &not_array_function);
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FeedbackVector::kFeedbackSlotsOffset + kPointerSize),
Immediate(Smi::FromInt(1)));
}
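The feedback-slot state machine deleted here is duplicated below for each remaining architecture; only the register assignments differ. As a self-contained sketch in plain C++ (FeedbackSlot, the raw-pointer parameters, and the nullptr-for-cleared-cell convention are invented stand-ins for V8's tagged values, not the real types):

    enum class FeedbackState { kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic };

    // Invented model of a single feedback vector slot.
    struct FeedbackSlot {
      FeedbackState state = FeedbackState::kUninitialized;
      const void* target = nullptr;  // held weakly; nullptr models a cleared WeakCell
      int call_count = 0;
    };

    // Rough equivalent of GenerateRecordCallTarget above.
    void RecordCallTarget(FeedbackSlot& slot, const void* function,
                          const void* array_function) {
      ++slot.call_count;  // bumped for every call, whatever the state
      switch (slot.state) {
        case FeedbackState::kMegamorphic:
          return;  // terminal state
        case FeedbackState::kMonomorphic:
          if (slot.target == function) return;       // cache hit
          if (slot.target == nullptr) break;         // cleared cell: re-initialize
          slot.state = FeedbackState::kMegamorphic;  // monomorphic miss
          return;
        case FeedbackState::kAllocationSite:
          if (function == array_function) return;    // still the Array constructor
          slot.state = FeedbackState::kMegamorphic;
          return;
        case FeedbackState::kUninitialized:
          break;
      }
      // Initialize: the Array constructor gets an AllocationSite, everything
      // else a WeakCell holding the function.
      slot.state = (function == array_function) ? FeedbackState::kAllocationSite
                                                : FeedbackState::kMonomorphic;
      slot.target = function;
    }

Note that megamorphic is terminal: once a slot has seen two distinct targets it never becomes monomorphic again.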
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
Label non_function;
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function);
GenerateRecordCallTarget(masm);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into ebx, or undefined.
__ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
FeedbackVector::kFeedbackSlotsOffset));
Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
__ j(equal, &feedback_register_initialized);
__ mov(ebx, isolate()->factory()->undefined_value());
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(ebx);
// Pass new target to construct stub.
__ mov(edx, edi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
__ jmp(ecx);
__ bind(&non_function);
__ mov(edx, edi);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
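Squashed down to ordinary C++, the dispatch performed by the stub above is small. A sketch under invented types: GenericConstruct stands in for the Construct builtin, and construct_stub for the code object read from the SharedFunctionInfo.

    // Invented stand-ins for tagged values.
    struct SharedInfoLike {
      void* (*construct_stub)(void* target, void* new_target);
    };
    struct JSFunctionLike {
      SharedInfoLike* shared;
    };

    // Models the generic Construct builtin used for non-JSFunction targets.
    void* GenericConstruct(void* /*target*/, void* /*new_target*/) { return nullptr; }

    void* CallConstruct(void* target, bool target_is_js_function) {
      if (!target_is_js_function) {
        // Smis and other callables skip feedback collection entirely.
        return GenericConstruct(target, /*new_target=*/target);
      }
      // GenerateRecordCallTarget(...) runs here, and the AllocationSite (or
      // undefined) is fetched from the feedback slot before the tail call.
      JSFunctionLike* function = static_cast<JSFunctionLike*>(target);
      return function->shared->construct_stub(target, /*new_target=*/target);
    }

In both paths the callee itself is passed as the new target, matching the two register moves before the jumps above.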
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@ -928,8 +769,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}


@ -98,54 +98,12 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx, edx, edi};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, edx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
// ecx : new target (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {eax, edi, ecx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
@ -403,17 +361,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
edx, // target to the call. It is checked to be Array function.
ebx, // allocation site feedback
ecx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@ -441,25 +441,6 @@ void FastCloneShallowArrayDescriptor::InitializePlatformIndependent(
machine_types);
}
void CreateAllocationSiteDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kVector, kSlot
MachineType machine_types[] = {MachineType::AnyTagged(),
MachineType::TaggedSigned()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void CreateWeakCellDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kVector, kSlot, kValue
MachineType machine_types[] = {MachineType::AnyTagged(),
MachineType::TaggedSigned(),
MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void CallTrampolineDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kFunction, kActualArgumentsCount
@ -566,25 +547,6 @@ void ConstructTrampolineDescriptor::InitializePlatformIndependent(
machine_types);
}
void CallICDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kActualArgumentsCount, kSlot, kVector
MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
MachineType::Int32(),
MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void CallICTrampolineDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kActualArgumentsCount, kSlot
MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
MachineType::Int32()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void BuiltinDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kArgumentsCount
@ -702,16 +664,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformIndependent(
machine_types);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformIndependent(CallInterfaceDescriptorData* data) {
// kNumberOfArguments, kFunction, kFeedbackElement, kFirstArgument
MachineType machine_types[] = {MachineType::Int32(), MachineType::AnyTagged(),
MachineType::AnyTagged(),
MachineType::Pointer()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void InterpreterCEntryDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kNumberOfArguments, kFirstArgument, kFunctionEntry


@ -41,16 +41,11 @@ class PlatformInterfaceDescriptor;
V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(CallFunction) \
V(CallIC) \
V(CallICTrampoline) \
V(CallVarargs) \
V(CallForwardVarargs) \
V(CallWithSpread) \
V(CallWithArrayLike) \
V(CallConstruct) \
V(CallTrampoline) \
V(ConstructStub) \
V(ConstructVarargs) \
@ -88,7 +83,6 @@ class PlatformInterfaceDescriptor;
V(InterpreterDispatch) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
V(InterpreterPushArgsThenConstructArray) \
V(InterpreterCEntry) \
V(ResumeGenerator) \
V(FrameDropperTrampoline) \
@ -572,23 +566,6 @@ class FastCloneShallowObjectDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(FastCloneShallowObjectDescriptor, CallInterfaceDescriptor)
};
class CreateAllocationSiteDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kVector, kSlot)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CreateAllocationSiteDescriptor,
CallInterfaceDescriptor)
};
class CreateWeakCellDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kVector, kSlot, kValue)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CreateWeakCellDescriptor,
CallInterfaceDescriptor)
};
class CallTrampolineDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kActualArgumentsCount)
@ -676,25 +653,6 @@ class CallFunctionDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
};
class CallICDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kSlot, kVector)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallICDescriptor,
CallInterfaceDescriptor)
};
class CallICTrampolineDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kSlot)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallICTrampolineDescriptor,
CallInterfaceDescriptor)
};
class CallConstructDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
};
class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kMap)
@ -915,15 +873,6 @@ class InterpreterPushArgsThenConstructDescriptor
InterpreterPushArgsThenConstructDescriptor, CallInterfaceDescriptor)
};
class InterpreterPushArgsThenConstructArrayDescriptor
: public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFunction, kFeedbackElement,
kFirstArgument)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
InterpreterPushArgsThenConstructArrayDescriptor, CallInterfaceDescriptor)
};
class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunctionEntry)


@ -567,8 +567,6 @@ Node* InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
Node* feedback_vector,
Node* slot_id) {
// TODO(bmeurer): Add support for the Array constructor AllocationSite,
// and unify this with the general Call/Construct IC code below.
Label extra_checks(this, Label::kDeferred), done(this);
// Increment the call count.
@ -644,132 +642,6 @@ void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
BIND(&done);
}
Node* InterpreterAssembler::CallJSWithFeedback(
compiler::Node* function, compiler::Node* context,
compiler::Node* first_arg, compiler::Node* arg_count,
compiler::Node* slot_id, compiler::Node* feedback_vector,
ConvertReceiverMode receiver_mode) {
// Static checks to assert it is safe to examine the type feedback element.
// We don't know that we have a weak cell. We might have a private symbol
// or an AllocationSite, but the memory is safe to examine.
// AllocationSite::kTransitionInfoOrBoilerplateOffset - contains a Smi or
// pointer to FixedArray.
// WeakCell::kValueOffset - contains a JSFunction or Smi(0)
// Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
// computed, meaning that it can't appear to be a pointer. If the low bit is
// 0, then hash is computed, but the 0 bit prevents the field from appearing
// to be a pointer.
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
STATIC_ASSERT(AllocationSite::kTransitionInfoOrBoilerplateOffset ==
WeakCell::kValueOffset &&
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
Variable return_value(this, MachineRepresentation::kTagged);
Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
end(this);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
// The checks. First, does function match the recorded monomorphic target?
Node* feedback_element = LoadFeedbackVectorSlot(feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
Node* is_monomorphic = WordEqual(function, feedback_value);
GotoIfNot(is_monomorphic, &extra_checks);
// The compare above could have been a Smi/Smi comparison, so guard against
// it falsely convincing us that we have a monomorphic JSFunction.
Node* is_smi = TaggedIsSmi(function);
Branch(is_smi, &extra_checks, &call_function);
BIND(&call_function);
{
// Call using call function builtin.
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kJSFunction);
Node* code_target = HeapConstant(callable.code());
Node* ret_value = CallStub(callable.descriptor(), code_target, context,
arg_count, first_arg, function);
return_value.Bind(ret_value);
Goto(&end);
}
BIND(&extra_checks);
{
Label mark_megamorphic(this);
Comment("check if megamorphic");
// Check if it is a megamorphic target.
Node* is_megamorphic =
WordEqual(feedback_element,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
GotoIf(is_megamorphic, &call);
Comment("check if uninitialized");
// Check if it is uninitialized target first.
Node* is_uninitialized = WordEqual(
feedback_element,
HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
GotoIfNot(is_uninitialized, &mark_megamorphic);
Comment("handle_uninitialized");
// If it is not a JSFunction mark it as megamorphic.
Node* is_smi = TaggedIsSmi(function);
GotoIf(is_smi, &mark_megamorphic);
// Check if function is an object of JSFunction type.
Node* instance_type = LoadInstanceType(function);
Node* is_js_function =
Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
GotoIfNot(is_js_function, &mark_megamorphic);
// Check if the function belongs to the same native context.
Node* native_context = LoadNativeContext(
LoadObjectField(function, JSFunction::kContextOffset));
Node* is_same_native_context =
WordEqual(native_context, LoadNativeContext(context));
GotoIfNot(is_same_native_context, &mark_megamorphic);
CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id), function);
// Call using call function builtin.
Goto(&call_function);
BIND(&mark_megamorphic);
{
// Mark it as megamorphic.
// The MegamorphicSentinel is created as part of Heap::InitialObjects
// and will not move during a GC, so it is safe to skip the write barrier.
DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
SKIP_WRITE_BARRIER);
Goto(&call);
}
}
BIND(&call);
{
Comment("invoke using Call builtin");
// Call using call builtin.
Callable callable_call = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
Node* code_target_call = HeapConstant(callable_call.code());
Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
context, arg_count, first_arg, function);
return_value.Bind(ret_value);
Goto(&end);
}
BIND(&end);
return return_value.value();
}
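The safety argument in the deleted comment rests on three object layouts sharing one offset. That property can be checked in isolation; a minimal sketch with toy layouts (field names modeled on the STATIC_ASSERTs above, sizes invented, not V8's real classes):

    #include <cstddef>
    #include <cstdint>

    // Toy layouts: all three shapes place an examinable word at the same
    // offset, so the feedback load is safe before the shape is known.
    struct WeakCellLayout       { void* map; void* value; };           // JSFunction or Smi(0)
    struct AllocationSiteLayout { void* map; void* transition_info; }; // Smi or FixedArray
    struct SymbolLayout         { void* map; uintptr_t hash_field; };  // never looks like a pointer

    static_assert(offsetof(WeakCellLayout, value) ==
                      offsetof(AllocationSiteLayout, transition_info),
                  "same offset whichever shape the slot holds");
    static_assert(offsetof(WeakCellLayout, value) ==
                      offsetof(SymbolLayout, hash_field),
                  "reading a symbol at this offset yields its hash field");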
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* first_arg, Node* arg_count,
ConvertReceiverMode receiver_mode) {


@ -125,21 +125,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* slot_id,
compiler::Node* feedback_vector);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
// including receiver) and the first argument located at |first_arg|. Type
// feedback is collected in the slot at index |slot_id|.
//
// If the |receiver_mode| is kNullOrUndefined, then the receiver is implicitly
// undefined and |first_arg| is the first parameter. Otherwise, |first_arg| is
// the receiver and it is converted according to |receiver_mode|.
compiler::Node* CallJSWithFeedback(compiler::Node* function,
compiler::Node* context,
compiler::Node* first_arg,
compiler::Node* arg_count,
compiler::Node* slot_id,
compiler::Node* feedback_vector,
ConvertReceiverMode receiver_mode);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
// including receiver) and the first argument located at |first_arg|, possibly
// including the receiver depending on |receiver_mode|.
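The receiver convention described in these comments can be pictured with a small stand-alone helper (illustrative only; the real code computes register-file offsets inside the interpreter frame, and the -1 below merely marks the implicit undefined receiver):

    #include <vector>

    enum class ConvertReceiverMode { kNullOrUndefined, kAny };

    // regs[first_arg..] holds the call's operands; returns the receiver
    // followed by the arg_count arguments (arg_count excludes the receiver).
    std::vector<int> ArgumentsWithReceiver(const std::vector<int>& regs,
                                           int first_arg, int arg_count,
                                           ConvertReceiverMode mode) {
      std::vector<int> args;
      if (mode == ConvertReceiverMode::kNullOrUndefined) {
        args.push_back(-1);  // implicit undefined receiver; first_arg is the first parameter
        for (int i = 0; i < arg_count; ++i) args.push_back(regs[first_arg + i]);
      } else {
        // regs[first_arg] is the explicit receiver, converted according to |mode|.
        for (int i = 0; i <= arg_count; ++i) args.push_back(regs[first_arg + i]);
      }
      return args;
    }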


@ -1718,8 +1718,12 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
Node* result = CallJSWithFeedback(function, context, first_arg, args_count,
slot_id, feedback_vector, receiver_mode);
// Collect the {function} feedback.
CollectCallFeedback(function, context, feedback_vector, slot_id);
Node* result =
CallJS(function, context, first_arg, args_count, receiver_mode);
SetAccumulator(result);
Dispatch();
}
@ -1734,22 +1738,27 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
const int kSlotOperandIndex =
kFirstArgumentOperandIndex + kReceiverOperandCount + arg_count;
// Indices and counts of parameters to the call stub.
const int kBoilerplateParameterCount = 7;
const int kReceiverParameterIndex = 5;
const int kBoilerplateParameterCount = 5;
const int kReceiverParameterIndex = 3;
const int kReceiverParameterCount = 1;
// Only used in a DCHECK.
USE(kReceiverParameterCount);
Node* function_reg = BytecodeOperandReg(0);
Node* function = LoadRegister(function_reg);
Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
// Collect the {function} feedback.
CollectCallFeedback(function, context, feedback_vector, slot_id);
std::array<Node*, Bytecodes::kMaxOperands + kBoilerplateParameterCount>
temp;
Callable call_ic = CodeFactory::CallIC(isolate());
temp[0] = HeapConstant(call_ic.code());
Callable callable = CodeFactory::Call(isolate());
temp[0] = HeapConstant(callable.code());
temp[1] = function;
temp[2] = Int32Constant(arg_count);
temp[3] = BytecodeOperandIdxInt32(kSlotOperandIndex);
temp[4] = LoadFeedbackVector();
int parameter_index = kReceiverParameterIndex;
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
@ -1767,9 +1776,9 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
DCHECK_EQ(parameter_index,
kReceiverParameterIndex + kReceiverParameterCount + arg_count);
temp[parameter_index] = GetContext();
temp[parameter_index] = context;
Node* result = CallStubN(call_ic.descriptor(), 1,
Node* result = CallStubN(callable.descriptor(), 1,
arg_count + kBoilerplateParameterCount, &temp[0]);
SetAccumulator(result);
Dispatch();
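The buffer handed to CallStubN by the handler above has a fixed prefix and suffix around the arguments. A stand-alone model of the layout (invented helper; the constants mirror the ones in the code, and for kNullOrUndefined the handler stores undefined in the receiver slot):

    #include <cassert>
    #include <vector>

    // Mirrors the temp[] buffer built for the Call builtin:
    // [code, function, arg_count, receiver, args..., context].
    std::vector<void*> BuildCallBuffer(void* code, void* function,
                                       void* arg_count_int32,
                                       const std::vector<void*>& receiver_and_args,
                                       void* context) {
      const int kBoilerplateParameterCount = 5;
      const int kReceiverParameterIndex = 3;
      std::vector<void*> temp;
      temp.push_back(code);             // temp[0]: code target of the Call builtin
      temp.push_back(function);         // temp[1]: callee
      temp.push_back(arg_count_int32);  // temp[2]: argument count (Int32)
      // temp[3..]: receiver followed by the arguments (never empty).
      for (void* p : receiver_and_args) temp.push_back(p);
      temp.push_back(context);          // the context always goes last
      assert(temp[kReceiverParameterIndex] == receiver_and_args.front());
      // arg_count == receiver_and_args.size() - 1, so the total size is
      // arg_count + kBoilerplateParameterCount, matching the CallStubN call.
      assert(temp.size() == receiver_and_args.size() - 1 + kBoilerplateParameterCount);
      return temp;
    }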


@ -853,8 +853,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@ -1239,165 +1237,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
const RegList kSavedRegs = 1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7 | // a3
1 << cp.code();
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
__ MultiPush(kSavedRegs);
__ CallStub(stub);
__ MultiPop(kSavedRegs);
__ SmiUntag(a0);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into t2.
__ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t2, FieldMemOperand(t2, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = t1;
Register weak_value = t4;
__ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, t2, Operand(at));
__ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&megamorphic, ne, a1, Operand(t2));
__ jmp(&done);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
__ Branch(&initialize, eq, t2, Operand(at));
// The MegamorphicSentinel is an immortal, immovable object (a symbol), so no
// write barrier is needed.
__ bind(&megamorphic);
__ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t2, FeedbackVector::kFeedbackSlotsOffset));
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&not_array_function, ne, a1, Operand(t2));
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t0, FieldMemOperand(
at, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
__ sw(t0, FieldMemOperand(
at, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function);
// Check that the function is a JSFunction.
__ GetObjectType(a1, t1, t1);
__ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));
GenerateRecordCallTarget(masm);
__ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
__ lw(a2, FieldMemOperand(t1, FeedbackVector::kFeedbackSlotsOffset));
__ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&feedback_register_initialized, eq, t1, Operand(at));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(a2, t1);
// Pass function as new target.
__ mov(a3, a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
__ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
__ bind(&non_function);
__ mov(a3, a1);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
DCHECK(!t0.is(index_));


@ -97,54 +97,12 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a3, a1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
// t0 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {a0, a1, t0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: target
@ -399,17 +357,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (not including receiver)
a1, // the target to call verified to be Array function
a2, // allocation site feedback
a3, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@ -850,8 +850,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@ -1236,172 +1234,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
const RegList kSavedRegs = 1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7 | // a3
1 << cp.code();
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
__ MultiPush(kSavedRegs);
__ CallStub(stub);
__ MultiPop(kSavedRegs);
__ SmiUntag(a0);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into a5.
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
__ Ld(a5, FieldMemOperand(a5, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = a6;
Register weak_value = t0;
__ Ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, a5, Operand(at));
__ Ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&megamorphic, ne, a1, Operand(a5));
__ jmp(&done);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
__ Branch(&initialize, eq, a5, Operand(at));
// The MegamorphicSentinel is an immortal, immovable object (a symbol), so no
// write barrier is needed.
__ bind(&megamorphic);
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Sd(at, FieldMemOperand(a5, FeedbackVector::kFeedbackSlotsOffset));
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&not_array_function, ne, a1, Operand(a5));
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ SmiScale(a4, a3, kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a4));
__ Ld(a4, FieldMemOperand(
a5, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
__ Sd(a4, FieldMemOperand(
a5, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function);
// Check that the function is a JSFunction.
__ GetObjectType(a1, a5, a5);
__ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
GenerateRecordCallTarget(masm);
__ dsrl(at, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, at);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
__ Ld(a2, FieldMemOperand(a5, FeedbackVector::kFeedbackSlotsOffset));
__ Ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&feedback_register_initialized, eq, a5, Operand(at));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(a2, a5);
// Pass function as new target.
__ mov(a3, a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
__ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ bind(&non_function);
__ mov(a3, a1);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
DCHECK(!a4.is(index_));


@ -97,54 +97,12 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a3, a1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
// a4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {a0, a1, a4, a2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: target
@ -398,17 +356,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (not including receiver)
a1, // the target to call verified to be Array function
a2, // allocation site feedback
a3, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@ -818,8 +818,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@ -1187,179 +1185,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ blr();
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r3 : number of arguments to the construct function
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r3);
__ Push(r6, r5, r4, r3);
__ Push(cp);
__ CallStub(stub);
__ Pop(cp);
__ Pop(r6, r5, r4, r3);
__ SmiUntag(r3);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r3 : number of arguments to the construct function
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
const int count_offset = FeedbackVector::kFeedbackSlotsOffset + kPointerSize;
// Load the cache state into r8.
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
__ LoadP(r8, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r9;
Register weak_value = r10;
__ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
__ cmp(r4, weak_value);
__ beq(&done);
__ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
__ beq(&done);
__ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ bne(&check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ b(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
__ bne(&miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&megamorphic);
__ b(&done);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r8, Heap::kuninitialized_symbolRootIndex);
__ beq(&initialize);
// The MegamorphicSentinel is an immortal, immovable object (a symbol), so no
// write barrier is needed.
__ bind(&megamorphic);
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ StoreP(ip, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset), r0);
__ jmp(&done);
// An uninitialized cache is patched with the function
__ bind(&initialize);
// Make sure the function is the Array() function.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&not_array_function);
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
__ LoadP(r7, FieldMemOperand(r8, count_offset));
__ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
__ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// r3 : number of arguments
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(r4, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
__ bne(&non_function);
GenerateRecordCallTarget(masm);
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
// Put the AllocationSite from the feedback vector into r5, or undefined.
__ LoadP(r5, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset));
__ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
__ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
if (CpuFeatures::IsSupported(ISELECT)) {
__ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
__ isel(eq, r5, r5, r8);
} else {
Label feedback_register_initialized;
__ beq(&feedback_register_initialized);
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
}
__ AssertUndefinedOrAllocationSite(r5, r8);
// Pass function as new target.
__ mr(r6, r4);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
__ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
__ bind(&non_function);
__ mr(r6, r4);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.

View File

@ -97,40 +97,12 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r6};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r6, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r6};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r6, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments
@ -400,17 +372,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (not including receiver)
r4, // target to call checked to be Array function
r5, // allocation site feedback if available, undefined otherwise
r6 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@ -782,8 +782,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@ -1175,171 +1173,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ b(r14);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r2 : number of arguments to the construct function
// r3 : the function to call
// r4 : feedback vector
// r5 : slot in feedback vector (Smi)
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r2);
__ Push(r5, r4, r3, r2);
__ Push(cp);
__ CallStub(stub);
__ Pop(cp);
__ Pop(r5, r4, r3, r2);
__ SmiUntag(r2);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r2 : number of arguments to the construct function
// r3 : the function to call
// r4 : feedback vector
// r5 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
const int count_offset = FeedbackVector::kFeedbackSlotsOffset + kPointerSize;
// Load the cache state into r7.
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
__ LoadP(r7, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r8;
Register weak_value = r9;
__ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
__ CmpP(r3, weak_value);
__ beq(&done, Label::kNear);
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
__ beq(&done, Label::kNear);
__ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ bne(&check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ b(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
__ bne(&miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ CmpP(r3, r7);
__ bne(&megamorphic);
__ b(&done, Label::kNear);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
__ beq(&initialize);
// The MegamorphicSentinel is an immortal, immovable object (a symbol), so no
// write barrier is needed.
__ bind(&megamorphic);
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ StoreP(ip, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset), r0);
__ jmp(&done);
// An uninitialized cache is patched with the function
__ bind(&initialize);
// Make sure the function is the Array() function.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ CmpP(r3, r7);
__ bne(&not_array_function);
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done, Label::kNear);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
__ LoadP(r6, FieldMemOperand(r7, count_offset));
__ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
__ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// r2 : number of arguments
// r3 : the function to call
// r4 : feedback vector
// r5 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(r3, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r3, r7, r7, JS_FUNCTION_TYPE);
__ bne(&non_function);
GenerateRecordCallTarget(masm);
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
// Put the AllocationSite from the feedback vector into r4, or undefined.
__ LoadP(r4, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset));
__ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
__ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
Label feedback_register_initialized;
__ beq(&feedback_register_initialized);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(r4, r7);
// Pass function as new target.
__ LoadRR(r5, r3);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
__ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
__ bind(&non_function);
__ LoadRR(r5, r3);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.


@ -92,36 +92,12 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r5, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r5, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
@ -383,17 +359,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (not including receiver)
r3, // target to call checked to be Array function
r4, // allocation site feedback if available, undefined otherwise
r5 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@ -647,164 +647,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
GenerateMiss(masm);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// rax : number of arguments to the construct function
// rbx : feedback vector
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ Integer32ToSmi(rax, rax);
__ Push(rax);
__ Push(rdi);
__ Integer32ToSmi(rdx, rdx);
__ Push(rdx);
__ Push(rbx);
__ Push(rsi);
__ CallStub(stub);
__ Pop(rsi);
__ Pop(rbx);
__ Pop(rdx);
__ Pop(rdi);
__ Pop(rax);
__ SmiToInteger32(rdx, rdx);
__ SmiToInteger32(rax, rax);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
// rbx : feedback vector
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into r11.
__ SmiToInteger32(rdx, rdx);
__ movp(r11, FieldOperand(rbx, rdx, times_pointer_size,
FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r11 is a WeakCell or a Symbol, but it's harmless to read
// at this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
__ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
__ j(equal, &done, Label::kFar);
__ CompareRoot(r11, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
Heap::kWeakCellMapRootIndex);
__ j(not_equal, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ CheckSmi(FieldOperand(r11, WeakCell::kValueOffset));
__ j(equal, &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ CompareRoot(FieldOperand(r11, 0), Heap::kAllocationSiteMapRootIndex);
__ j(not_equal, &miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &megamorphic);
__ jmp(&done);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r11, Heap::kuninitialized_symbolRootIndex);
__ j(equal, &initialize);
// The MegamorphicSentinel is an immortal, immovable object (a symbol), so no
// write barrier is needed.
__ bind(&megamorphic);
__ Move(FieldOperand(rbx, rdx, times_pointer_size,
FeedbackVector::kFeedbackSlotsOffset),
FeedbackVector::MegamorphicSentinel(isolate));
__ jmp(&done);
// An uninitialized cache is patched with the function, or with an
// AllocationSite (which tracks the ElementsKind) if the function is the
// Array constructor.
__ bind(&initialize);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &not_array_function);
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
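// The count is kept as a Smi in the slot immediately following the
// feedback entry, hence the extra kPointerSize in the offset below.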
__ SmiAddConstant(
FieldOperand(rbx, rdx, times_pointer_size,
FeedbackVector::kFeedbackSlotsOffset + kPointerSize),
Smi::FromInt(1));
}
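For orientation, the state machine that GenerateRecordCallTarget implements can be summarized in portable C++. This is a minimal sketch under assumed names (CallFeedback and RecordCallTarget are illustrative, not V8 API); the real code additionally re-initializes the slot when a monomorphic WeakCell has been cleared by GC:
// Hedged sketch of the feedback-slot lattice; names are illustrative.
enum class CallFeedback {
  kUninitialized,  // sentinel symbol: nothing seen yet
  kMonomorphic,    // WeakCell holding the one JSFunction seen so far
  kArraySite,      // AllocationSite: the Array() constructor was seen
  kMegamorphic     // sentinel symbol: multiple targets seen, give up
};
CallFeedback RecordCallTarget(CallFeedback state, bool same_target,
                              bool is_array_function) {
  switch (state) {
    case CallFeedback::kUninitialized:
      // First call: remember the target, or an AllocationSite for Array().
      return is_array_function ? CallFeedback::kArraySite
                               : CallFeedback::kMonomorphic;
    case CallFeedback::kMonomorphic:
    case CallFeedback::kArraySite:
      // The same target keeps the state; any other goes megamorphic.
      return same_target ? state : CallFeedback::kMegamorphic;
    case CallFeedback::kMegamorphic:
      return state;  // Terminal: the slot never leaves this state.
  }
  return state;  // unreachable; keeps -Wreturn-type quiet
}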
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
// rbx : feedback vector
// rdx : slot in feedback vector (Smi)
// rdi : constructor function
Label non_function;
// Check that the constructor is not a smi.
__ JumpIfSmi(rdi, &non_function);
// Check that constructor is a JSFunction.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, r11);
__ j(not_equal, &non_function);
GenerateRecordCallTarget(masm);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into rbx, or undefined.
__ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
FeedbackVector::kFeedbackSlotsOffset));
__ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
__ j(equal, &feedback_register_initialized, Label::kNear);
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(rbx);
// Pass new target to construct stub.
__ movp(rdx, rdi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
__ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
__ jmp(rcx);
__ bind(&non_function);
__ movp(rdx, rdi);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
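The stub's dispatch reduces to a three-way decision. Below is a self-contained C++ sketch of that control flow; DispatchConstruct, Callee, and ConstructPath are stand-ins for illustration, not V8's real interfaces:
// Hedged sketch of CallConstructStub's dispatch; types are stand-ins.
enum class ConstructPath {
  kGenericConstruct,          // non-JSFunction: generic Construct builtin
  kFunctionStubWithSite,      // JSFunction with AllocationSite feedback
  kFunctionStubWithUndefined  // JSFunction, no AllocationSite in the slot
};
struct Callee {
  bool is_js_function;
  bool feedback_is_allocation_site;
};
ConstructPath DispatchConstruct(const Callee& c) {
  if (!c.is_js_function) {
    // Smis and other non-JSFunction targets funnel through the generic
    // Construct builtin, which throws if the target is not a constructor.
    return ConstructPath::kGenericConstruct;
  }
  // After recording the call target, the function-specific construct stub
  // is tail-called with the AllocationSite, or undefined, in rbx.
  return c.feedback_is_allocation_site
             ? ConstructPath::kFunctionStubWithSite
             : ConstructPath::kFunctionStubWithUndefined;
}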
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@ -815,8 +657,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}

View File

@ -99,52 +99,12 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx, rdx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx, rdx, rdi};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdi};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdi, rax, rdx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdi, rax, rdx, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
// rbx : feedback vector
// rdx : slot in feedback vector (Smi, for RecordCallTarget)
// rdi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {rax, rdi, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@ -401,17 +361,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
rax, // argument count (not including receiver)
rdx, // target of the call; it is checked to be the Array function
rbx, // allocation site feedback
rcx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
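Every descriptor above registers its parameter-to-register mapping through the same idiom: a fixed Register array whose order matches the descriptor's declared parameters, plus a count computed by arraysize. A standalone illustration of the idiom follows; Register and the data sink are simplified stand-ins, not V8's actual types:
#include <cstddef>
#include <cstdio>

struct Register { const char* name; };  // stand-in for v8::internal::Register

// Compile-time array length, as V8's arraysize macro computes it.
template <typename T, size_t N>
constexpr size_t arraysize(T (&)[N]) { return N; }

struct CallInterfaceDescriptorData {  // simplified stand-in
  void InitializePlatformSpecific(size_t count, const Register* regs) {
    for (size_t i = 0; i < count; ++i)
      std::printf("parameter %zu -> %s\n", i, regs[i].name);
  }
};

int main() {
  CallInterfaceDescriptorData data;
  // Order must match the descriptor's parameter declaration.
  Register registers[] = {{"rdi"}, {"rax"}, {"rdx"}};
  data.InitializePlatformSpecific(arraysize(registers), registers);
}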
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {