Revert "[ic] Properly integrate the CallIC into Ignition."

This reverts commit 6c541561ef.

Reason for revert:
https://build.chromium.org/p/client.v8/builders/V8%20Linux%20-%20nosnap/builds/17240

Original change's description:
> [ic] Properly integrate the CallIC into Ignition.
> 
> Drop the deprecated CallConstructStub and remove the use of CallICStub
> from fullcodegen, since that feedback has been completely unused ever
> since Crankshaft was removed. This means we can safely unlink all the
> CallIC machinery from fullcodegen and completely nuke the CallICStub
> and the CallICTrampolineStub (we can also transitively nuke the unused
> CreateAllocationSiteStub and CreateWeakCellStub).
> 
> Instead, the CallIC logic is now integrated into Ignition, as part of
> the bytecode handlers for [[Call]] and [[Construct]]. There's still
> some follow-up cleanup to do for the way the Array constructor feedback
> is integrated, but that's much easier now.
> 
> Bug: v8:5517, v8:6399, v8:6409, v8:6679
> Change-Id: Ia0efc6145ee64633757a6c3fd1879d4906ea2835
> Reviewed-on: https://chromium-review.googlesource.com/602134
> Commit-Queue: Benedikt Meurer <bmeurer@chromium.org>
> Reviewed-by: Yang Guo <yangguo@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#47192}

TBR=rmcilroy@chromium.org,yangguo@chromium.org,bmeurer@chromium.org

Change-Id: I416ce6646f62ceb4127b3acee43912ee0d701c23
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:5517, v8:6399, v8:6409, v8:6679
Reviewed-on: https://chromium-review.googlesource.com/603647
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47193}
Michael Achenbach, 2017-08-07 12:01:37 +00:00, committed by Commit Bot
parent 6c541561ef
commit 018128a439
44 changed files with 2615 additions and 49 deletions
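The diffs below restore the per-call-site feedback machinery (CallICStub, CallICTrampolineStub, CallConstructStub, and the GenerateRecordCallTarget helpers) across the architecture-specific files. As the restored comments describe, each call feedback slot moves through three states: uninitialized, monomorphic (a WeakCell holding the single JSFunction seen so far), and megamorphic. The following standalone C++ sketch models that lattice with illustrative names only; it leaves out the WeakCell-clearing and Array/AllocationSite special cases the real code handles:

#include <cassert>
#include <cstdio>

// Toy model of one call feedback slot. In V8 the slot holds a sentinel
// symbol, a WeakCell, or an AllocationSite; here an enum plus a raw
// pointer stand in for those.
enum class CallFeedback { kUninitialized, kMonomorphic, kMegamorphic };

struct FeedbackSlot {
  CallFeedback state = CallFeedback::kUninitialized;
  const void* target = nullptr;  // stands in for the WeakCell's value
  int call_count = 0;            // V8 keeps this in the adjacent slot

  void Record(const void* callee) {
    ++call_count;  // bumped on every call, whatever the state
    switch (state) {
      case CallFeedback::kUninitialized:
        state = CallFeedback::kMonomorphic;  // learn the first callee
        target = callee;
        break;
      case CallFeedback::kMonomorphic:
        if (callee != target) {  // a monomorphic miss generalizes the site
          state = CallFeedback::kMegamorphic;
          target = nullptr;
        }
        break;
      case CallFeedback::kMegamorphic:
        break;  // terminal: nothing more is recorded
    }
  }
};

int main() {
  int f = 0, g = 0;  // addresses stand in for two distinct callees
  FeedbackSlot slot;
  slot.Record(&f);
  assert(slot.state == CallFeedback::kMonomorphic);
  slot.Record(&f);  // same callee: stays monomorphic
  slot.Record(&g);  // different callee: goes megamorphic
  assert(slot.state == CallFeedback::kMegamorphic);
  std::printf("calls recorded: %d\n", slot.call_count);
  return 0;
}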


@@ -761,6 +761,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -1120,6 +1122,163 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
__ Push(cp);
__ CallStub(stub);
__ Pop(cp);
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into r5.
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r5, FieldMemOperand(r5, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r6;
Register weak_value = r9;
__ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
__ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &done);
__ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ b(ne, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then we have in the slot either some other function or an
// AllocationSite.
__ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &megamorphic);
__ jmp(&done);
__ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ str(r4, FieldMemOperand(r5, FeedbackVector::kFeedbackSlotsOffset));
__ jmp(&done);
// An uninitialized cache is patched with the function
__ bind(&initialize);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &not_array_function);
// The target function is the Array constructor. Create an AllocationSite
// if we don't already have it, and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r5, r5, Operand(FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
__ ldr(r4, FieldMemOperand(r5, 0));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r5, 0));
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
GenerateRecordCallTarget(masm);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into r2, or undefined.
__ ldr(r2, FieldMemOperand(r5, FeedbackVector::kFeedbackSlotsOffset));
__ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(eq, &feedback_register_initialized);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(r2, r5);
// Pass function as new target.
__ mov(r3, r1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
__ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&non_function);
__ mov(r3, r1);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.

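A note on the index arithmetic in the ARM code above: the slot index arrives Smi-tagged in r3, and Operand::PointerOffsetFromSmiKey scales it to a byte offset without untagging first; the slot's call count then lives one pointer further on, which is why the increment adds kFeedbackSlotsOffset + kPointerSize. A small sketch of that arithmetic, assuming V8's 32-bit Smi layout with one tag bit (illustrative, not V8's implementation):

#include <cstdint>
#include <cstdio>

constexpr int kSmiTagSize = 1;       // low bit 0 marks a Smi
constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers on 32-bit ARM

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }

int32_t PointerOffsetFromSmiKey(int32_t smi_key) {
  // index * kPointerSize, computed directly from the tagged value
  return smi_key << (kPointerSizeLog2 - kSmiTagSize);
}

int main() {
  int32_t slot_index = 3;
  int32_t smi = SmiTag(slot_index);
  std::printf("byte offset of slot %d = %d\n", slot_index,
              PointerOffsetFromSmiKey(smi));  // prints 12
  return 0;
}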

@@ -99,12 +99,54 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
// r4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r0, r1, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@@ -367,6 +409,17 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
r1, // target to call checked to be Array function
r2, // allocation site feedback if available, undefined otherwise
r3 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@@ -752,6 +752,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -1193,6 +1195,183 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
Register feedback_vector, Register index,
Register new_target) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
__ Push(argc, function, feedback_vector, index);
__ Push(cp);
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
__ Pop(cp);
__ Pop(index, feedback_vector, function, argc);
__ SmiUntag(argc);
}
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector, Register index,
Register new_target, Register scratch1,
Register scratch2, Register scratch3) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
feedback_vector, index, new_target));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
// function : the function to call
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
Register feedback = scratch1;
Register feedback_map = scratch2;
Register feedback_value = scratch3;
__ Add(feedback, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Ldr(feedback,
FieldMemOperand(feedback, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if feedback value is a WeakCell or a Symbol, but it's
// harmless to read at this position in a symbol (see static asserts in
// feedback-vector.h).
Label check_allocation_site;
__ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(function, feedback_value);
__ B(eq, &done);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ B(eq, &done);
__ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ B(ne, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(feedback_value, &initialize);
__ B(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then we have in the slot either some other function or an
// AllocationSite.
__ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &megamorphic);
__ B(&done);
__ Bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
__ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ Bind(&megamorphic);
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
__ Str(scratch2,
FieldMemOperand(scratch1, FeedbackVector::kFeedbackSlotsOffset));
__ B(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ Bind(&initialize);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &not_array_function);
// The target function is the Array constructor. Create an AllocationSite
// if we don't already have it, and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index, new_target);
__ B(&done);
__ Bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
feedback_vector, index, new_target);
__ Bind(&done);
// Increment the call count for all function calls.
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(scratch1, scratch1,
Operand(FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
__ Ldr(scratch2, FieldMemOperand(scratch1, 0));
__ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
__ Str(scratch2, FieldMemOperand(scratch1, 0));
}
void CallConstructStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallConstructStub::Generate");
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
Register function = x1;
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(function, &non_function);
// Check that the function is a JSFunction.
Register object_type = x10;
__ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
&non_function);
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into x2, or undefined.
__ Ldr(x2, FieldMemOperand(x5, FeedbackVector::kFeedbackSlotsOffset));
__ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
__ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
&feedback_register_initialized);
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(x2, x5);
__ Mov(x3, function);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
__ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
__ Br(x4);
__ Bind(&non_function);
__ Mov(x3, function);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
if (check_mode_ == RECEIVER_IS_UNKNOWN) {

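Unlike the ARM version, the ARM64 port above threads every register through explicit parameters and guards them with DCHECK(!AreAliased(...)). A minimal sketch of such a distinctness check, with small integers standing in for register codes (illustrative, not V8's implementation):

#include <cassert>
#include <initializer_list>

// Returns true if any register code appears twice. If a scratch register
// aliased one of the inputs, GenerateRecordCallTarget would silently
// clobber the very value it is trying to record.
static bool AreAliased(std::initializer_list<int> regs) {
  unsigned seen = 0;
  for (int code : regs) {
    if (seen & (1u << code)) return true;
    seen |= 1u << code;
  }
  return false;
}

int main() {
  // x0..x3 inputs plus x4, x5, x11, x12 scratches: all distinct, as the
  // DCHECK in the code above requires.
  assert(!AreAliased({0, 1, 2, 3, 4, 5, 11, 12}));
  assert(AreAliased({5, 5}));  // reuse would trip the check
  return 0;
}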

@@ -112,6 +112,26 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
Register registers[] = {x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
// x1: tagged value to put in the weak cell
Register registers[] = {x2, x3, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 function the function to call
@@ -119,6 +139,33 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x0, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x0, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
// x4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {x0, x1, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
@@ -391,6 +438,17 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
x1, // target to call checked to be Array function
x2, // allocation site feedback if available, undefined otherwise
x3 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {


@@ -1391,6 +1391,40 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (not including receiver)
// -- r1 : target to call verified to be Array function
// -- r2 : allocation site feedback if available, undefined otherwise.
// -- r3 : address of the first argument
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ mov(r5, Operand::Zero());
__ push(r5);
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
// Push the arguments. r3, r5, r6 will be modified.
Generate_InterpreterPushArgs(masm, r0, r3, r5, r6);
// Array constructor expects constructor in r3. It is same as r1 here.
__ mov(r3, r1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1414,6 +1414,39 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (not including receiver)
// -- x1 : target to call verified to be Array function
// -- x2 : allocation site feedback if available, undefined otherwise.
// -- x3 : address of the first argument
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver.
__ Push(xzr);
// Add a stack check before pushing arguments.
Generate_StackOverflowCheck(masm, x0, x7, &stack_overflow);
// Push the arguments. x3, x5, x6, x7 will be modified.
Generate_InterpreterPushArgs(masm, x0, x3, x5, x6, x7);
// Array constructor expects constructor in x3. It is same as call target.
__ mov(x3, x1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Unreachable();
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -136,6 +136,7 @@ namespace internal {
ASM(InterpreterPushArgsThenCallWithFinalSpread) \
ASM(InterpreterPushArgsThenConstruct) \
ASM(InterpreterPushArgsThenConstructFunction) \
ASM(InterpreterPushArgsThenConstructArray) \
ASM(InterpreterPushArgsThenConstructWithFinalSpread) \
ASM(InterpreterEnterBytecodeAdvance) \
ASM(InterpreterEnterBytecodeDispatch) \


@@ -1179,6 +1179,49 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the target to call checked to be Array function.
// -- ebx : the allocation site feedback
// -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
Label stack_overflow;
// We need two scratch registers. Register edi is available; push edx onto
// the stack.
__ Push(edx);
// Push arguments and move return address to the top of stack.
// The eax register is readonly. The ecx register will be modified. The edx
// and edi registers will be modified but restored to their original values.
Generate_InterpreterPushZeroAndArgsAndReturnAddress(masm, eax, ecx, edx, edi,
1, &stack_overflow);
// Restore edx.
__ Pop(edx);
// Array constructor expects constructor in edi. It is same as edx here.
__ Move(edi, edx);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
// Pop the temporary registers, so that return address is on top of stack.
__ Pop(edx);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// This should be unreachable.
__ int3();
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1372,6 +1372,41 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call checked to be Array function.
// -- a2 : allocation site feedback.
// -- a3 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver.
__ push(zero_reg);
Generate_StackOverflowCheck(masm, a0, t1, t4, &stack_overflow);
// This function modifies a3, t1, and t4.
Generate_InterpreterPushArgs(masm, a0, a3, t1, t4);
// ArrayConstructor stub expects constructor in a3. Set it here.
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ break_(0xCC);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1378,6 +1378,41 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call checked to be Array function.
// -- a2 : allocation site feedback.
// -- a3 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver.
__ push(zero_reg);
Generate_StackOverflowCheck(masm, a0, a5, a6, &stack_overflow);
// This function modifies a3, a5 and a6.
Generate_InterpreterPushArgs(masm, a0, a3, a5, a6);
// ArrayConstructor stub expects constructor in a3. Set it here.
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ break_(0xCC);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1419,6 +1419,40 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (not including receiver)
// -- r4 : target to call verified to be Array function
// -- r5 : allocation site feedback if available, undefined otherwise.
// -- r6 : address of the first argument
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ li(r0, Operand::Zero());
__ push(r0);
Generate_StackOverflowCheck(masm, r3, ip, &stack_overflow);
// Push the arguments. r6, r8, r3 will be modified.
Generate_InterpreterPushArgs(masm, r3, r6, r3, r8);
// Array constructor expects constructor in r6. It is same as r4 here.
__ mr(r6, r4);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1413,6 +1413,40 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (not including receiver)
// -- r3 : target to call verified to be Array function
// -- r4 : allocation site feedback if available, undefined otherwise.
// -- r5 : address of the first argument
// -----------------------------------
Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ LoadImmP(r0, Operand::Zero());
__ push(r0);
Generate_StackOverflowCheck(masm, r2, ip, &stack_overflow);
// Push the arguments. r6, r8, r3 will be modified.
Generate_InterpreterPushArgs(masm, r6, r5, r2, r7);
// Array constructor expects constructor in r5. It is same as r3 here.
__ LoadRR(r5, r3);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.


@@ -1150,6 +1150,52 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
}
// static
void Builtins::Generate_InterpreterPushArgsThenConstructArray(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdx : the target to call checked to be Array function.
// -- rbx : the allocation site feedback
// -- rcx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
Label stack_overflow;
// Number of values to be pushed.
__ Move(r8, rax);
// Add a stack check before pushing arguments.
Generate_StackOverflowCheck(masm, r8, rdi, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
// rcx and rdi will be modified.
Generate_InterpreterPushArgs(masm, r8, rcx, rdi);
// Push return address in preparation for the tail-call.
__ PushReturnAddressFrom(kScratchRegister);
// Array constructor expects constructor in rdi. It is same as rdx here.
__ Move(rdi, rdx);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
// Throw stack overflow exception.
__ bind(&stack_overflow);
{
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// This should be unreachable.
__ int3();
}
}
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.

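All eight ports of Generate_InterpreterPushArgsThenConstructArray above share the same shape: reserve a slot for the receiver, check for stack overflow, push the arguments, then tail-call ArrayConstructorStub with new_target set equal to the target. A host-side C++ model of that sequence, with a plain vector standing in for the machine stack (illustrative only; the exact order of the receiver push and the overflow check varies slightly per architecture):

#include <cstdio>
#include <vector>

constexpr size_t kStackLimit = 64;  // stand-in for the real stack limit

bool PushArgsThenConstructArray(std::vector<int>& stack, const int* args,
                                size_t argc) {
  if (stack.size() + argc + 1 > kStackLimit) return false;  // overflow path
  stack.push_back(0);  // receiver slot, filled in by the constructor
  for (size_t i = 0; i < argc; ++i) stack.push_back(args[i]);
  // The real builtin now tail-calls ArrayConstructorStub with
  // new_target == target, which is why each port copies the target
  // register into the new-target register first.
  return true;
}

int main() {
  std::vector<int> stack;
  int args[] = {1, 2, 3};
  bool ok = PushArgsThenConstructArray(stack, args, 3);
  std::printf("ok=%d stack depth=%zu\n", ok, stack.size());
  return 0;
}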

@@ -63,6 +63,19 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
LoadGlobalWithVectorDescriptor(isolate));
}
// static
Callable CodeFactory::CallIC(Isolate* isolate, ConvertReceiverMode mode) {
CallICStub stub(isolate, mode);
return make_callable(stub);
}
// static
Callable CodeFactory::CallICTrampoline(Isolate* isolate,
ConvertReceiverMode mode) {
CallICTrampolineStub stub(isolate, mode);
return make_callable(stub);
}
// static
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
return Callable(language_mode == STRICT
@@ -383,6 +396,12 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(
InterpreterPushArgsThenConstructDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterPushArgsThenConstructArray(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, InterpreterPushArgsThenConstructArray),
InterpreterPushArgsThenConstructArrayDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// Note: If we ever use fpregs in the interpreter then we will need to


@@ -28,6 +28,10 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode);
static Callable CallIC(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable CallICTrampoline(
Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable StoreGlobalIC(Isolate* isolate, LanguageMode mode);
static Callable StoreGlobalICInOptimizedCode(Isolate* isolate,
LanguageMode mode);
@@ -99,6 +103,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
InterpreterPushArgsMode mode);
static Callable InterpreterPushArgsThenConstruct(
Isolate* isolate, InterpreterPushArgsMode mode);
static Callable InterpreterPushArgsThenConstructArray(Isolate* isolate);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
static Callable InterpreterOnStackReplacement(Isolate* isolate);


@@ -571,6 +571,144 @@ TF_STUB(LoadIndexedInterceptorStub, CodeStubAssembler) {
vector);
}
void CallICStub::PrintState(std::ostream& os) const { // NOLINT
os << convert_mode();
}
// TODO(ishell): Move to CallICAssembler.
TF_STUB(CallICStub, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* target = Parameter(Descriptor::kTarget);
Node* argc = Parameter(Descriptor::kActualArgumentsCount);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
// TODO(bmeurer): The slot should actually be an IntPtr, but TurboFan's
// SimplifiedLowering cannot deal with IntPtr machine type properly yet.
slot = ChangeInt32ToIntPtr(slot);
// Static checks to assert it is safe to examine the type feedback element.
// We don't know that we have a weak cell. We might have a private symbol
// or an AllocationSite, but the memory is safe to examine.
// AllocationSite::kTransitionInfoOrBoilerplateOffset - contains a Smi or
// pointer to FixedArray.
// WeakCell::kValueOffset - contains a JSFunction or Smi(0).
// Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
// computed, meaning that it can't appear to be a pointer. If the low bit is
// 0, then the hash is computed, but the 0 bit prevents the field from
// appearing to be a pointer.
STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
STATIC_ASSERT(AllocationSite::kTransitionInfoOrBoilerplateOffset ==
WeakCell::kValueOffset &&
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
// Increment the call count.
// TODO(bmeurer): Would it be beneficial to use Int32Add on 64-bit?
Comment("increment call count");
Node* call_count = LoadFeedbackVectorSlot(vector, slot, 1 * kPointerSize);
Node* new_count = SmiAdd(call_count, SmiConstant(1));
// Count is Smi, so we don't need a write barrier.
StoreFeedbackVectorSlot(vector, slot, new_count, SKIP_WRITE_BARRIER,
1 * kPointerSize);
Label call_function(this), extra_checks(this), call(this);
// The checks. First, does function match the recorded monomorphic target?
Node* feedback_element = LoadFeedbackVectorSlot(vector, slot);
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
Node* is_monomorphic = WordEqual(target, feedback_value);
GotoIfNot(is_monomorphic, &extra_checks);
// The compare above could have been a SMI/SMI comparison. Guard against
// this convincing us that we have a monomorphic JSFunction.
Node* is_smi = TaggedIsSmi(target);
Branch(is_smi, &extra_checks, &call_function);
BIND(&call_function);
{
// Call using CallFunction builtin.
Callable callable =
CodeFactory::CallFunction(isolate(), stub->convert_mode());
TailCallStub(callable, context, target, argc);
}
BIND(&extra_checks);
{
Label mark_megamorphic(this), create_weak_cell(this, Label::kDeferred);
Comment("check if megamorphic");
// Check if it is a megamorphic target.
Node* is_megamorphic =
WordEqual(feedback_element,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
GotoIf(is_megamorphic, &call);
Comment("check if uninitialized");
// Check if it is an uninitialized target first.
Node* is_uninitialized = WordEqual(
feedback_element,
HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
GotoIfNot(is_uninitialized, &mark_megamorphic);
Comment("handle uninitialized");
// If it is not a JSFunction mark it as megamorphic.
Node* is_smi = TaggedIsSmi(target);
GotoIf(is_smi, &mark_megamorphic);
// Check if function is an object of JSFunction type.
Node* is_js_function = IsJSFunction(target);
GotoIfNot(is_js_function, &mark_megamorphic);
// Check if the function belongs to the same native context.
Node* native_context =
LoadNativeContext(LoadObjectField(target, JSFunction::kContextOffset));
Node* is_same_native_context =
WordEqual(native_context, LoadNativeContext(context));
Branch(is_same_native_context, &create_weak_cell, &mark_megamorphic);
BIND(&create_weak_cell);
{
// Wrap the {target} in a WeakCell and remember it.
Comment("create weak cell");
CreateWeakCellInFeedbackVector(vector, SmiTag(slot), target);
// Call using CallFunction builtin.
Goto(&call_function);
}
BIND(&mark_megamorphic);
{
// Mark it as megamorphic.
// MegamorphicSentinel is created as a part of Heap::InitialObjects
// and will not move during a GC, so it is safe to skip the write barrier.
DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
StoreFeedbackVectorSlot(
vector, slot,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
SKIP_WRITE_BARRIER);
Goto(&call);
}
}
BIND(&call);
{
// Call using call builtin.
Comment("call using Call builtin");
Callable callable_call = CodeFactory::Call(isolate(), stub->convert_mode());
TailCallStub(callable_call, context, target, argc);
}
}
TF_STUB(CallICTrampolineStub, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* target = Parameter(Descriptor::kTarget);
Node* argc = Parameter(Descriptor::kActualArgumentsCount);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = LoadFeedbackVectorForStub();
Callable callable = CodeFactory::CallIC(isolate(), stub->convert_mode());
TailCallStub(callable, context, target, argc, slot, vector);
}
void JSEntryStub::FinishCode(Handle<Code> code) {
Handle<FixedArray> handler_table =
code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
@@ -640,6 +778,17 @@ TF_STUB(GetPropertyStub, CodeStubAssembler) {
Return(var_result.value());
}
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
CreateAllocationSiteStub stub(isolate);
stub.GetCode();
}
void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
CreateWeakCellStub stub(isolate);
stub.GetCode();
}
// TODO(ishell): move to builtins-handler-gen.
TF_STUB(StoreSlowElementStub, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -704,6 +853,19 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
entry_hook(function, stack_pointer);
}
// TODO(ishell): move to builtins.
TF_STUB(CreateAllocationSiteStub, CodeStubAssembler) {
Return(CreateAllocationSiteInFeedbackVector(Parameter(Descriptor::kVector),
Parameter(Descriptor::kSlot)));
}
// TODO(ishell): move to builtins.
TF_STUB(CreateWeakCellStub, CodeStubAssembler) {
Return(CreateWeakCellInFeedbackVector(Parameter(Descriptor::kVector),
Parameter(Descriptor::kSlot),
Parameter(Descriptor::kValue)));
}
TF_STUB(ArrayNoArgumentConstructorStub, CodeStubAssembler) {
ElementsKind elements_kind = stub->elements_kind();
Node* native_context = LoadObjectField(Parameter(Descriptor::kFunction),

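The STATIC_ASSERTs in the CallICStub above are what make the speculative feedback read safe: a WeakCell's value, an AllocationSite's transition info, and a Symbol's hash field all sit at the same offset, so the load stays in bounds whichever of the three the slot holds. A sketch of that overlap invariant with illustrative layouts (not V8's actual object layouts):

#include <cstddef>

// Three stand-in layouts sharing one payload offset, mirroring the
// WeakCell/AllocationSite/Symbol overlap the stub relies on. The Symbol's
// hash keeps a low-bit convention so the word can never be mistaken for a
// pointer to a live object.
struct WeakCellLike { void* map; void* value; };
struct AllocationSiteLike { void* map; void* transition_info; };
struct SymbolLike { void* map; void* hash_field; };

static_assert(offsetof(WeakCellLike, value) ==
                  offsetof(AllocationSiteLike, transition_info),
              "payloads must overlap");
static_assert(offsetof(WeakCellLike, value) ==
                  offsetof(SymbolLike, hash_field),
              "payloads must overlap");

int main() { return 0; }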

@@ -34,6 +34,8 @@ class Node;
V(ArrayConstructor) \
V(CallApiCallback) \
V(CallApiGetter) \
V(CallConstruct) \
V(CallIC) \
V(CEntry) \
V(CompareIC) \
V(DoubleToI) \
@@ -46,11 +48,18 @@ class Node;
V(StoreSlowElement) \
V(SubString) \
V(NameDictionaryLookup) \
/* These are only called from FCG */ \
/* They can be removed when only the TF */ \
/* version of the corresponding stub is */ \
/* used universally */ \
V(CallICTrampoline) \
/* --- TurboFanCodeStubs --- */ \
V(AllocateHeapNumber) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(StringLength) \
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
@@ -615,6 +624,26 @@ class NumberToStringStub final : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(NumberToString, TurboFanCodeStub);
};
class CreateAllocationSiteStub : public TurboFanCodeStub {
public:
explicit CreateAllocationSiteStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateAllocationSite);
DEFINE_TURBOFAN_CODE_STUB(CreateAllocationSite, TurboFanCodeStub);
};
class CreateWeakCellStub : public TurboFanCodeStub {
public:
explicit CreateWeakCellStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateWeakCell);
DEFINE_TURBOFAN_CODE_STUB(CreateWeakCell, TurboFanCodeStub);
};
class GrowArrayElementsStub : public TurboFanCodeStub {
public:
GrowArrayElementsStub(Isolate* isolate, ElementsKind kind)
@@ -696,6 +725,27 @@ class MathPowStub: public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(MathPow, PlatformCodeStub);
};
class CallICStub : public TurboFanCodeStub {
public:
CallICStub(Isolate* isolate, ConvertReceiverMode convert_mode)
: TurboFanCodeStub(isolate) {
minor_key_ = ConvertModeBits::encode(convert_mode);
}
ConvertReceiverMode convert_mode() const {
return ConvertModeBits::decode(minor_key_);
}
protected:
typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
private:
void PrintState(std::ostream& os) const final; // NOLINT
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallIC);
DEFINE_TURBOFAN_CODE_STUB(CallIC, TurboFanCodeStub);
};
class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
public:
explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
@@ -943,6 +993,15 @@ class JSEntryStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(JSEntry, PlatformCodeStub);
};
// TODO(bmeurer/mvstanton): Turn CallConstructStub into ConstructICStub.
class CallConstructStub final : public PlatformCodeStub {
public:
explicit CallConstructStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallConstruct);
DEFINE_PLATFORM_CODE_STUB(CallConstruct, PlatformCodeStub);
};
enum ReceiverCheckMode {
// We don't know anything about the receiver.
@@ -1018,6 +1077,15 @@ class StringCharCodeAtGenerator {
DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
};
class CallICTrampolineStub : public CallICStub {
public:
CallICTrampolineStub(Isolate* isolate, ConvertReceiverMode convert_mode)
: CallICStub(isolate, convert_mode) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallICTrampoline);
DEFINE_TURBOFAN_CODE_STUB(CallICTrampoline, CallICStub);
};
class DoubleToIStub : public PlatformCodeStub {
public:
DoubleToIStub(Isolate* isolate, Register source, Register destination,


@@ -1463,7 +1463,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// The feedback is an AllocationSite, which means we have called the
// Array function and collected transition (and pretenuring) feedback
// for the resulting arrays. This has to be kept in sync with the
// implementation in Ignition.
// implementation of the CallConstructStub.
Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
// Retrieve the Array function from the {node}.


@@ -235,11 +235,6 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
Object* const obj = Get(slot);
switch (kind) {
case FeedbackSlotKind::kCall:
// If we are not running interpreted code, we need to ignore the special
// IC slots for call/construct used by the interpreter.
// TODO(mvstanton): Remove code_is_interpreted when full code is retired
// from service.
if (!code_is_interpreted) break;
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:


@@ -1662,7 +1662,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ mov(r3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ mov(r0, Operand(arg_count));
CallIC(code);
@@ -1694,10 +1695,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
Handle<Code> code = CodeFactory::Construct(isolate()).code();
// Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
CallIC(code);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(r2);
__ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(r0);

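One detail worth noting in the fullcodegen hunks, here and in the seven ports that follow: EmitCall passes the feedback slot as a raw integer (IntFromSlot), because the TurboFan CallICStub takes an untagged Int32 slot (see the ChangeInt32ToIntPtr in the stub above), while VisitCallNew passes it as a Smi (SmiFromSlot), because the platform CallConstructStub expects a Smi-tagged slot. A tiny sketch of the two encodings, assuming the 32-bit one-tag-bit Smi layout (model functions, not V8's helpers):

#include <cassert>

// Model of the two slot encodings used above (hypothetical helpers).
int IntFromSlotModel(int slot) { return slot; }       // untagged Int32
int SmiFromSlotModel(int slot) { return slot << 1; }  // Smi: value << tag

int main() {
  assert(IntFromSlotModel(7) == 7);   // what EmitCall hands the CallIC
  assert(SmiFromSlotModel(7) == 14);  // what VisitCallNew hands the stub
  return 0;
}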

@@ -1623,7 +1623,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Mov(x3, IntFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ Mov(x0, arg_count);
CallIC(code);
@@ -1655,10 +1656,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
Handle<Code> code = CodeFactory::Construct(isolate()).code();
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
__ Peek(x1, arg_count * kXRegSize);
CallIC(code);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(x2);
__ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(x0);


@@ -1556,7 +1556,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Move(edx, Immediate(IntFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Move(eax, Immediate(arg_count));
CallIC(code);
@@ -1588,10 +1589,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
Handle<Code> code = CodeFactory::Construct(isolate()).code();
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
CallIC(code);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(eax);


@@ -1664,7 +1664,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Record source position of the IC call.
SetCallPosition(expr);
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
@@ -1696,10 +1697,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
Handle<Code> code = CodeFactory::Construct(isolate()).code();
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
CallIC(code);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(v0);


@@ -1666,7 +1666,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Record source position of the IC call.
SetCallPosition(expr);
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ Ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
@@ -1698,10 +1699,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
Handle<Code> code = CodeFactory::Construct(isolate()).code();
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
__ Ld(a1, MemOperand(sp, arg_count * kPointerSize));
CallIC(code);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(v0);


@@ -1629,7 +1629,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ mov(r6, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r3, Operand(arg_count));
CallIC(code);
@@ -1661,10 +1662,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
Handle<Code> code = CodeFactory::Construct(isolate()).code();
// Load function and argument count into r4 and r3.
__ mov(r3, Operand(arg_count));
__ LoadP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
CallIC(code);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(r5);
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(r3);


@@ -1587,7 +1587,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Load(r5, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r2, Operand(arg_count));
CallIC(code);
@@ -1619,10 +1620,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
Handle<Code> code = CodeFactory::Construct(isolate()).code();
// Load function and argument count into r3 and r2.
__ mov(r2, Operand(arg_count));
__ LoadP(r3, MemOperand(sp, arg_count * kPointerSize), r0);
CallIC(code);
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(r4);
__ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(r2);


@@ -1577,7 +1577,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
SetCallPosition(expr);
Handle<Code> code = CodeFactory::Call(isolate(), mode).code();
Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Set(rdx, IntFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ Set(rax, arg_count);
CallIC(code);
@@ -1610,10 +1611,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetConstructCallPosition(expr);
Handle<Code> code = CodeFactory::Construct(isolate()).code();
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
CallIC(code);
// Record call targets in unoptimized code, but not in the snapshot.
__ EmitLoadFeedbackVector(rbx);
__ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
RestoreContext();
context()->Plug(rax);


@@ -759,6 +759,165 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
GenerateMiss(masm);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// eax : number of arguments to the construct function
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(eax);
__ push(eax);
__ push(edi);
__ push(edx);
__ push(ebx);
__ push(esi);
__ CallStub(stub);
__ pop(esi);
__ pop(ebx);
__ pop(edx);
__ pop(edi);
__ pop(eax);
__ SmiUntag(eax);
}
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if ecx is a WeakCell or a Symbol, but it's harmless to read
// at this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
__ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
__ j(equal, &done, Label::kFar);
__ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
Heap::kWeakCellMapRootIndex);
__ j(not_equal, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(FieldOperand(ecx, WeakCell::kValueOffset), &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then we have in the slot either some other function or an
// AllocationSite.
__ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
__ j(not_equal, &miss);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &megamorphic);
__ jmp(&done, Label::kFar);
__ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(ecx, Heap::kuninitialized_symbolRootIndex);
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
FeedbackVector::kFeedbackSlotsOffset),
Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
__ jmp(&done, Label::kFar);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &not_array_function);
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it, and store it in the slot.
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FeedbackVector::kFeedbackSlotsOffset + kPointerSize),
Immediate(Smi::FromInt(1)));
}
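Per feedback slot, the stub above implements a small state machine; the same transitions, modeled in standalone C++ (a sketch, not V8 code; the enum, struct, and function names are invented, and call_count mirrors the Smi kept one word past the feedback element at kFeedbackSlotsOffset + kPointerSize):
// feedback_slot_sketch.cc -- standalone model, not V8 code.
enum class SlotState { kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic };
struct Slot {
  SlotState state = SlotState::kUninitialized;
  const void* target = nullptr;  // stands in for the WeakCell's value
  int call_count = 0;            // the Smi one word past the feedback element
};
void RecordCallTarget(Slot& slot, const void* fn, const void* array_fn) {
  ++slot.call_count;  // "Increment the call count for all function calls."
  switch (slot.state) {
    case SlotState::kUninitialized:
      // First call: remember the target; the Array constructor gets an
      // AllocationSite instead of a plain WeakCell.
      slot.state = (fn == array_fn) ? SlotState::kAllocationSite
                                    : SlotState::kMonomorphic;
      slot.target = fn;
      break;
    case SlotState::kMonomorphic:
    case SlotState::kAllocationSite:
      // A different target makes the site megamorphic; the same target is a
      // hit. (A cleared WeakCell would allow re-initialization.)
      if (slot.target != fn) slot.state = SlotState::kMegamorphic;
      break;
    case SlotState::kMegamorphic:
      break;  // Terminal state: nothing left to record.
  }
}
int main() {
  Slot slot;
  int fn1, fn2;
  RecordCallTarget(slot, &fn1, /*array_fn=*/&fn2);  // -> kMonomorphic
  RecordCallTarget(slot, &fn2, /*array_fn=*/&fn2);  // -> kMegamorphic
}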
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(edi, &non_function);
// Check that the function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function);
GenerateRecordCallTarget(masm);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into ebx, or undefined.
__ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
FeedbackVector::kFeedbackSlotsOffset));
Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
__ j(equal, &feedback_register_initialized);
__ mov(ebx, isolate()->factory()->undefined_value());
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(ebx);
// Pass new target to construct stub.
__ mov(edx, edi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
__ jmp(ecx);
__ bind(&non_function);
__ mov(edx, edi);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
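Stripped of register details, the stub's dispatch reduces to one decision (a control-flow sketch with invented names, not V8 code):
// construct_dispatch_sketch.cc -- control-flow model, not V8 code.
enum class ConstructPath { kFunctionConstructStub, kGenericConstruct };
ConstructPath Dispatch(bool target_is_smi, bool target_is_js_function) {
  if (target_is_smi || !target_is_js_function) {
    // Smis and other non-JSFunction targets: defer to the generic
    // Construct builtin.
    return ConstructPath::kGenericConstruct;
  }
  // JSFunction: record feedback, load the AllocationSite (or undefined) into
  // the feedback register, then tail-call the function's construct stub.
  return ConstructPath::kFunctionConstructStub;
}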
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@ -769,6 +928,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}

View File

@ -98,12 +98,54 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx, edx, edi};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, edx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
// ecx : new target (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {eax, edi, ecx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
@ -361,6 +403,17 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
edx, // target to the call. It is checked to be Array function.
ebx, // allocation site feedback
ecx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -441,6 +441,25 @@ void FastCloneShallowArrayDescriptor::InitializePlatformIndependent(
machine_types);
}
void CreateAllocationSiteDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kVector, kSlot
MachineType machine_types[] = {MachineType::AnyTagged(),
MachineType::TaggedSigned()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void CreateWeakCellDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kVector, kSlot, kValue
MachineType machine_types[] = {MachineType::AnyTagged(),
MachineType::TaggedSigned(),
MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void CallTrampolineDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kFunction, kActualArgumentsCount
@ -547,6 +566,25 @@ void ConstructTrampolineDescriptor::InitializePlatformIndependent(
machine_types);
}
void CallICDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kActualArgumentsCount, kSlot, kVector
MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
MachineType::Int32(),
MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void CallICTrampolineDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kActualArgumentsCount, kSlot
MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
MachineType::Int32()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
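A descriptor is the sum of two hunks: the platform-specific one assigns registers, the platform-independent one above assigns machine types, in the same parameter order. Pairing the two for CallIC on ia32 (an illustrative table in C++, not V8 code):
// call_ic_descriptor_sketch.cc -- illustrative pairing, not V8 code.
struct ParameterSpec { const char* name; const char* reg; const char* type; };
// ia32 registers from the platform-specific hunk, machine types from above.
constexpr ParameterSpec kCallICParams[] = {
    {"kTarget",               "edi", "AnyTagged"},
    {"kActualArgumentsCount", "eax", "Int32"},
    {"kSlot",                 "edx", "Int32"},
    {"kVector",               "ebx", "AnyTagged"},
};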
void BuiltinDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kArgumentsCount
@ -664,6 +702,16 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformIndependent(
machine_types);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformIndependent(CallInterfaceDescriptorData* data) {
// kNumberOfArguments, kFunction, kFeedbackElement, kFirstArgument
MachineType machine_types[] = {MachineType::Int32(), MachineType::AnyTagged(),
MachineType::AnyTagged(),
MachineType::Pointer()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
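The same pairing for InterpreterPushArgsThenConstructArray, combining the ia32 registers from the earlier hunk with the machine types above (illustrative only, not V8 code):
// push_args_construct_array_sketch.cc -- illustrative pairing, not V8 code.
struct ParameterSpec { const char* name; const char* reg; const char* type; };
constexpr ParameterSpec kPushArgsThenConstructArrayParams[] = {
    {"kNumberOfArguments", "eax", "Int32"},
    {"kFunction",          "edx", "AnyTagged"},
    {"kFeedbackElement",   "ebx", "AnyTagged"},
    {"kFirstArgument",     "ecx", "Pointer"},
};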
void InterpreterCEntryDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kNumberOfArguments, kFirstArgument, kFunctionEntry

View File

@ -41,11 +41,16 @@ class PlatformInterfaceDescriptor;
V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(CallFunction) \
V(CallIC) \
V(CallICTrampoline) \
V(CallVarargs) \
V(CallForwardVarargs) \
V(CallWithSpread) \
V(CallWithArrayLike) \
V(CallConstruct) \
V(CallTrampoline) \
V(ConstructStub) \
V(ConstructVarargs) \
@ -83,6 +88,7 @@ class PlatformInterfaceDescriptor;
V(InterpreterDispatch) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
V(InterpreterPushArgsThenConstructArray) \
V(InterpreterCEntry) \
V(ResumeGenerator) \
V(FrameDropperTrampoline) \
@ -566,6 +572,23 @@ class FastCloneShallowObjectDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(FastCloneShallowObjectDescriptor, CallInterfaceDescriptor)
};
class CreateAllocationSiteDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kVector, kSlot)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CreateAllocationSiteDescriptor,
CallInterfaceDescriptor)
};
class CreateWeakCellDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kVector, kSlot, kValue)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CreateWeakCellDescriptor,
CallInterfaceDescriptor)
};
class CallTrampolineDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kActualArgumentsCount)
@ -653,6 +676,25 @@ class CallFunctionDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
};
class CallICDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kSlot, kVector)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallICDescriptor,
CallInterfaceDescriptor)
};
class CallICTrampolineDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kSlot)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallICTrampolineDescriptor,
CallInterfaceDescriptor)
};
class CallConstructDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
};
class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kMap)
@ -873,6 +915,15 @@ class InterpreterPushArgsThenConstructDescriptor
InterpreterPushArgsThenConstructDescriptor, CallInterfaceDescriptor)
};
class InterpreterPushArgsThenConstructArrayDescriptor
: public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFunction, kFeedbackElement,
kFirstArgument)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
InterpreterPushArgsThenConstructArrayDescriptor, CallInterfaceDescriptor)
};
class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunctionEntry)

View File

@ -567,6 +567,8 @@ Node* InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
Node* feedback_vector,
Node* slot_id) {
// TODO(bmeurer): Add support for the Array constructor AllocationSite,
// and unify this with the general Call/Construct IC code below.
Label extra_checks(this, Label::kDeferred), done(this);
// Increment the call count.
@ -642,6 +644,132 @@ void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
BIND(&done);
}
Node* InterpreterAssembler::CallJSWithFeedback(
compiler::Node* function, compiler::Node* context,
compiler::Node* first_arg, compiler::Node* arg_count,
compiler::Node* slot_id, compiler::Node* feedback_vector,
ConvertReceiverMode receiver_mode) {
// Static checks to assert it is safe to examine the type feedback element.
// We don't know that we have a weak cell. We might have a private symbol
// or an AllocationSite, but the memory is safe to examine.
// AllocationSite::kTransitionInfoOrBoilerplateOffset - contains a Smi or
// pointer to FixedArray.
// WeakCell::kValueOffset - contains a JSFunction or Smi(0)
// Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
// computed, meaning that it can't appear to be a pointer. If the low bit is
// 0, then hash is computed, but the 0 bit prevents the field from appearing
// to be a pointer.
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
STATIC_ASSERT(AllocationSite::kTransitionInfoOrBoilerplateOffset ==
WeakCell::kValueOffset &&
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
Variable return_value(this, MachineRepresentation::kTagged);
Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
end(this);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
// The checks. First, does function match the recorded monomorphic target?
Node* feedback_element = LoadFeedbackVectorSlot(feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
Node* is_monomorphic = WordEqual(function, feedback_value);
GotoIfNot(is_monomorphic, &extra_checks);
// The compare above could have been a Smi/Smi comparison. Guard against
// this convincing us that we have a monomorphic JSFunction.
Node* is_smi = TaggedIsSmi(function);
Branch(is_smi, &extra_checks, &call_function);
BIND(&call_function);
{
// Call using call function builtin.
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kJSFunction);
Node* code_target = HeapConstant(callable.code());
Node* ret_value = CallStub(callable.descriptor(), code_target, context,
arg_count, first_arg, function);
return_value.Bind(ret_value);
Goto(&end);
}
BIND(&extra_checks);
{
Label mark_megamorphic(this);
Comment("check if megamorphic");
// Check if it is a megamorphic target.
Node* is_megamorphic =
WordEqual(feedback_element,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
GotoIf(is_megamorphic, &call);
Comment("check if uninitialized");
// Check if it is an uninitialized target first.
Node* is_uninitialized = WordEqual(
feedback_element,
HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
GotoIfNot(is_uninitialized, &mark_megamorphic);
Comment("handle_uninitialized");
// If it is not a JSFunction, mark it as megamorphic.
Node* is_smi = TaggedIsSmi(function);
GotoIf(is_smi, &mark_megamorphic);
// Check if function is an object of JSFunction type.
Node* instance_type = LoadInstanceType(function);
Node* is_js_function =
Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
GotoIfNot(is_js_function, &mark_megamorphic);
// Check if the function belongs to the same native context.
Node* native_context = LoadNativeContext(
LoadObjectField(function, JSFunction::kContextOffset));
Node* is_same_native_context =
WordEqual(native_context, LoadNativeContext(context));
GotoIfNot(is_same_native_context, &mark_megamorphic);
CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id), function);
// Call using call function builtin.
Goto(&call_function);
BIND(&mark_megamorphic);
{
// Mark it as megamorphic.
// MegamorphicSentinel is created as part of Heap::InitialObjects and will
// not move during a GC, so it is safe to skip the write barrier.
DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
SKIP_WRITE_BARRIER);
Goto(&call);
}
}
BIND(&call);
{
Comment("invoke using Call builtin");
// Call using call builtin.
Callable callable_call = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
Node* code_target_call = HeapConstant(callable_call.code());
Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
context, arg_count, first_arg, function);
return_value.Bind(ret_value);
Goto(&end);
}
BIND(&end);
return return_value.value();
}
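Reduced to its decision points, CallJSWithFeedback classifies every call site into a fast path through the kJSFunction push-args stub or the generic path (a plain-C++ sketch, not V8 code; each bool stands in for one of the checks above):
// call_ic_fastpath_sketch.cc -- classifier model, not V8 code.
enum class CallPath { kPushArgsThenCallFunction, kPushArgsThenCallGeneric };
CallPath Classify(bool monomorphic_hit, bool megamorphic,
                  bool uninitialized, bool cacheable_js_function) {
  if (monomorphic_hit) return CallPath::kPushArgsThenCallFunction;
  if (megamorphic) return CallPath::kPushArgsThenCallGeneric;
  if (uninitialized && cacheable_js_function) {
    // First call on a JSFunction from the same native context: cache a
    // WeakCell for it, then take the fast path.
    return CallPath::kPushArgsThenCallFunction;
  }
  // Anything else transitions the slot to megamorphic and calls generically.
  return CallPath::kPushArgsThenCallGeneric;
}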
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* first_arg, Node* arg_count,
ConvertReceiverMode receiver_mode) {

View File

@ -125,6 +125,21 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* slot_id,
compiler::Node* feedback_vector);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
// including receiver) and the first argument located at |first_arg|. Type
// feedback is collected in the slot at index |slot_id|.
//
// If the |receiver_mode| is kNullOrUndefined, then the receiver is implicitly
// undefined and |first_arg| is the first parameter. Otherwise, |first_arg| is
// the receiver and it is converted according to |receiver_mode|.
compiler::Node* CallJSWithFeedback(compiler::Node* function,
compiler::Node* context,
compiler::Node* first_arg,
compiler::Node* arg_count,
compiler::Node* slot_id,
compiler::Node* feedback_vector,
ConvertReceiverMode receiver_mode);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
// including receiver) and the first argument located at |first_arg|, possibly
// including the receiver depending on |receiver_mode|.

View File

@ -1718,12 +1718,8 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
// Collect the {function} feedback.
CollectCallFeedback(function, context, feedback_vector, slot_id);
Node* result =
CallJS(function, context, first_arg, args_count, receiver_mode);
Node* result = CallJSWithFeedback(function, context, first_arg, args_count,
slot_id, feedback_vector, receiver_mode);
SetAccumulator(result);
Dispatch();
}
@ -1738,27 +1734,22 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
const int kSlotOperandIndex =
kFirstArgumentOperandIndex + kReceiverOperandCount + arg_count;
// Indices and counts of parameters to the call stub.
const int kBoilerplateParameterCount = 5;
const int kReceiverParameterIndex = 3;
const int kBoilerplateParameterCount = 7;
const int kReceiverParameterIndex = 5;
const int kReceiverParameterCount = 1;
// Only used in a DCHECK.
USE(kReceiverParameterCount);
Node* function_reg = BytecodeOperandReg(0);
Node* function = LoadRegister(function_reg);
Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
// Collect the {function} feedback.
CollectCallFeedback(function, context, feedback_vector, slot_id);
std::array<Node*, Bytecodes::kMaxOperands + kBoilerplateParameterCount>
temp;
Callable callable = CodeFactory::Call(isolate());
temp[0] = HeapConstant(callable.code());
Callable call_ic = CodeFactory::CallIC(isolate());
temp[0] = HeapConstant(call_ic.code());
temp[1] = function;
temp[2] = Int32Constant(arg_count);
temp[3] = BytecodeOperandIdxInt32(kSlotOperandIndex);
temp[4] = LoadFeedbackVector();
int parameter_index = kReceiverParameterIndex;
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
@ -1776,9 +1767,9 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
DCHECK_EQ(parameter_index,
kReceiverParameterIndex + kReceiverParameterCount + arg_count);
temp[parameter_index] = context;
temp[parameter_index] = GetContext();
Node* result = CallStubN(callable.descriptor(), 1,
Node* result = CallStubN(call_ic.descriptor(), 1,
arg_count + kBoilerplateParameterCount, &temp[0]);
SetAccumulator(result);
Dispatch();
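The restored constants encode the CallStubN parameter layout: five fixed slots (code target, function, argument count, feedback slot index, feedback vector), then the receiver at index 5, then the arguments, then the context. As a compile-time check (a sketch; every constant name except kBoilerplateParameterCount is invented):
// boilerplate_count_sketch.cc -- compile-time check, not V8 code.
constexpr int kFixedSlots = 5;    // code target, function, argument count,
                                  // feedback slot index, feedback vector
constexpr int kReceiverSlot = 1;  // receiver lands at index 5
constexpr int kContextSlot = 1;   // context is appended after the arguments
constexpr int kBoilerplateParameterCount =
    kFixedSlots + kReceiverSlot + kContextSlot;
static_assert(kBoilerplateParameterCount == 7,
              "matches the constant in the handler above");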

View File

@ -853,6 +853,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@ -1237,6 +1239,165 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
const RegList kSavedRegs = 1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7 | // a3
1 << cp.code();
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
__ MultiPush(kSavedRegs);
__ CallStub(stub);
__ MultiPop(kSavedRegs);
__ SmiUntag(a0);
}
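The kSavedRegs literal works because MIPS argument registers a0..a3 are general-purpose registers 4..7, one bit each (a standalone sketch, not V8 code; cp's bit is left out since its register code comes from the assembler):
// reglist_sketch.cc -- spelling out the bitmask, not V8 code.
#include <cstdint>
constexpr uint32_t Bit(int reg_code) { return 1u << reg_code; }
// MIPS argument registers a0..a3 are general-purpose registers 4..7.
constexpr uint32_t kArgRegs = Bit(4) | Bit(5) | Bit(6) | Bit(7);
static_assert(kArgRegs == 0xF0, "a0|a1|a2|a3");
// The stub additionally ORs in 1 << cp.code() for the context register.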
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into t2.
__ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t2, FieldMemOperand(t2, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = t1;
Register weak_value = t4;
__ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, t2, Operand(at));
__ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&megamorphic, ne, a1, Operand(t2));
__ jmp(&done);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
__ Branch(&initialize, eq, t2, Operand(at));
// MegamorphicSentinel is an immortal immovable object (a symbol) so no
// write barrier is needed.
__ bind(&megamorphic);
__ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t2, FeedbackVector::kFeedbackSlotsOffset));
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&not_array_function, ne, a1, Operand(t2));
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it, and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t0, FieldMemOperand(
at, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
__ sw(t0, FieldMemOperand(
at, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function);
// Check that the function is a JSFunction.
__ GetObjectType(a1, t1, t1);
__ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));
GenerateRecordCallTarget(masm);
__ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
__ lw(a2, FieldMemOperand(t1, FeedbackVector::kFeedbackSlotsOffset));
__ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&feedback_register_initialized, eq, t1, Operand(at));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(a2, t1);
// Pass function as new target.
__ mov(a3, a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
__ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
__ bind(&non_function);
__ mov(a3, a1);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
DCHECK(!t0.is(index_));

View File

@ -97,12 +97,54 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a3, a1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
// t0 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {a0, a1, t0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: target
@ -357,6 +399,17 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (not including receiver)
a1, // the target to call verified to be Array function
a2, // allocation site feedback
a3, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -850,6 +850,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@ -1234,6 +1236,172 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
const RegList kSavedRegs = 1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7 | // a3
1 << cp.code();
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
__ MultiPush(kSavedRegs);
__ CallStub(stub);
__ MultiPop(kSavedRegs);
__ SmiUntag(a0);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into a5.
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
__ Ld(a5, FieldMemOperand(a5, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = a6;
Register weak_value = t0;
__ Ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, a5, Operand(at));
__ Ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&megamorphic, ne, a1, Operand(a5));
__ jmp(&done);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
__ Branch(&initialize, eq, a5, Operand(at));
// MegamorphicSentinel is an immortal immovable object (a symbol) so no
// write barrier is needed.
__ bind(&megamorphic);
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Sd(at, FieldMemOperand(a5, FeedbackVector::kFeedbackSlotsOffset));
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&not_array_function, ne, a1, Operand(a5));
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it, and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ SmiScale(a4, a3, kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a4));
__ Ld(a4, FieldMemOperand(
a5, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
__ Sd(a4, FieldMemOperand(
a5, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
}
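On mips64 the slot index arrives as a 64-bit Smi whose payload sits in the upper 32 bits, so the dsrl by 32 - kPointerSizeLog2 converts it directly into a byte offset. A standalone check of that arithmetic (a sketch, not V8 code):
// smi64_offset_sketch.cc -- standalone check, not V8 code.
#include <cassert>
#include <cstdint>
int main() {
  // On 64-bit V8 a Smi keeps its payload in the upper 32 bits.
  int64_t slot = 6;
  int64_t smi = slot << 32;          // Smi-tagged slot index
  // Shifting right by (32 - kPointerSizeLog2) turns the tag shift into a
  // multiply by the pointer size: smi >> 29 == slot * 8.
  int64_t offset = smi >> (32 - 3);  // kPointerSizeLog2 == 3
  assert(offset == slot * 8);
}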
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function);
// Check that the function is a JSFunction.
__ GetObjectType(a1, a5, a5);
__ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
GenerateRecordCallTarget(masm);
__ dsrl(at, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, at);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
__ Ld(a2, FieldMemOperand(a5, FeedbackVector::kFeedbackSlotsOffset));
__ Ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&feedback_register_initialized, eq, a5, Operand(at));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(a2, a5);
// Pass function as new target.
__ mov(a3, a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
__ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ bind(&non_function);
__ mov(a3, a1);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
DCHECK(!a4.is(index_));

View File

@ -97,12 +97,54 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a3, a1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
// a4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {a0, a1, a4, a2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: target
@ -356,6 +398,17 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (not including receiver)
a1, // the target to call verified to be Array function
a2, // allocation site feedback
a3, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -818,6 +818,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@ -1185,6 +1187,179 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ blr();
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r3 : number of arguments to the construct function
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r3);
__ Push(r6, r5, r4, r3);
__ Push(cp);
__ CallStub(stub);
__ Pop(cp);
__ Pop(r6, r5, r4, r3);
__ SmiUntag(r3);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r3 : number of arguments to the construct function
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
const int count_offset = FeedbackVector::kFeedbackSlotsOffset + kPointerSize;
// Load the cache state into r8.
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
__ LoadP(r8, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r9;
Register weak_value = r10;
__ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
__ cmp(r4, weak_value);
__ beq(&done);
__ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
__ beq(&done);
__ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ bne(&check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ b(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the slot either some other function or an
// AllocationSite.
__ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
__ bne(&miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&megamorphic);
__ b(&done);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r8, Heap::kuninitialized_symbolRootIndex);
__ beq(&initialize);
// MegamorphicSentinel is an immortal immovable object (a symbol) so no
// write barrier is needed.
__ bind(&megamorphic);
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ StoreP(ip, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset), r0);
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&not_array_function);
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it, and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
__ LoadP(r7, FieldMemOperand(r8, count_offset));
__ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
__ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// r3 : number of arguments
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(r4, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
__ bne(&non_function);
GenerateRecordCallTarget(masm);
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
// Put the AllocationSite from the feedback vector into r5, or undefined.
__ LoadP(r5, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset));
__ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
__ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
if (CpuFeatures::IsSupported(ISELECT)) {
__ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
__ isel(eq, r5, r5, r8);
} else {
Label feedback_register_initialized;
__ beq(&feedback_register_initialized);
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
}
__ AssertUndefinedOrAllocationSite(r5, r8);
// Pass function as new target.
__ mr(r6, r4);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
__ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
__ bind(&non_function);
__ mr(r6, r4);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
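The ISELECT branch above is a branchless form of the feedback-register fix-up; in portable C++ the same selection is a single conditional expression (a sketch with invented names, not V8 code):
// isel_sketch.cc -- the branchless selection, not V8 code.
const void* PickFeedback(bool is_allocation_site, const void* slot_value,
                         const void* undefined_value) {
  // ISELECT form of: "if the slot doesn't hold an AllocationSite, use
  // undefined instead" -- one conditional select, no branch.
  return is_allocation_site ? slot_value : undefined_value;
}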
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi, trigger the non-string case.

View File

@ -97,12 +97,40 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r6};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r6, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r6};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r6, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments
@ -372,6 +400,17 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (not including receiver)
r4, // target to call checked to be Array function
r5, // allocation site feedback if available, undefined otherwise
r6 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -782,6 +782,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@ -1173,6 +1175,171 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ b(r14);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r2 : number of arguments to the construct function
// r3 : the function to call
// r4 : feedback vector
// r5 : slot in feedback vector (Smi)
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r2);
__ Push(r5, r4, r3, r2);
__ Push(cp);
__ CallStub(stub);
__ Pop(cp);
__ Pop(r5, r4, r3, r2);
__ SmiUntag(r2);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r2 : number of arguments to the construct function
// r3 : the function to call
// r4 : feedback vector
// r5 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
const int count_offset = FeedbackVector::kFeedbackSlotsOffset + kPointerSize;
// Load the cache state into r7.
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
__ LoadP(r7, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r8;
Register weak_value = r9;
__ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
__ CmpP(r3, weak_value);
__ beq(&done, Label::kNear);
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
__ beq(&done, Label::kNear);
__ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ bne(&check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ b(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
__ bne(&miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ CmpP(r3, r7);
__ bne(&megamorphic);
__ b(&done, Label::kNear);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
__ beq(&initialize);
// MegamorphicSentinel is an immortal immovable object (a symbol) so no
// write barrier is needed.
__ bind(&megamorphic);
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ StoreP(ip, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset), r0);
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ CmpP(r3, r7);
__ bne(&not_array_function);
// The target function is the Array constructor; create an AllocationSite
// if we don't already have it, and store it in the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done, Label::kNear);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
__ LoadP(r6, FieldMemOperand(r7, count_offset));
__ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
__ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// r2 : number of arguments
// r3 : the function to call
// r4 : feedback vector
// r5 : slot in feedback vector (Smi, for RecordCallTarget)
Label non_function;
// Check that the function is not a smi.
__ JumpIfSmi(r3, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r3, r7, r7, JS_FUNCTION_TYPE);
__ bne(&non_function);
GenerateRecordCallTarget(masm);
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
// Put the AllocationSite from the feedback vector into r4, or undefined.
__ LoadP(r4, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset));
__ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
__ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
Label feedback_register_initialized;
__ beq(&feedback_register_initialized);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(r4, r7);
// Pass function as new target.
__ LoadRR(r5, r3);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
__ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
__ bind(&non_function);
__ LoadRR(r5, r3);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi, trigger the non-string case.

View File

@ -92,12 +92,36 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r5, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r5, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
@ -359,6 +383,17 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (not including receiver)
r3, // target to call checked to be Array function
r4, // allocation site feedback if available, undefined otherwise
r5 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -647,6 +647,164 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
GenerateMiss(masm);
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// rax : number of arguments to the construct function
// rbx : feedback vector
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ Integer32ToSmi(rax, rax);
__ Push(rax);
__ Push(rdi);
__ Integer32ToSmi(rdx, rdx);
__ Push(rdx);
__ Push(rbx);
__ Push(rsi);
__ CallStub(stub);
__ Pop(rsi);
__ Pop(rbx);
__ Pop(rdx);
__ Pop(rdi);
__ Pop(rax);
__ SmiToInteger32(rdx, rdx);
__ SmiToInteger32(rax, rax);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
// rbx : feedback vector
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into r11.
__ SmiToInteger32(rdx, rdx);
__ movp(r11, FieldOperand(rbx, rdx, times_pointer_size,
FeedbackVector::kFeedbackSlotsOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r11 is a WeakCell or a Symbol, but it's harmless to read
// at this position in a symbol (see static asserts in feedback-vector.h).
Label check_allocation_site;
__ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
__ j(equal, &done, Label::kFar);
__ CompareRoot(r11, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
Heap::kWeakCellMapRootIndex);
__ j(not_equal, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ CheckSmi(FieldOperand(r11, WeakCell::kValueOffset));
__ j(equal, &initialize);
__ jmp(&megamorphic);
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then the slot holds either some other function or an
// AllocationSite.
__ CompareRoot(FieldOperand(r11, 0), Heap::kAllocationSiteMapRootIndex);
__ j(not_equal, &miss);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &megamorphic);
__ jmp(&done);
__ bind(&miss);
// A monomorphic miss (i.e., the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r11, Heap::kuninitialized_symbolRootIndex);
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ Move(FieldOperand(rbx, rdx, times_pointer_size,
FeedbackVector::kFeedbackSlotsOffset),
FeedbackVector::MegamorphicSentinel(isolate));
__ jmp(&done);
// An uninitialized cache is patched with a WeakCell holding the function,
// or with an AllocationSite (which records the ElementsKind) if the
// function is the Array constructor.
__ bind(&initialize);
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &not_array_function);
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
// Increment the call count for all function calls.
__ SmiAddConstant(
FieldOperand(rbx, rdx, times_pointer_size,
FeedbackVector::kFeedbackSlotsOffset + kPointerSize),
Smi::FromInt(1));
}
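// A sketch (for illustration only) of the feedback slot layout and the
// state transitions implemented above; make, f and g are hypothetical:
//
//   slot[N]     : uninitialized symbol | WeakCell(function)
//               | AllocationSite | megamorphic symbol
//   slot[N + 1] : call count as a Smi, incremented on every call
//
//   function make(C) { return new C(); }  // one feedback slot for new C()
//   make(f);      // uninitialized -> WeakCell(f), count = 1
//   make(f);      // stays WeakCell(f), count = 2
//   make(g);      // WeakCell(f) -> megamorphic symbol
//   make(Array);  // had Array been seen first: AllocationSite instead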
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
// rbx : feedback vector
// rdx : slot in feedback vector (Smi)
// rdi : constructor function
Label non_function;
// Check that the constructor is not a smi.
__ JumpIfSmi(rdi, &non_function);
// Check that constructor is a JSFunction.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, r11);
__ j(not_equal, &non_function);
GenerateRecordCallTarget(masm);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into rbx, or undefined.
__ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
FeedbackVector::kFeedbackSlotsOffset));
__ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
__ j(equal, &feedback_register_initialized, Label::kNear);
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(rbx);
// Pass new target to construct stub.
__ movp(rdx, rdi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
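// Code objects keep their instructions kHeaderSize bytes past the tagged
// object start, so compute the actual entry address before jumping.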
__ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
__ jmp(rcx);
__ bind(&non_function);
__ movp(rdx, rdi);
__ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@ -657,6 +815,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}

View File

@ -99,12 +99,52 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
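// rbx : feedback vector
// rdx : slot in feedback vector (Smi)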
Register registers[] = {rbx, rdx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
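// rbx : feedback vector
// rdx : slot in feedback vector (Smi)
// rdi : the function to cache in the weak cell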
Register registers[] = {rbx, rdx, rdi};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdi};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
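// rdi : the function to call
// rax : number of arguments
// rdx : slot in feedback vector (Smi)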
Register registers[] = {rdi, rax, rdx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
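// rdi : the function to call
// rax : number of arguments
// rdx : slot in feedback vector (Smi)
// rbx : feedback vector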
Register registers[] = {rdi, rax, rdx, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
// rbx : feedback vector
// rdx : slot in feedback vector (Smi, for RecordCallTarget)
// rdi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {rax, rdi, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@ -361,6 +401,17 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
rax, // argument count (not including receiver)
rdx, // target to call, checked to be the Array function
rbx, // allocation site feedback if available, undefined otherwise
rcx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {