Reland of Type Feedback Vector lives in the closure
(Fixed a bug found by nosnap builds.)

We get less "pollution" of type feedback if we have one vector per native context, rather than one for the whole system. This CL moves the vector appropriately. We rely more heavily on the Optimized Code Map in the SharedFunctionInfo. The vector actually lives in the first slot of the literals array (indeed, there is great commonality between those arrays; they can be thought of as the same thing), so we make a greater effort to ensure there is a valid literals array after compilation.

This meant, for performance reasons, that we needed to extend FastNewClosureStub to support creating closures with literals. Ultimately, it drove us to move the optimized code map lookup out of FastNewClosureStub and into the CompileLazy builtin.

The heap change is trivial, so I TBR Hannes for it...

TBR=hpayer@chromium.org
BUG=
Review URL: https://codereview.chromium.org/1642613002

Cr-Commit-Position: refs/heads/master@{#33548}
Parent: 282648c2e8
Commit: d984b3b0ce
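Before the diff, a minimal illustrative sketch (not V8 code) of the layout change this CL makes. Only the struct and field names mirror the classes edited below; the standalone structs themselves are assumptions for illustration.

```cpp
// Illustrative stand-ins for the real V8 heap objects touched by this CL.
struct TypeFeedbackVector;    // per-native-context IC feedback (new home)
struct TypeFeedbackMetadata;  // shape of the vector, shared by all contexts

struct LiteralsArray {        // one per closure, i.e. per native context
  TypeFeedbackVector* feedback_vector;  // slot 0: LiteralsArray::kFeedbackVectorOffset
  // ... materialized literals follow ...
};

struct SharedFunctionInfo {
  // Was: TypeFeedbackVector* feedback_vector (one vector for all contexts).
  TypeFeedbackMetadata* feedback_metadata;
};

struct JSFunction {
  SharedFunctionInfo* shared;
  LiteralsArray* literals;
};

// Old load path: vector = closure->shared->feedback_vector
// New load path: vector = closure->literals->feedback_vector
TypeFeedbackVector* LoadFeedbackVector(JSFunction* closure) {
  return closure->literals->feedback_vector;
}
```

This is why the diff keeps inserting JSFunction::EnsureLiterals() calls: every consumer of the vector now dereferences the closure's literals array, so it must never be missing.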
@@ -1157,6 +1157,151 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {


void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3 : new target (preserved for callee)
  //  -- r1 : target function (preserved for callee)
  // -----------------------------------
  // First lookup code, maybe we don't need to compile!
  Label gotta_call_runtime, gotta_call_runtime_no_stack;
  Label maybe_call_runtime;
  Label try_shared;
  Label loop_top, loop_bottom;

  Register closure = r1;
  Register new_target = r3;
  __ push(new_target);
  __ push(closure);

  Register map = r0;
  Register index = r2;
  __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(map,
         FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
  __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
  __ cmp(index, Operand(Smi::FromInt(2)));
  __ b(lt, &gotta_call_runtime);

  // Find literals.
  // r3  : native context
  // r2  : length / index
  // r0  : optimized code map
  // stack[0] : new target
  // stack[4] : closure
  Register native_context = r3;
  __ ldr(native_context, NativeContextMemOperand());

  __ bind(&loop_top);
  Register temp = r1;
  Register array_pointer = r5;

  // Does the native context match?
  __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(temp, FieldMemOperand(array_pointer,
                               SharedFunctionInfo::OffsetToPreviousContext()));
  __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
  __ cmp(temp, native_context);
  __ b(ne, &loop_bottom);
  // OSR id set to none?
  __ ldr(temp, FieldMemOperand(array_pointer,
                               SharedFunctionInfo::OffsetToPreviousOsrAstId()));
  const int bailout_id = BailoutId::None().ToInt();
  __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
  __ b(ne, &loop_bottom);
  // Literals available?
  __ ldr(temp, FieldMemOperand(array_pointer,
                               SharedFunctionInfo::OffsetToPreviousLiterals()));
  __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
  __ JumpIfSmi(temp, &gotta_call_runtime);

  // Save the literals in the closure.
  __ ldr(r4, MemOperand(sp, 0));
  __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
  __ push(index);
  __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(index);

  // Code available?
  Register entry = r4;
  __ ldr(entry,
         FieldMemOperand(array_pointer,
                         SharedFunctionInfo::OffsetToPreviousCachedCode()));
  __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &maybe_call_runtime);

  // Found literals and code. Get them into the closure and return.
  __ pop(closure);
  // Store code entry in the closure.
  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));

  Label install_optimized_code_and_tailcall;
  __ bind(&install_optimized_code_and_tailcall);
  __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));

  // Link the closure into the optimized function list.
  // r4 : code entry
  // r3 : native context
  // r1 : closure
  __ ldr(r5,
         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ str(closure,
         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  // Save closure before the write barrier.
  __ mov(r5, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0,
                            kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ mov(closure, r5);
  __ pop(new_target);
  __ Jump(entry);

  __ bind(&loop_bottom);
  __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
  __ cmp(index, Operand(Smi::FromInt(1)));
  __ b(gt, &loop_top);

  // We found neither literals nor code.
  __ jmp(&gotta_call_runtime);

  __ bind(&maybe_call_runtime);
  __ pop(closure);

  // Last possibility. Check the context free optimized code map entry.
  __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
                                         SharedFunctionInfo::kSharedCodeIndex));
  __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &try_shared);

  // Store code entry in the closure.
  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ jmp(&install_optimized_code_and_tailcall);

  __ bind(&try_shared);
  __ pop(new_target);
  // Is the full code valid?
  __ ldr(entry,
         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
  __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
  __ and_(r5, r5, Operand(Code::KindField::kMask));
  __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
  __ cmp(r5, Operand(Code::BUILTIN));
  __ b(eq, &gotta_call_runtime_no_stack);
  // Yes, install the full code.
  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
  __ Jump(entry);

  __ bind(&gotta_call_runtime);
  __ pop(closure);
  __ pop(new_target);
  __ bind(&gotta_call_runtime_no_stack);
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
}
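The builtin above searches the SharedFunctionInfo's optimized code map before falling back to Runtime::kCompileLazy. As a reading aid, here is a self-contained C++ sketch of that search; the types and accessors are stand-ins (assumptions), and only the entry layout it walks — context, OSR AST id, literals, cached code, plus a context-free shared-code slot — mirrors the offsets the stub reads. The try_shared path (installing the SharedFunctionInfo's own unoptimized code) is omitted for brevity.

```cpp
// Sketch only, not V8 code: the lookup Generate_CompileLazy performs.
#include <vector>

struct Code {};
struct Context {};
struct LiteralsArray {};

constexpr int kNoOsrAstId = -1;  // stand-in for BailoutId::None()

struct CodeMapEntry {        // one per native context that optimized the SFI
  Context* context;          // weak (a WeakCell) in the real map
  int osr_ast_id;            // kNoOsrAstId for a non-OSR entry
  LiteralsArray* literals;   // weak; nullptr models a cleared cell
  Code* cached_code;         // weak; nullptr models a cleared cell
};

struct OptimizedCodeMap {
  std::vector<CodeMapEntry> entries;
  Code* shared_code;         // context-free slot (kSharedCodeIndex)
};

// Returns the code to install (nullptr => call Runtime::kCompileLazy) and,
// via literals_out, the literals array to hang on the closure.
Code* LookupOptimizedCode(const OptimizedCodeMap& map, Context* native_context,
                          LiteralsArray** literals_out) {
  // Walk back to front, like the loop_top/loop_bottom loop in the stub.
  for (int i = static_cast<int>(map.entries.size()) - 1; i >= 0; --i) {
    const CodeMapEntry& e = map.entries[i];
    if (e.context != native_context) continue;   // wrong native context
    if (e.osr_ast_id != kNoOsrAstId) continue;   // OSR entry, skip
    if (e.literals == nullptr) return nullptr;   // literals were collected
    *literals_out = e.literals;                  // save literals in the closure
    if (e.cached_code != nullptr) return e.cached_code;  // install + tail-call
    return map.shared_code;  // maybe_call_runtime: context-free fallback
  }
  return nullptr;  // found neither literals nor code
}
```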
@@ -1048,9 +1048,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {


void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
  ldr(vector,
      FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
  ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
  ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -1116,6 +1116,135 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {


void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x3 : new target (preserved for callee)
  //  -- x1 : target function (preserved for callee)
  // -----------------------------------
  // First lookup code, maybe we don't need to compile!
  Label gotta_call_runtime;
  Label maybe_call_runtime;
  Label try_shared;
  Label loop_top, loop_bottom;

  Register closure = x1;
  Register new_target = x3;
  Register map = x0;
  Register index = x2;
  __ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(map,
         FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
  __ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
  __ Cmp(index, Operand(2));
  __ B(lt, &gotta_call_runtime);

  // Find literals.
  // x3  : native context
  // x2  : length / index
  // x0  : optimized code map
  // stack[0] : new target
  // stack[4] : closure
  Register native_context = x4;
  __ Ldr(native_context, NativeContextMemOperand());

  __ Bind(&loop_top);
  Register temp = x5;
  Register array_pointer = x6;

  // Does the native context match?
  __ Add(array_pointer, map, Operand(index, LSL, kPointerSizeLog2));
  __ Ldr(temp, FieldMemOperand(array_pointer,
                               SharedFunctionInfo::OffsetToPreviousContext()));
  __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
  __ Cmp(temp, native_context);
  __ B(ne, &loop_bottom);
  // OSR id set to none?
  __ Ldr(temp, FieldMemOperand(array_pointer,
                               SharedFunctionInfo::OffsetToPreviousOsrAstId()));
  const int bailout_id = BailoutId::None().ToInt();
  __ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
  __ B(ne, &loop_bottom);
  // Literals available?
  __ Ldr(temp, FieldMemOperand(array_pointer,
                               SharedFunctionInfo::OffsetToPreviousLiterals()));
  __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
  __ JumpIfSmi(temp, &gotta_call_runtime);

  // Save the literals in the closure.
  __ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Code available?
  Register entry = x7;
  __ Ldr(entry,
         FieldMemOperand(array_pointer,
                         SharedFunctionInfo::OffsetToPreviousCachedCode()));
  __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &maybe_call_runtime);

  // Found literals and code. Get them into the closure and return.
  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));

  Label install_optimized_code_and_tailcall;
  __ Bind(&install_optimized_code_and_tailcall);
  __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));

  // Link the closure into the optimized function list.
  // x7 : code entry
  // x4 : native context
  // x1 : closure
  __ Ldr(x8,
         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ Str(x8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, x8, x0,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ Str(closure,
         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ Mov(x5, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, x5, x0,
                            kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Jump(entry);

  __ Bind(&loop_bottom);
  __ Sub(index, index, Operand(SharedFunctionInfo::kEntryLength));
  __ Cmp(index, Operand(1));
  __ B(gt, &loop_top);

  // We found neither literals nor code.
  __ B(&gotta_call_runtime);

  __ Bind(&maybe_call_runtime);

  // Last possibility. Check the context free optimized code map entry.
  __ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
                                         SharedFunctionInfo::kSharedCodeIndex));
  __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &try_shared);

  // Store code entry in the closure.
  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ B(&install_optimized_code_and_tailcall);

  __ Bind(&try_shared);
  // Is the full code valid?
  __ Ldr(entry,
         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
  __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
  __ and_(x5, x5, Operand(Code::KindField::kMask));
  __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
  __ Cmp(x5, Operand(Code::BUILTIN));
  __ B(eq, &gotta_call_runtime);
  // Yes, install the full code.
  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
  __ Jump(entry);

  __ Bind(&gotta_call_runtime);
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
}
@@ -2653,9 +2653,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {


void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  Ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
  Ldr(vector,
      FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
  Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
  Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -94,26 +94,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
  HValue* BuildInternalArrayConstructor(ElementsKind kind,
                                        ArgumentClass argument_class);

  // BuildCheckAndInstallOptimizedCode emits code to install the optimized
  // function found in the optimized code map at map_index in js_function, if
  // the function at map_index matches the given native_context. Builder is
  // left in the "Then()" state after the install.
  void BuildCheckAndInstallOptimizedCode(HValue* js_function,
                                         HValue* native_context,
                                         IfBuilder* builder,
                                         HValue* optimized_map,
                                         HValue* map_index);
  void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
                                 HValue* code_object, HValue* literals);
  void BuildInstallCode(HValue* js_function, HValue* shared_info);

  HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
                                         HValue* iterator,
                                         int field_offset);
  void BuildInstallFromOptimizedCodeMap(HValue* js_function,
                                        HValue* shared_info,
                                        HValue* native_context);

  HValue* BuildToString(HValue* input, bool convert);
  HValue* BuildToPrimitive(HValue* input, HValue* input_map);

@@ -1864,188 +1844,14 @@ HValue* CodeStubGraphBuilder<ToObjectStub>::BuildCodeStub() {
Handle<Code> ToObjectStub::GenerateCode() { return DoGenerateCode(this); }


void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
    HValue* js_function,
    HValue* native_context,
    IfBuilder* builder,
    HValue* optimized_map,
    HValue* map_index) {
  HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
  HValue* context_slot = LoadFromOptimizedCodeMap(
      optimized_map, map_index, SharedFunctionInfo::kContextOffset);
  context_slot = Add<HLoadNamedField>(context_slot, nullptr,
                                      HObjectAccess::ForWeakCellValue());
  HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
      optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
  HValue* code_object = LoadFromOptimizedCodeMap(
      optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
  code_object = Add<HLoadNamedField>(code_object, nullptr,
                                     HObjectAccess::ForWeakCellValue());
  builder->If<HCompareObjectEqAndBranch>(native_context,
                                         context_slot);
  builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
  builder->And();
  builder->IfNot<HCompareObjectEqAndBranch>(code_object,
                                            graph()->GetConstant0());
  builder->Then();
  HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
      map_index, SharedFunctionInfo::kLiteralsOffset);
  literals = Add<HLoadNamedField>(literals, nullptr,
                                  HObjectAccess::ForWeakCellValue());
  IfBuilder maybe_deopt(this);
  maybe_deopt.If<HCompareObjectEqAndBranch>(literals, graph()->GetConstant0());
  maybe_deopt.ThenDeopt(Deoptimizer::kLiteralsWereDisposed);
  maybe_deopt.End();

  BuildInstallOptimizedCode(js_function, native_context, code_object, literals);

  // The builder continues in the "then" after this function.
}


void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(HValue* js_function,
                                                         HValue* native_context,
                                                         HValue* code_object,
                                                         HValue* literals) {
  Counters* counters = isolate()->counters();
  AddIncrementCounter(counters->fast_new_closure_install_optimized());

  // TODO(fschneider): Idea: store proper code pointers in the optimized code
  // map and either unmangle them on marking or do nothing as the whole map is
  // discarded on major GC anyway.
  Add<HStoreCodeEntry>(js_function, code_object);
  Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
                        literals);

  // Now link a function into a list of optimized functions.
  HValue* optimized_functions_list = Add<HLoadNamedField>(
      native_context, nullptr,
      HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
  Add<HStoreNamedField>(js_function,
                        HObjectAccess::ForNextFunctionLinkPointer(),
                        optimized_functions_list);

  // This store is the only one that should have a write barrier.
  Add<HStoreNamedField>(native_context,
      HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
      js_function);
}


void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
                                                HValue* shared_info) {
  Add<HStoreNamedField>(js_function,
                        HObjectAccess::ForNextFunctionLinkPointer(),
                        graph()->GetConstantUndefined());
  HValue* code_object = Add<HLoadNamedField>(shared_info, nullptr,
                                             HObjectAccess::ForCodeOffset());
  Add<HStoreCodeEntry>(js_function, code_object);
}


HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
    HValue* optimized_map,
    HValue* iterator,
    int field_offset) {
  // By making sure to express these loads in the form [<hvalue> + constant]
  // the keyed load can be hoisted.
  DCHECK(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
  HValue* field_slot = iterator;
  if (field_offset > 0) {
    HValue* field_offset_value = Add<HConstant>(field_offset);
    field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
  }
  HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
                                              nullptr, nullptr, FAST_ELEMENTS);
  return field_entry;
}


void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
    HValue* js_function,
    HValue* shared_info,
    HValue* native_context) {
  Counters* counters = isolate()->counters();
  Factory* factory = isolate()->factory();
  IfBuilder is_optimized(this);
  HInstruction* optimized_map = Add<HLoadNamedField>(
      shared_info, nullptr, HObjectAccess::ForOptimizedCodeMap());
  HValue* null_constant = Add<HConstant>(0);
  is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
  is_optimized.Then();
  {
    BuildInstallCode(js_function, shared_info);
  }
  is_optimized.Else();
  {
    AddIncrementCounter(counters->fast_new_closure_try_optimized());
    // The {optimized_map} points to fixed array of 4-element entries:
    //   (native context, optimized code, literals, ast-id).
    // Iterate through the {optimized_map} backwards. After the loop, if no
    // matching optimized code was found, install unoptimized code.
    //   for(i = map.length() - SharedFunctionInfo::kEntryLength;
    //       i >= SharedFunctionInfo::kEntriesStart;
    //       i -= SharedFunctionInfo::kEntryLength) { ... }
    HValue* first_entry_index =
        Add<HConstant>(SharedFunctionInfo::kEntriesStart);
    HValue* shared_function_entry_length =
        Add<HConstant>(SharedFunctionInfo::kEntryLength);
    LoopBuilder loop_builder(this, context(), LoopBuilder::kPostDecrement,
                             shared_function_entry_length);
    HValue* array_length = Add<HLoadNamedField>(
        optimized_map, nullptr, HObjectAccess::ForFixedArrayLength());
    HValue* start_pos =
        AddUncasted<HSub>(array_length, shared_function_entry_length);
    HValue* slot_iterator =
        loop_builder.BeginBody(start_pos, first_entry_index, Token::GTE);
    {
      IfBuilder done_check(this);
      BuildCheckAndInstallOptimizedCode(js_function, native_context,
                                        &done_check, optimized_map,
                                        slot_iterator);
      // Fall out of the loop
      loop_builder.Break();
    }
    loop_builder.EndBody();

    // If {slot_iterator} is less than the first entry index, then we failed to
    // find a context-dependent code and try context-independent code next.
    IfBuilder no_optimized_code_check(this);
    no_optimized_code_check.If<HCompareNumericAndBranch>(
        slot_iterator, first_entry_index, Token::LT);
    no_optimized_code_check.Then();
    {
      IfBuilder shared_code_check(this);
      HValue* shared_code =
          Add<HLoadNamedField>(optimized_map, nullptr,
                               HObjectAccess::ForOptimizedCodeMapSharedCode());
      shared_code = Add<HLoadNamedField>(shared_code, nullptr,
                                         HObjectAccess::ForWeakCellValue());
      shared_code_check.IfNot<HCompareObjectEqAndBranch>(
          shared_code, graph()->GetConstant0());
      shared_code_check.Then();
      {
        // Store the context-independent optimized code.
        HValue* literals = Add<HConstant>(factory->empty_fixed_array());
        BuildInstallOptimizedCode(js_function, native_context, shared_code,
                                  literals);
      }
      shared_code_check.Else();
      {
        // Store the unoptimized code.
        BuildInstallCode(js_function, shared_info);
      }
    }
  }
}


template<>
HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
  Counters* counters = isolate()->counters();
  Factory* factory = isolate()->factory();
  HInstruction* empty_fixed_array =
      Add<HConstant>(factory->empty_fixed_array());
  HInstruction* empty_literals_array =
      Add<HConstant>(factory->empty_literals_array());
  HValue* shared_info = GetParameter(0);

  AddIncrementCounter(counters->fast_new_closure_total());

@@ -2071,19 +1877,20 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
  Add<HStoreNamedField>(js_function, HObjectAccess::ForElementsPointer(),
                        empty_fixed_array);
  Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
                        empty_fixed_array);
                        empty_literals_array);
  Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
                        graph()->GetConstantHole());
  Add<HStoreNamedField>(
      js_function, HObjectAccess::ForSharedFunctionInfoPointer(), shared_info);
  Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
                        context());

  // Initialize the code pointer in the function to be the one found in the
  // shared function info object. But first check if there is an optimized
  // version for our context.
  BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);

  Handle<Code> lazy_builtin(
      isolate()->builtins()->builtin(Builtins::kCompileLazy));
  HConstant* lazy = Add<HConstant>(lazy_builtin);
  Add<HStoreCodeEntry>(js_function, lazy);
  Add<HStoreNamedField>(js_function,
                        HObjectAccess::ForNextFunctionLinkPointer(),
                        graph()->GetConstantUndefined());
  return js_function;
}
@@ -664,11 +664,7 @@ ElementsTransitionAndStoreStub::GetCallInterfaceDescriptor() const {
  return VectorStoreTransitionDescriptor(isolate());
}


void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  descriptor->Initialize(Runtime::FunctionForId(Runtime::kNewClosure)->entry);
}

void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {}

void FastNewContextStub::InitializeDescriptor(CodeStubDescriptor* d) {}
@@ -130,8 +130,8 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info)
  if (shared_info()->is_compiled()) {
    // We should initialize the CompilationInfo feedback vector from the
    // passed in shared info, rather than creating a new one.
    feedback_vector_ = Handle<TypeFeedbackVector>(
        shared_info()->feedback_vector(), parse_info->isolate());
    feedback_metadata_ = Handle<TypeFeedbackMetadata>(
        shared_info()->feedback_metadata(), parse_info->isolate());
  }
  if (shared_info()->never_compiled()) MarkAsFirstCompile();
}

@@ -205,18 +205,16 @@ bool CompilationInfo::ShouldSelfOptimize() {
         (!has_shared_info() || !shared_info()->optimization_disabled());
}


void CompilationInfo::EnsureFeedbackVector() {
  if (feedback_vector_.is_null()) {
    Handle<TypeFeedbackMetadata> feedback_metadata =
void CompilationInfo::EnsureFeedbackMetadata() {
  if (feedback_metadata_.is_null()) {
    feedback_metadata_ =
        TypeFeedbackMetadata::New(isolate(), literal()->feedback_vector_spec());
    feedback_vector_ = TypeFeedbackVector::New(isolate(), feedback_metadata);
  }

  // It's very important that recompiles do not alter the structure of the
  // type feedback vector.
  CHECK(!feedback_vector_->metadata()->SpecDiffersFrom(
      literal()->feedback_vector_spec()));
  CHECK(
      !feedback_metadata_->SpecDiffersFrom(literal()->feedback_vector_spec()));
}

@@ -383,6 +381,11 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
  DCHECK(info()->shared_info()->has_deoptimization_support());
  DCHECK(!info()->is_first_compile());

  // If we have a closure make sure it has the literals array at this point.
  if (!info()->closure().is_null()) {
    JSFunction::EnsureLiterals(info()->closure());
  }

  bool optimization_disabled = info()->shared_info()->optimization_disabled();
  bool dont_crankshaft = info()->shared_info()->dont_crankshaft();

@@ -787,6 +790,7 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
  Handle<SharedFunctionInfo> shared = info->shared_info();
  FunctionLiteral* lit = info->literal();
  DCHECK_EQ(shared->language_mode(), lit->language_mode());
  shared->set_num_literals(lit->materialized_literal_count());
  SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
  MaybeDisableOptimization(shared, lit->dont_optimize_reason());

@@ -804,7 +808,7 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(

  // Update the code and feedback vector for the shared function info.
  shared->ReplaceCode(*info->code());
  shared->set_feedback_vector(*info->feedback_vector());
  shared->set_feedback_metadata(*info->feedback_metadata());
  if (info->has_bytecode_array()) {
    DCHECK(shared->function_data()->IsUndefined());
    shared->set_function_data(*info->bytecode_array());

@@ -1018,6 +1022,7 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {

  if (FLAG_always_opt) {
    Handle<Code> opt_code;
    JSFunction::EnsureLiterals(function);
    if (Compiler::GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
            .ToHandle(&opt_code)) {
      result = opt_code;

@@ -1032,14 +1037,16 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
  if (function->is_compiled()) return true;
  MaybeHandle<Code> maybe_code = Compiler::GetLazyCode(function);
  Handle<Code> code;
  Isolate* isolate = function->GetIsolate();
  if (!maybe_code.ToHandle(&code)) {
    if (flag == CLEAR_EXCEPTION) {
      function->GetIsolate()->clear_pending_exception();
      isolate->clear_pending_exception();
    }
    return false;
  }
  function->ReplaceCode(*code);
  DCHECK(function->is_compiled());
  JSFunction::EnsureLiterals(function);
  return true;
}

@@ -1070,7 +1077,7 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
    if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;

    shared->EnableDeoptimizationSupport(*unoptimized.code());
    shared->set_feedback_vector(*unoptimized.feedback_vector());
    shared->set_feedback_metadata(*unoptimized.feedback_metadata());

    info->MarkAsCompiled();

@@ -1146,12 +1153,15 @@ static inline bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {

bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
  Handle<SharedFunctionInfo> shared(function->shared());
  bool result;
  if (IsEvalToplevel(shared)) {
    return CompileEvalForDebugging(function, shared);
    result = CompileEvalForDebugging(function, shared);
  } else {
    CompilationInfoWithZone info(function);
    return CompileForDebugging(&info);
    result = CompileForDebugging(&info);
  }
  JSFunction::EnsureLiterals(function);
  return result;
}

@@ -1263,7 +1273,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
        lit->name(), lit->materialized_literal_count(), lit->kind(),
        info->code(),
        ScopeInfo::Create(info->isolate(), info->zone(), info->scope()),
        info->feedback_vector());
        info->feedback_metadata());
    if (info->has_bytecode_array()) {
      DCHECK(result->function_data()->IsUndefined());
      result->set_function_data(*info->bytecode_array());

@@ -1590,14 +1600,10 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
  if (lazy) {
    Handle<Code> code = isolate->builtins()->CompileLazy();
    info.SetCode(code);
    // There's no need in theory for a lazy-compiled function to have a type
    // feedback vector, but some parts of the system expect all
    // SharedFunctionInfo instances to have one. The size of the vector depends
    // on how many feedback-needing nodes are in the tree, and when lazily
    // parsing we might not know that, if this function was never parsed before.
    // In that case the vector will be replaced the next time MakeCode is
    // called.
    info.EnsureFeedbackVector();
    // There's no need in theory for a lazy-compiled function to have type
    // feedback metadata, but some parts of the system expect all
    // SharedFunctionInfo instances to have one.
    info.EnsureFeedbackMetadata();
    scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
  } else if (Renumber(info.parse_info()) && GenerateBaselineCode(&info)) {
    // Code generation will ensure that the feedback vector is present and

@@ -1617,7 +1623,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
    Handle<SharedFunctionInfo> result =
        isolate->factory()->NewSharedFunctionInfo(
            literal->name(), literal->materialized_literal_count(),
            literal->kind(), info.code(), scope_info, info.feedback_vector());
            literal->kind(), info.code(), scope_info, info.feedback_metadata());
    if (info.has_bytecode_array()) {
      DCHECK(result->function_data()->IsUndefined());
      result->set_function_data(*info.bytecode_array());

@@ -1646,7 +1652,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
      DCHECK(!existing->HasDebugCode());
      existing->ReplaceCode(*info.code());
      existing->set_scope_info(*scope_info);
      existing->set_feedback_vector(*info.feedback_vector());
      existing->set_feedback_metadata(*info.feedback_metadata());
    }
    return existing;
  }

@@ -1672,7 +1678,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
  Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
      name, literals, FunctionKind::kNormalFunction, code,
      Handle<ScopeInfo>(fun->shared()->scope_info()),
      Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
      Handle<TypeFeedbackMetadata>(fun->shared()->feedback_metadata()));
  shared->set_construct_stub(*construct_stub);

  // Copy the function data to the shared function info.

@@ -1720,6 +1726,10 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
    shared->ReplaceCode(*current_code);
  }

  // At this point we know we've compiled the function, so make sure the closure
  // points to valid literals and type-feedback-vector.
  JSFunction::EnsureLiterals(function);

  current_code->set_profiler_ticks(0);

  // TODO(mstarzinger): We cannot properly deserialize a scope chain containing
@@ -215,9 +215,9 @@ class CompilationInfo {
           !is_debug() && output_code_kind() == Code::FUNCTION;
  }

  void EnsureFeedbackVector();
  Handle<TypeFeedbackVector> feedback_vector() const {
    return feedback_vector_;
  void EnsureFeedbackMetadata();
  Handle<TypeFeedbackMetadata> feedback_metadata() const {
    return feedback_metadata_;
  }
  void SetCode(Handle<Code> code) { code_ = code; }

@@ -412,7 +412,7 @@ class CompilationInfo {
  Handle<Code> code_;

  // Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
  Handle<TypeFeedbackVector> feedback_vector_;
  Handle<TypeFeedbackMetadata> feedback_metadata_;

  // Compilation mode flag and whether deoptimization is allowed.
  Mode mode_;
@@ -321,8 +321,15 @@ FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {


// static
FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
  FieldAccess access = {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
FieldAccess AccessBuilder::ForJSFunctionLiterals() {
  FieldAccess access = {kTaggedBase, JSFunction::kLiteralsOffset,
                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
  return access;
}

// static
FieldAccess AccessBuilder::ForLiteralsTypeFeedbackVector() {
  FieldAccess access = {kTaggedBase, LiteralsArray::kFeedbackVectorOffset,
                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
  return access;
}

@@ -43,6 +43,9 @@ class AccessBuilder final : public AllStatic {
  // Provides access to JSArray::length() field.
  static FieldAccess ForJSArrayLength(ElementsKind elements_kind);

  // Provides access to JSFunction::literals() field.
  static FieldAccess ForJSFunctionLiterals();

  // Provides access to JSArrayBuffer::backing_store() field.
  static FieldAccess ForJSArrayBufferBackingStore();

@@ -117,8 +120,8 @@ class AccessBuilder final : public AllStatic {
  static FieldAccess ForPropertyCellValue();
  static FieldAccess ForPropertyCellValue(Type* type);

  // Provides access to SharedFunctionInfo::feedback_vector() field.
  static FieldAccess ForSharedFunctionInfoTypeFeedbackVector();
  // Provides access to JSFunction::literals()[0] field.
  static FieldAccess ForLiteralsTypeFeedbackVector();

  // Provides access to FixedArray elements.
  static ElementAccess ForFixedArrayElement();
@@ -3054,7 +3054,7 @@ LanguageMode AstGraphBuilder::language_mode() const {

VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
    FeedbackVectorSlot slot) const {
  return VectorSlotPair(handle(info()->shared_info()->feedback_vector()), slot);
  return VectorSlotPair(handle(info()->closure()->feedback_vector()), slot);
}

@@ -3690,10 +3690,10 @@ Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
Node* AstGraphBuilder::BuildLoadFeedbackVector() {
  if (!feedback_vector_.is_set()) {
    Node* closure = GetFunctionClosure();
    Node* shared = BuildLoadImmutableObjectField(
        closure, JSFunction::kSharedFunctionInfoOffset);
    Node* literals =
        BuildLoadImmutableObjectField(closure, JSFunction::kLiteralsOffset);
    Node* vector = BuildLoadImmutableObjectField(
        shared, SharedFunctionInfo::kFeedbackVectorOffset);
        literals, LiteralsArray::kFeedbackVectorOffset);
    feedback_vector_.set(vector);
  }
  return feedback_vector_.get();
@@ -457,10 +457,10 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
  if (!feedback_vector_.is_set()) {
    Node* closure = GetFunctionClosure();
    Node* shared = BuildLoadImmutableObjectField(
        closure, JSFunction::kSharedFunctionInfoOffset);
    Node* literals =
        BuildLoadImmutableObjectField(closure, JSFunction::kLiteralsOffset);
    Node* vector = BuildLoadImmutableObjectField(
        shared, SharedFunctionInfo::kFeedbackVectorOffset);
        literals, LiteralsArray::kFeedbackVectorOffset);
    feedback_vector_.set(vector);
  }
  return feedback_vector_.get();

@@ -468,7 +468,8 @@ Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {


VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
  Handle<TypeFeedbackVector> feedback_vector = info()->feedback_vector();
  Handle<TypeFeedbackVector> feedback_vector =
      handle(info()->closure()->feedback_vector());
  FeedbackVectorSlot slot;
  if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
    slot = feedback_vector->ToSlot(slot_id);
@@ -431,10 +431,9 @@ Node* InterpreterAssembler::LoadTypeFeedbackVector() {
  Node* function = raw_assembler_->Load(
      MachineType::AnyTagged(), RegisterFileRawPointer(),
      IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
  Node* shared_info =
      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
  Node* vector =
      LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
      LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
  return vector;
}
@@ -410,6 +410,9 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
  // does not remove the code with the deoptimization support.
  info_->AddInlinedFunction(info.shared_info());

  // If function was lazily compiled, its literals array may not yet be set up.
  JSFunction::EnsureLiterals(function);

  // ----------------------------------------------------------------
  // After this point, we've made a decision to inline this function.
  // We shall not bailout from inlining if we got here.
@@ -1819,8 +1819,8 @@ Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
  Handle<SharedFunctionInfo> shared = p.shared_info();

  // Use the FastNewClosureStub that allocates in new space only for nested
  // functions that don't need literals cloning.
  if (p.pretenure() == NOT_TENURED && shared->num_literals() == 0) {
  // functions that don't need pretenuring.
  if (p.pretenure() == NOT_TENURED) {
    Isolate* isolate = jsgraph()->isolate();
    Callable callable = CodeFactory::FastNewClosure(
        isolate, shared->language_mode(), shared->kind());
@@ -5501,10 +5501,10 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
  // We also have a stack overflow if the recursive compilation did.
  if (HasStackOverflow()) return;
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  // space for nested functions that don't need pretenuring.
  HConstant* shared_info_value = Add<HConstant>(shared_info);
  HInstruction* instr;
  if (!expr->pretenure() && shared_info->num_literals() == 0) {
  if (!expr->pretenure()) {
    FastNewClosureStub stub(isolate(), shared_info->language_mode(),
                            shared_info->kind());
    FastNewClosureDescriptor descriptor(isolate());

@@ -8442,6 +8442,9 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
  // does not remove the code with the deoptimization support.
  top_info()->AddInlinedFunction(target_info.shared_info());

  // If target was lazily compiled, its literals array may not yet be set up.
  JSFunction::EnsureLiterals(target);

  // ----------------------------------------------------------------
  // After this point, we've made a decision to inline this function (so
  // TryInline should always return true).
@@ -2190,8 +2190,11 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
  Handle<SharedFunctionInfo> current_shared_info() const {
    return current_info()->shared_info();
  }
  Handle<JSFunction> current_closure() const {
    return current_info()->closure();
  }
  TypeFeedbackVector* current_feedback_vector() const {
    return current_shared_info()->feedback_vector();
    return current_closure()->feedback_vector();
  }
  void ClearInlinedTestContext() {
    function_state()->ClearInlinedTestContext();
@@ -14,7 +14,6 @@
namespace v8 {
namespace internal {


AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
                   Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root)
    : isolate_(isolate),

@@ -24,7 +23,7 @@ AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
      osr_ast_id_(osr_ast_id),
      root_(root),
      oracle_(isolate, zone, handle(closure->shared()->code()),
              handle(closure->shared()->feedback_vector()),
              handle(closure->feedback_vector()),
              handle(closure->context()->native_context())),
      store_(zone) {
  InitializeAstVisitor(isolate);
@@ -1148,15 +1148,13 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
  List<Handle<JSFunction> > functions;
  List<Handle<JSGeneratorObject> > suspended_generators;

  // Flush all optimized code maps. Note that the below heap iteration does not
  // Flush all optimized code. Note that the below heap iteration does not
  // cover this, because the given function might have been inlined into code
  // for which no JSFunction exists.
  {
    SharedFunctionInfo::Iterator iterator(isolate_);
    while (SharedFunctionInfo* shared = iterator.Next()) {
      if (!shared->OptimizedCodeMapIsCleared()) {
        shared->ClearOptimizedCodeMap();
      }
      shared->ClearCodeFromOptimizedCodeMap();
    }
  }

@@ -1198,6 +1196,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {

  for (Handle<JSFunction> const function : functions) {
    function->ReplaceCode(shared->code());
    JSFunction::EnsureLiterals(function);
  }

  for (Handle<JSGeneratorObject> const generator_obj : suspended_generators) {
@@ -646,18 +646,17 @@ Handle<Code> FunctionInfoWrapper::GetFunctionCode() {
  return Handle<Code>::cast(raw_result);
}


MaybeHandle<TypeFeedbackVector> FunctionInfoWrapper::GetFeedbackVector() {
MaybeHandle<TypeFeedbackMetadata> FunctionInfoWrapper::GetFeedbackMetadata() {
  Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
  if (element->IsJSValue()) {
    Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
    Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
    Handle<SharedFunctionInfo> shared =
        Handle<SharedFunctionInfo>::cast(raw_result);
    return Handle<TypeFeedbackVector>(shared->feedback_vector(), isolate());
    return Handle<TypeFeedbackMetadata>(shared->feedback_metadata(), isolate());
  } else {
    // Scripts may never have a SharedFunctionInfo created.
    return MaybeHandle<TypeFeedbackVector>();
    return MaybeHandle<TypeFeedbackMetadata>();
  }
}

@@ -974,11 +973,11 @@ class LiteralFixer {
 public:
  static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
                            Handle<SharedFunctionInfo> shared_info,
                            Isolate* isolate) {
                            bool feedback_metadata_changed, Isolate* isolate) {
    int new_literal_count = compile_info_wrapper->GetLiteralCount();
    int old_literal_count = shared_info->num_literals();

    if (old_literal_count == new_literal_count) {
    if (old_literal_count == new_literal_count && !feedback_metadata_changed) {
      // If literal count didn't change, simply go over all functions
      // and clear literal arrays.
      ClearValuesVisitor visitor;

@@ -989,10 +988,13 @@ class LiteralFixer {
      // collect all functions and fix their literal arrays.
      Handle<FixedArray> function_instances =
          CollectJSFunctions(shared_info, isolate);
      Handle<TypeFeedbackVector> vector(shared_info->feedback_vector());
      Handle<TypeFeedbackMetadata> feedback_metadata(
          shared_info->feedback_metadata());

      for (int i = 0; i < function_instances->length(); i++) {
        Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
        Handle<TypeFeedbackVector> vector =
            TypeFeedbackVector::New(isolate, feedback_metadata);
        Handle<LiteralsArray> new_literals =
            LiteralsArray::New(isolate, vector, new_literal_count, TENURED);
        fun->set_literals(*new_literals);

@@ -1040,10 +1042,10 @@ class LiteralFixer {
  class ClearValuesVisitor {
   public:
    void visit(JSFunction* fun) {
      FixedArray* literals = fun->literals();
      int len = literals->length();
      LiteralsArray* literals = fun->literals();
      int len = literals->literals_count();
      for (int j = 0; j < len; j++) {
        literals->set_undefined(j);
        literals->set_literal_undefined(j);
      }
    }
  };

@@ -1118,6 +1120,7 @@ void LiveEdit::ReplaceFunctionCode(
  SharedInfoWrapper shared_info_wrapper(shared_info_array);

  Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
  bool feedback_metadata_changed = false;

  if (shared_info->code()->kind() == Code::FUNCTION) {
    Handle<Code> code = compile_info_wrapper.GetFunctionCode();

@@ -1128,10 +1131,14 @@ void LiveEdit::ReplaceFunctionCode(
    }
    shared_info->DisableOptimization(kLiveEdit);
    // Update the type feedback vector, if needed.
    MaybeHandle<TypeFeedbackVector> feedback_vector =
        compile_info_wrapper.GetFeedbackVector();
    if (!feedback_vector.is_null()) {
      shared_info->set_feedback_vector(*feedback_vector.ToHandleChecked());
    MaybeHandle<TypeFeedbackMetadata> feedback_metadata =
        compile_info_wrapper.GetFeedbackMetadata();
    if (!feedback_metadata.is_null()) {
      Handle<TypeFeedbackMetadata> checked_feedback_metadata =
          feedback_metadata.ToHandleChecked();
      feedback_metadata_changed = checked_feedback_metadata->DiffersFrom(
          shared_info->feedback_metadata());
      shared_info->set_feedback_metadata(*checked_feedback_metadata);
    }
  }

@@ -1140,7 +1147,8 @@ void LiveEdit::ReplaceFunctionCode(
  shared_info->set_start_position(start_position);
  shared_info->set_end_position(end_position);

  LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
  LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info,
                              feedback_metadata_changed, isolate);

  DeoptimizeDependentFunctions(*shared_info);
  isolate->compilation_cache()->Remove(shared_info);
@@ -302,7 +302,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {

  Handle<Code> GetFunctionCode();

  MaybeHandle<TypeFeedbackVector> GetFeedbackVector();
  MaybeHandle<TypeFeedbackMetadata> GetFeedbackMetadata();

  Handle<Object> GetCodeScopeInfo();
@@ -340,7 +340,6 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
  V(kInsufficientTypeFeedbackForRHSOfBinaryOperation, \
    "Insufficient type feedback for RHS of binary operation") \
  V(kKeyIsNegative, "key is negative") \
  V(kLiteralsWereDisposed, "literals have been disposed") \
  V(kLostPrecision, "lost precision") \
  V(kLostPrecisionOrNaN, "lost precision or NaN") \
  V(kMementoFound, "memento found") \
@@ -1205,7 +1205,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
  function->set_code(info->code());
  function->set_context(*context);
  function->set_prototype_or_initial_map(*the_hole_value());
  function->set_literals(LiteralsArray::cast(*empty_fixed_array()));
  function->set_literals(LiteralsArray::cast(*empty_literals_array()));
  function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
  isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
  return function;

@@ -1371,11 +1371,12 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(

  if (cached.literals != nullptr) {
    result->set_literals(cached.literals);
  } else {
  } else if (info->is_compiled()) {
    int number_of_literals = info->num_literals();
    Handle<TypeFeedbackVector> vector =
        TypeFeedbackVector::New(isolate(), handle(info->feedback_metadata()));
    Handle<LiteralsArray> literals =
        LiteralsArray::New(isolate(), handle(info->feedback_vector()),
                           number_of_literals, pretenure);
        LiteralsArray::New(isolate(), vector, number_of_literals, pretenure);
    result->set_literals(*literals);

    // Cache context-specific literals.

@@ -2081,16 +2082,15 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
  object->set_hash(*hash);
}


Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
    Handle<String> name, int number_of_literals, FunctionKind kind,
    Handle<Code> code, Handle<ScopeInfo> scope_info,
    Handle<TypeFeedbackVector> feedback_vector) {
    Handle<TypeFeedbackMetadata> feedback_metadata) {
  DCHECK(IsValidFunctionKind(kind));
  Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
      name, code, IsConstructable(kind, scope_info->language_mode()));
  shared->set_scope_info(*scope_info);
  shared->set_feedback_vector(*feedback_vector);
  shared->set_feedback_metadata(*feedback_metadata);
  shared->set_kind(kind);
  shared->set_num_literals(number_of_literals);
  if (IsGeneratorFunction(kind)) {

@@ -2146,9 +2146,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
  StaticFeedbackVectorSpec empty_spec;
  Handle<TypeFeedbackMetadata> feedback_metadata =
      TypeFeedbackMetadata::New(isolate(), &empty_spec);
  Handle<TypeFeedbackVector> feedback_vector =
      TypeFeedbackVector::New(isolate(), feedback_metadata);
  share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
  share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
#if TRACE_MAPS
  share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif

@@ -629,7 +629,7 @@ class Factory final {
  Handle<SharedFunctionInfo> NewSharedFunctionInfo(
      Handle<String> name, int number_of_literals, FunctionKind kind,
      Handle<Code> code, Handle<ScopeInfo> scope_info,
      Handle<TypeFeedbackVector> feedback_vector);
      Handle<TypeFeedbackMetadata> feedback_metadata);
  Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
                                                   MaybeHandle<Code> code,
                                                   bool is_constructor);
@ -1212,11 +1212,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
|
||||
// flag, we need to use the runtime function so that the new function
|
||||
// we are creating here gets a chance to have its code optimized and
|
||||
// doesn't just get a copy of the existing unoptimized code.
|
||||
if (!FLAG_always_opt &&
|
||||
!FLAG_prepare_always_opt &&
|
||||
!pretenure &&
|
||||
scope()->is_function_scope() &&
|
||||
info->num_literals() == 0) {
|
||||
if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
|
||||
scope()->is_function_scope()) {
|
||||
FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
|
||||
__ mov(r2, Operand(info));
|
||||
__ CallStub(&stub);
|
||||
|
@ -1205,11 +1205,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
|
||||
// runtime function so that the new function we are creating here gets a
|
||||
// chance to have its code optimized and doesn't just get a copy of the
|
||||
// existing unoptimized code.
|
||||
if (!FLAG_always_opt &&
|
||||
!FLAG_prepare_always_opt &&
|
||||
!pretenure &&
|
||||
scope()->is_function_scope() &&
|
||||
info->num_literals() == 0) {
|
||||
if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
|
||||
scope()->is_function_scope()) {
|
||||
FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
|
||||
__ Mov(x2, Operand(info));
|
||||
__ CallStub(&stub);
|
||||
|
@ -29,7 +29,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
|
||||
TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
|
||||
|
||||
// Ensure that the feedback vector is large enough.
|
||||
info->EnsureFeedbackVector();
|
||||
info->EnsureFeedbackMetadata();
|
||||
|
||||
Handle<Script> script = info->script();
|
||||
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
|
||||
|
@ -1134,11 +1134,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
|
||||
// flag, we need to use the runtime function so that the new function
|
||||
// we are creating here gets a chance to have its code optimized and
|
||||
// doesn't just get a copy of the existing unoptimized code.
|
||||
if (!FLAG_always_opt &&
|
||||
!FLAG_prepare_always_opt &&
|
||||
!pretenure &&
|
||||
scope()->is_function_scope() &&
|
||||
info->num_literals() == 0) {
|
||||
if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
|
||||
scope()->is_function_scope()) {
|
||||
FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
|
||||
__ mov(ebx, Immediate(info));
|
||||
__ CallStub(&stub);
|
||||
|
@@ -1207,11 +1207,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
  // flag, we need to use the runtime function so that the new function
  // we are creating here gets a chance to have its code optimized and
  // doesn't just get a copy of the existing unoptimized code.
  if (!FLAG_always_opt &&
      !FLAG_prepare_always_opt &&
      !pretenure &&
      scope()->is_function_scope() &&
      info->num_literals() == 0) {
  if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
      scope()->is_function_scope()) {
    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
    __ li(a2, Operand(info));
    __ CallStub(&stub);

@@ -1208,11 +1208,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
  // flag, we need to use the runtime function so that the new function
  // we are creating here gets a chance to have its code optimized and
  // doesn't just get a copy of the existing unoptimized code.
  if (!FLAG_always_opt &&
      !FLAG_prepare_always_opt &&
      !pretenure &&
      scope()->is_function_scope() &&
      info->num_literals() == 0) {
  if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
      scope()->is_function_scope()) {
    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
    __ li(a2, Operand(info));
    __ CallStub(&stub);

@@ -1160,11 +1160,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
  // flag, we need to use the runtime function so that the new function
  // we are creating here gets a chance to have its code optimized and
  // doesn't just get a copy of the existing unoptimized code.
  if (!FLAG_always_opt &&
      !FLAG_prepare_always_opt &&
      !pretenure &&
      scope()->is_function_scope() &&
      info->num_literals() == 0) {
  if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
      scope()->is_function_scope()) {
    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
    __ Move(rbx, info);
    __ CallStub(&stub);

@@ -2825,6 +2825,14 @@ void Heap::CreateInitialObjects() {
    set_dummy_vector(*dummy_vector);
  }

  {
    Handle<FixedArray> empty_literals_array =
        factory->NewFixedArray(1, TENURED);
    empty_literals_array->set(0, *factory->empty_fixed_array(),
                              SKIP_WRITE_BARRIER);
    set_empty_literals_array(*empty_literals_array);
  }

  {
    Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
    set_empty_weak_cell(*cell);

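The new empty_literals_array root is a canonical one-element FixedArray whose single slot (the feedback-vector slot) holds the empty fixed array, so closures whose functions have no literals can all share it instead of allocating one each. A hedged sketch of that reuse; the helper name is hypothetical and not part of this CL:

// Hypothetical helper illustrating the intent of the root (not V8 API).
LiteralsArray* EmptyLiterals(Heap* heap) {
  // Slot 0 is the feedback-vector slot; there are no literal slots after it.
  return LiteralsArray::cast(heap->empty_literals_array());
}
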
@@ -180,6 +180,7 @@ namespace internal {
  V(FixedArray, materialized_objects, MaterializedObjects) \
  V(FixedArray, microtask_queue, MicrotaskQueue) \
  V(TypeFeedbackVector, dummy_vector, DummyVector) \
  V(FixedArray, empty_literals_array, EmptyLiteralsArray) \
  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
  V(FixedArray, detached_contexts, DetachedContexts) \
  V(ArrayList, retained_maps, RetainedMaps) \

@@ -193,7 +194,6 @@ namespace internal {
  V(WeakCell, empty_weak_cell, EmptyWeakCell) \
  V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)


// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
  V(Smi, stack_limit, StackLimit) \

@@ -446,9 +446,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
  if (shared->ic_age() != heap->global_ic_age()) {
    shared->ResetForNewContext(heap->global_ic_age());
  }
  if (FLAG_cleanup_code_caches_at_gc) {
    shared->ClearTypeFeedbackInfoAtGCTime();
  }
  if (FLAG_flush_optimized_code_cache) {
    if (!shared->OptimizedCodeMapIsCleared()) {
      // Always flush the optimized code map if requested by flag.

@@ -479,6 +476,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
                                                          HeapObject* object) {
  Heap* heap = map->GetHeap();
  JSFunction* function = JSFunction::cast(object);
  if (FLAG_cleanup_code_caches_at_gc) {
    function->ClearTypeFeedbackInfoAtGCTime();
  }

  MarkCompactCollector* collector = heap->mark_compact_collector();
  if (collector->is_code_flushing_enabled()) {
    if (IsFlushable(heap, function)) {

@@ -865,6 +865,144 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {


void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- edx : new target (preserved for callee)
  //  -- edi : target function (preserved for callee)
  // -----------------------------------
  // First lookup code, maybe we don't need to compile!
  Label gotta_call_runtime, gotta_call_runtime_no_stack;
  Label maybe_call_runtime;
  Label try_shared;
  Label loop_top, loop_bottom;

  Register closure = edi;
  Register new_target = edx;
  __ push(new_target);
  __ push(closure);

  Register map = eax;
  Register index = ebx;
  __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
  __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
  __ cmp(index, Immediate(Smi::FromInt(2)));
  __ j(less, &gotta_call_runtime);

  // Find literals.
  // edx : native context
  // ebx : length / index
  // eax : optimized code map
  // stack[0] : new target
  // stack[4] : closure
  Register native_context = edx;
  __ mov(native_context, NativeContextOperand());

  __ bind(&loop_top);
  Register temp = edi;

  // Does the native context match?
  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
                            SharedFunctionInfo::OffsetToPreviousContext()));
  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
  __ cmp(temp, native_context);
  __ j(not_equal, &loop_bottom);
  // OSR id set to none?
  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
                            SharedFunctionInfo::OffsetToPreviousOsrAstId()));
  const int bailout_id = BailoutId::None().ToInt();
  __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
  __ j(not_equal, &loop_bottom);
  // Literals available?
  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
                            SharedFunctionInfo::OffsetToPreviousLiterals()));
  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
  __ JumpIfSmi(temp, &gotta_call_runtime);

  // Save the literals in the closure.
  __ mov(ecx, Operand(esp, 0));
  __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
  __ push(index);
  __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ pop(index);

  // Code available?
  Register entry = ecx;
  __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
                             SharedFunctionInfo::OffsetToPreviousCachedCode()));
  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &maybe_call_runtime);

  // Found literals and code. Get them into the closure and return.
  __ pop(closure);
  // Store code entry in the closure.
  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));

  Label install_optimized_code_and_tailcall;
  __ bind(&install_optimized_code_and_tailcall);
  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);

  // Link the closure into the optimized function list.
  // ecx : code entry
  // edx : native context
  // edi : closure
  __ mov(ebx,
         ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
         closure);
  // Save closure before the write barrier.
  __ mov(ebx, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
                            kDontSaveFPRegs);
  __ mov(closure, ebx);
  __ pop(new_target);
  __ jmp(entry);

  __ bind(&loop_bottom);
  __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
  __ cmp(index, Immediate(Smi::FromInt(1)));
  __ j(greater, &loop_top);

  // We found neither literals nor code.
  __ jmp(&gotta_call_runtime);

  __ bind(&maybe_call_runtime);
  __ pop(closure);

  // Last possibility. Check the context free optimized code map entry.
  __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
                                      SharedFunctionInfo::kSharedCodeIndex));
  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &try_shared);

  // Store code entry in the closure.
  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
  __ jmp(&install_optimized_code_and_tailcall);

  __ bind(&try_shared);
  __ pop(new_target);
  // Is the full code valid?
  __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
  __ and_(ebx, Code::KindField::kMask);
  __ shr(ebx, Code::KindField::kShift);
  __ cmp(ebx, Immediate(Code::BUILTIN));
  __ j(equal, &gotta_call_runtime_no_stack);
  // Yes, install the full code.
  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
  __ jmp(entry);

  __ bind(&gotta_call_runtime);
  __ pop(closure);
  __ pop(new_target);
  __ bind(&gotta_call_runtime_no_stack);
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
}

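The builtin above (and its mips, mips64 and x64 counterparts below) all encode the same search in assembly. As a reading aid, here is a C++-style sketch of the fast path, written against the optimized-code-map entry layout this CL exposes through the OffsetToPrevious* helpers; it mirrors SharedFunctionInfo::SearchOptimizedCodeMap rather than being a literal transcription of the stub, so treat it as illustrative only:

// Illustrative sketch only; the real walk is done backwards in assembly.
CodeAndLiterals LookupForCompileLazy(SharedFunctionInfo* shared,
                                     Context* native_context) {
  FixedArray* map = shared->optimized_code_map();
  for (int i = SharedFunctionInfo::kEntriesStart; i < map->length();
       i += SharedFunctionInfo::kEntryLength) {
    WeakCell* ctx =
        WeakCell::cast(map->get(i + SharedFunctionInfo::kContextOffset));
    if (ctx->value() != native_context) continue;  // wrong native context
    Object* osr = map->get(i + SharedFunctionInfo::kOsrAstIdOffset);
    if (osr != Smi::FromInt(BailoutId::None().ToInt())) continue;  // OSR entry
    WeakCell* literals =
        WeakCell::cast(map->get(i + SharedFunctionInfo::kLiteralsOffset));
    if (literals->cleared()) break;  // no literals: must call the runtime
    WeakCell* code =
        WeakCell::cast(map->get(i + SharedFunctionInfo::kCachedCodeOffset));
    return {code->cleared() ? nullptr : Code::cast(code->value()),
            LiteralsArray::cast(literals->value())};
  }
  return {nullptr, nullptr};  // fall back to shared code, then the runtime
}
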
@@ -882,8 +882,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {

void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
  mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
  mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
  mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}

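The macro assemblers no longer read the vector off the SharedFunctionInfo: they chase the closure's literals array and take its first slot. In C++ terms this is exactly the accessor pair the CL adds in objects-inl.h further down:

// Equivalent C++ (see JSFunction::feedback_vector() in objects-inl.h below).
TypeFeedbackVector* LoadFeedbackVector(JSFunction* function) {
  // JSFunction -> LiteralsArray -> kFeedbackVectorOffset (slot 0).
  return function->literals()->feedback_vector();
}
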
@@ -2507,7 +2507,7 @@ Strength BytecodeGenerator::language_mode_strength() const {


int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
  return info()->feedback_vector()->GetIndex(slot);
  return TypeFeedbackVector::GetIndex(slot);
}

}  // namespace interpreter

@@ -61,7 +61,7 @@ void Interpreter::Initialize() {

bool Interpreter::MakeBytecode(CompilationInfo* info) {
  BytecodeGenerator generator(info->isolate(), info->zone());
  info->EnsureFeedbackVector();
  info->EnsureFeedbackMetadata();
  Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
  if (FLAG_print_bytecode) {
    OFStream os(stdout);

@@ -1148,6 +1148,146 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {


void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a3 : new target (preserved for callee)
  //  -- a1 : target function (preserved for callee)
  // -----------------------------------
  // First lookup code, maybe we don't need to compile!
  Label gotta_call_runtime, gotta_call_runtime_no_stack;
  Label maybe_call_runtime;
  Label try_shared;
  Label loop_top, loop_bottom;

  Register closure = a1;
  Register new_target = a3;
  __ push(new_target);
  __ push(closure);

  Register map = a0;
  Register index = a2;
  __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
  __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));

  // Find literals.
  // a3 : native context
  // a2 : length / index
  // a0 : optimized code map
  // stack[0] : new target
  // stack[4] : closure
  Register native_context = a3;
  __ lw(native_context, NativeContextMemOperand());

  __ bind(&loop_top);
  Register temp = a1;
  Register array_pointer = t1;

  // Does the native context match?
  __ sll(at, index, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(array_pointer, map, Operand(at));
  __ lw(temp, FieldMemOperand(array_pointer,
                              SharedFunctionInfo::OffsetToPreviousContext()));
  __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
  __ Branch(&loop_bottom, ne, temp, Operand(native_context));
  // OSR id set to none?
  __ lw(temp, FieldMemOperand(array_pointer,
                              SharedFunctionInfo::OffsetToPreviousOsrAstId()));
  const int bailout_id = BailoutId::None().ToInt();
  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
  // Literals available?
  __ lw(temp, FieldMemOperand(array_pointer,
                              SharedFunctionInfo::OffsetToPreviousLiterals()));
  __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
  __ JumpIfSmi(temp, &gotta_call_runtime);

  // Save the literals in the closure.
  __ lw(t0, MemOperand(sp, 0));
  __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
  __ push(index);
  __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index,
                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(index);

  // Code available?
  Register entry = t0;
  __ lw(entry,
        FieldMemOperand(array_pointer,
                        SharedFunctionInfo::OffsetToPreviousCachedCode()));
  __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &maybe_call_runtime);

  // Found literals and code. Get them into the closure and return.
  __ pop(closure);
  // Store code entry in the closure.
  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));

  Label install_optimized_code_and_tailcall;
  __ bind(&install_optimized_code_and_tailcall);
  __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));

  // Link the closure into the optimized function list.
  // t0 : code entry
  // a3 : native context
  // a1 : closure
  __ lw(t1,
        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0,
                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ sw(closure,
        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  // Save closure before the write barrier.
  __ mov(t1, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
                            kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ mov(closure, t1);
  __ pop(new_target);
  __ Jump(entry);

  __ bind(&loop_bottom);
  __ Subu(index, index,
          Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
  __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));

  // We found neither literals nor code.
  __ jmp(&gotta_call_runtime);

  __ bind(&maybe_call_runtime);
  __ pop(closure);

  // Last possibility. Check the context free optimized code map entry.
  __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
                                        SharedFunctionInfo::kSharedCodeIndex));
  __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &try_shared);

  // Store code entry in the closure.
  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ jmp(&install_optimized_code_and_tailcall);

  __ bind(&try_shared);
  __ pop(new_target);
  // Is the full code valid?
  __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
  __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
  __ And(t1, t1, Operand(Code::KindField::kMask));
  __ srl(t1, t1, Code::KindField::kShift);
  __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
  // Yes, install the full code.
  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
  __ Jump(entry);

  __ bind(&gotta_call_runtime);
  __ pop(closure);
  __ pop(new_target);
  __ bind(&gotta_call_runtime_no_stack);
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
}

@@ -4783,9 +4783,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {

void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  lw(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
  lw(vector,
     FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
  lw(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
  lw(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}

@@ -1139,6 +1139,146 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {


void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a3 : new target (preserved for callee)
  //  -- a1 : target function (preserved for callee)
  // -----------------------------------
  // First lookup code, maybe we don't need to compile!
  Label gotta_call_runtime, gotta_call_runtime_no_stack;
  Label maybe_call_runtime;
  Label try_shared;
  Label loop_top, loop_bottom;

  Register closure = a1;
  Register new_target = a3;
  __ push(new_target);
  __ push(closure);

  Register map = a0;
  Register index = a2;
  __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
  __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));

  // Find literals.
  // a3 : native context
  // a2 : length / index
  // a0 : optimized code map
  // stack[0] : new target
  // stack[4] : closure
  Register native_context = a3;
  __ ld(native_context, NativeContextMemOperand());

  __ bind(&loop_top);
  Register temp = a1;
  Register array_pointer = a5;

  // Does the native context match?
  __ SmiScale(at, index, kPointerSizeLog2);
  __ Daddu(array_pointer, map, Operand(at));
  __ ld(temp, FieldMemOperand(array_pointer,
                              SharedFunctionInfo::OffsetToPreviousContext()));
  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
  __ Branch(&loop_bottom, ne, temp, Operand(native_context));
  // OSR id set to none?
  __ ld(temp, FieldMemOperand(array_pointer,
                              SharedFunctionInfo::OffsetToPreviousOsrAstId()));
  const int bailout_id = BailoutId::None().ToInt();
  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
  // Literals available?
  __ ld(temp, FieldMemOperand(array_pointer,
                              SharedFunctionInfo::OffsetToPreviousLiterals()));
  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
  __ JumpIfSmi(temp, &gotta_call_runtime);

  // Save the literals in the closure.
  __ ld(a4, MemOperand(sp, 0));
  __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
  __ push(index);
  __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(index);

  // Code available?
  Register entry = a4;
  __ ld(entry,
        FieldMemOperand(array_pointer,
                        SharedFunctionInfo::OffsetToPreviousCachedCode()));
  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &maybe_call_runtime);

  // Found literals and code. Get them into the closure and return.
  __ pop(closure);
  // Store code entry in the closure.
  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));

  Label install_optimized_code_and_tailcall;
  __ bind(&install_optimized_code_and_tailcall);
  __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));

  // Link the closure into the optimized function list.
  // a4 : code entry
  // a3 : native context
  // a1 : closure
  __ ld(a5,
        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ sd(closure,
        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  // Save closure before the write barrier.
  __ mov(a5, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
                            kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ mov(closure, a5);
  __ pop(new_target);
  __ Jump(entry);

  __ bind(&loop_bottom);
  __ Dsubu(index, index,
           Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
  __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));

  // We found neither literals nor code.
  __ jmp(&gotta_call_runtime);

  __ bind(&maybe_call_runtime);
  __ pop(closure);

  // Last possibility. Check the context free optimized code map entry.
  __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
                                        SharedFunctionInfo::kSharedCodeIndex));
  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &try_shared);

  // Store code entry in the closure.
  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ jmp(&install_optimized_code_and_tailcall);

  __ bind(&try_shared);
  __ pop(new_target);
  // Is the full code valid?
  __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
  __ ld(a5, FieldMemOperand(entry, Code::kFlagsOffset));
  __ And(a5, a5, Operand(Code::KindField::kMask));
  __ dsrl(a5, a5, Code::KindField::kShift);
  __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
  // Yes, install the full code.
  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
  __ Jump(entry);

  __ bind(&gotta_call_runtime);
  __ pop(closure);
  __ pop(new_target);
  __ bind(&gotta_call_runtime_no_stack);
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
}

@@ -5438,9 +5438,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {

void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
  ld(vector,
     FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
  ld(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
  ld(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}

@@ -576,7 +576,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
  VerifyObjectField(kNameOffset);
  VerifyObjectField(kCodeOffset);
  VerifyObjectField(kOptimizedCodeMapOffset);
  VerifyObjectField(kFeedbackVectorOffset);
  VerifyObjectField(kFeedbackMetadataOffset);
  VerifyObjectField(kScopeInfoOffset);
  VerifyObjectField(kInstanceClassNameOffset);
  CHECK(function_data()->IsUndefined() || IsApiFunction() ||

@@ -3390,11 +3390,19 @@ LiteralsArray* LiteralsArray::cast(Object* object) {


TypeFeedbackVector* LiteralsArray::feedback_vector() const {
  if (length() == 0) {
    return TypeFeedbackVector::cast(
        const_cast<FixedArray*>(FixedArray::cast(this)));
  }
  return TypeFeedbackVector::cast(get(kVectorIndex));
}


void LiteralsArray::set_feedback_vector(TypeFeedbackVector* vector) {
  if (length() <= kVectorIndex) {
    DCHECK(vector->length() == 0);
    return;
  }
  set(kVectorIndex, vector);
}

@@ -3408,6 +3416,9 @@ void LiteralsArray::set_literal(int literal_index, Object* literal) {
  set(kFirstLiteralIndex + literal_index, literal);
}

void LiteralsArray::set_literal_undefined(int literal_index) {
  set_undefined(kFirstLiteralIndex + literal_index);
}

int LiteralsArray::literals_count() const {
  return length() - kFirstLiteralIndex;

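Note the aliasing trick in feedback_vector(): a zero-length LiteralsArray is reinterpreted as an empty TypeFeedbackVector, so callers never need a null check. A short usage sketch under that contract:

// Both shapes answer feedback_vector() without a null check.
LiteralsArray* literals = function->literals();
TypeFeedbackVector* vector = literals->feedback_vector();
// For a non-empty array, the vector lives at kVectorIndex (0) and the
// literals follow from kFirstLiteralIndex (1) onwards.
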
@@ -5621,8 +5632,8 @@ ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
          kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
          kFeedbackVectorOffset)
ACCESSORS(SharedFunctionInfo, feedback_metadata, TypeFeedbackMetadata,
          kFeedbackMetadataOffset)
#if TRACE_MAPS
SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
#endif

@@ -5804,6 +5815,26 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
  set_compiler_hints(hints);
}

// static
int SharedFunctionInfo::OffsetToPreviousContext() {
  return FixedArray::kHeaderSize +
         kPointerSize * (kContextOffset - kEntryLength);
}

int SharedFunctionInfo::OffsetToPreviousCachedCode() {
  return FixedArray::kHeaderSize +
         kPointerSize * (kCachedCodeOffset - kEntryLength);
}

int SharedFunctionInfo::OffsetToPreviousLiterals() {
  return FixedArray::kHeaderSize +
         kPointerSize * (kLiteralsOffset - kEntryLength);
}

int SharedFunctionInfo::OffsetToPreviousOsrAstId() {
  return FixedArray::kHeaderSize +
         kPointerSize * (kOsrAstIdOffset - kEntryLength);
}

BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
               kNeedsHomeObject)

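These helpers encode the backwards walk used by Generate_CompileLazy: the assembly keeps index pointing one entry past the entry it inspects, so every field sits kEntryLength slots back from it. A worked example, assuming the conventional entry layout (kEntryLength == 4 and kContextOffset == 0; the actual constants are defined on SharedFunctionInfo):

// With 'index' (in slots) pointing one past an entry:
//   field address = map + FixedArray::kHeaderSize
//                 + kPointerSize * (index + kContextOffset - kEntryLength)
// i.e. OffsetToPreviousContext() is FixedArray::kHeaderSize - 4 * kPointerSize
// relative to map + index * kPointerSize, four slots back from 'index'.
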
@@ -6255,11 +6286,12 @@ bool JSFunction::is_compiled() {
         code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
}


int JSFunction::NumberOfLiterals() {
  return literals()->length();
TypeFeedbackVector* JSFunction::feedback_vector() {
  LiteralsArray* array = literals();
  return array->feedback_vector();
}

int JSFunction::NumberOfLiterals() { return literals()->literals_count(); }

ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)

@@ -900,8 +900,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
  os << "\n - debug info = " << Brief(debug_info());
  os << "\n - length = " << length();
  os << "\n - optimized_code_map = " << Brief(optimized_code_map());
  os << "\n - feedback_vector = ";
  feedback_vector()->TypeFeedbackVectorPrint(os);
  os << "\n - feedback_metadata = ";
  feedback_metadata()->TypeFeedbackMetadataPrint(os);
  if (HasBytecodeArray()) {
    os << "\n - bytecode_array = " << bytecode_array();
  }

@@ -12344,6 +12344,22 @@ void JSFunction::AttemptConcurrentOptimization() {
  // No write barrier required, since the builtin is part of the root set.
}

// static
Handle<LiteralsArray> SharedFunctionInfo::FindOrCreateLiterals(
    Handle<SharedFunctionInfo> shared, Handle<Context> native_context) {
  Isolate* isolate = shared->GetIsolate();
  CodeAndLiterals result =
      shared->SearchOptimizedCodeMap(*native_context, BailoutId::None());
  if (result.literals != nullptr) {
    return handle(result.literals, isolate);
  }
  Handle<TypeFeedbackVector> feedback_vector =
      TypeFeedbackVector::New(isolate, handle(shared->feedback_metadata()));
  Handle<LiteralsArray> literals = LiteralsArray::New(
      isolate, feedback_vector, shared->num_literals(), TENURED);
  AddLiteralsToOptimizedCodeMap(shared, native_context, literals);
  return literals;
}

void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
    Handle<SharedFunctionInfo> shared, Handle<Code> code) {

@@ -12547,6 +12563,14 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
  }
}

// static
void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
  Handle<SharedFunctionInfo> shared(function->shared());
  Handle<Context> native_context(function->context()->native_context());
  Handle<LiteralsArray> literals =
      SharedFunctionInfo::FindOrCreateLiterals(shared, native_context);
  function->set_literals(*literals);
}

static void GetMinInobjectSlack(Map* map, void* data) {
  int slack = map->unused_property_fields();

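EnsureLiterals is the invariant the whole CL leans on: after compilation or code installation a closure must own a literals array (and therefore a feedback vector) for its native context. The call chain, condensed from the two functions above:

// After installing code on 'function':
JSFunction::EnsureLiterals(function);
// -> SharedFunctionInfo::FindOrCreateLiterals(shared, native_context)
//      reuses literals cached in the optimized code map if present,
//      otherwise allocates a TypeFeedbackVector plus LiteralsArray and
//      caches them via AddLiteralsToOptimizedCodeMap().
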
@@ -13824,9 +13848,6 @@ void Map::StartInobjectSlackTracking() {

void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
  code()->ClearInlineCaches();
  // If we clear ICs, we need to clear the type feedback vector too, since
  // CallICs are synced with a feedback vector slot.
  ClearTypeFeedbackInfo();
  set_ic_age(new_ic_age);
  if (code()->kind() == Code::FUNCTION) {
    code()->set_profiler_ticks(0);

@@ -13865,6 +13886,19 @@ int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
  return -1;
}

void SharedFunctionInfo::ClearCodeFromOptimizedCodeMap() {
  if (!OptimizedCodeMapIsCleared()) {
    FixedArray* optimized_code_map = this->optimized_code_map();
    int length = optimized_code_map->length();
    WeakCell* empty_weak_cell = GetHeap()->empty_weak_cell();
    for (int i = kEntriesStart; i < length; i += kEntryLength) {
      optimized_code_map->set(i + kCachedCodeOffset, empty_weak_cell,
                              SKIP_WRITE_BARRIER);
    }
    optimized_code_map->set(kSharedCodeIndex, empty_weak_cell,
                            SKIP_WRITE_BARRIER);
  }
}

CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
    Context* native_context, BailoutId osr_ast_id) {

@@ -14303,14 +14337,12 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
  }
}


void SharedFunctionInfo::ClearTypeFeedbackInfo() {
  feedback_vector()->ClearSlots(this);
void JSFunction::ClearTypeFeedbackInfo() {
  feedback_vector()->ClearSlots(shared());
}


void SharedFunctionInfo::ClearTypeFeedbackInfoAtGCTime() {
  feedback_vector()->ClearSlotsAtGCTime(this);
void JSFunction::ClearTypeFeedbackInfoAtGCTime() {
  feedback_vector()->ClearSlotsAtGCTime(shared());
}


@@ -849,6 +849,7 @@ class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
class TypeFeedbackInfo;
class TypeFeedbackMetadata;
class TypeFeedbackVector;
class WeakCell;
class TransitionArray;

@@ -4711,8 +4712,8 @@ class LiteralsArray : public FixedArray {
 public:
  static const int kVectorIndex = 0;
  static const int kFirstLiteralIndex = 1;
  static const int kOffsetToFirstLiteral =
      FixedArray::kHeaderSize + kPointerSize;
  static const int kFeedbackVectorOffset = FixedArray::kHeaderSize;
  static const int kOffsetToFirstLiteral = kFeedbackVectorOffset + kPointerSize;

  static int OffsetOfLiteralAt(int index) {
    return SizeFor(index + kFirstLiteralIndex);

@@ -4722,6 +4723,7 @@ class LiteralsArray : public FixedArray {
  inline void set_feedback_vector(TypeFeedbackVector* vector);
  inline Object* literal(int literal_index) const;
  inline void set_literal(int literal_index, Object* literal);
  inline void set_literal_undefined(int literal_index);
  inline int literals_count() const;

  static Handle<LiteralsArray> New(Isolate* isolate,

@@ -6541,6 +6543,9 @@ class SharedFunctionInfo: public HeapObject {
  // Clear optimized code map.
  void ClearOptimizedCodeMap();

  // Like ClearOptimizedCodeMap, but preserves literals.
  void ClearCodeFromOptimizedCodeMap();

  // We have a special root FixedArray with the right shape and values
  // to represent the cleared optimized code map. This predicate checks
  // if that root is installed.

@@ -6554,6 +6559,9 @@ class SharedFunctionInfo: public HeapObject {
  // Trims the optimized code map after entries have been removed.
  void TrimOptimizedCodeMap(int shrink_by);

  static Handle<LiteralsArray> FindOrCreateLiterals(
      Handle<SharedFunctionInfo> shared, Handle<Context> native_context);

  // Add a new entry to the optimized code map for context-independent code.
  static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                              Handle<Code> code);

@@ -6588,6 +6596,13 @@ class SharedFunctionInfo: public HeapObject {

  static const int kNotFound = -1;

  // Helpers for assembly code that does a backwards walk of the optimized code
  // map.
  static inline int OffsetToPreviousContext();
  static inline int OffsetToPreviousCachedCode();
  static inline int OffsetToPreviousLiterals();
  static inline int OffsetToPreviousOsrAstId();

  // [scope_info]: Scope info.
  DECL_ACCESSORS(scope_info, ScopeInfo)

@@ -6616,16 +6631,10 @@ class SharedFunctionInfo: public HeapObject {
  inline int expected_nof_properties() const;
  inline void set_expected_nof_properties(int value);

  // [feedback_vector] - accumulates ast node feedback from full-codegen and
  // [feedback_metadata] - describes ast node feedback from full-codegen and
  // (increasingly) from crankshafted code where sufficient feedback isn't
  // available.
  DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)

  // Unconditionally clear the type feedback vector (including vector ICs).
  void ClearTypeFeedbackInfo();

  // Clear the type feedback vector with a more subtle policy at GC time.
  void ClearTypeFeedbackInfoAtGCTime();
  DECL_ACCESSORS(feedback_metadata, TypeFeedbackMetadata)

#if TRACE_MAPS
  // [unique_id] - For --trace-maps purposes, an identifier that's persistent

@@ -6919,15 +6928,14 @@ class SharedFunctionInfo: public HeapObject {
  static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
  static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
  static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
  static const int kFeedbackVectorOffset =
      kInferredNameOffset + kPointerSize;
  static const int kFeedbackMetadataOffset = kInferredNameOffset + kPointerSize;
#if TRACE_MAPS
  static const int kUniqueIdOffset = kFeedbackVectorOffset + kPointerSize;
  static const int kUniqueIdOffset = kFeedbackMetadataOffset + kPointerSize;
  static const int kLastPointerFieldOffset = kUniqueIdOffset;
#else
  // Just to not break the postmortem support with conditional offsets
  static const int kUniqueIdOffset = kFeedbackVectorOffset;
  static const int kLastPointerFieldOffset = kFeedbackVectorOffset;
  static const int kUniqueIdOffset = kFeedbackMetadataOffset;
  static const int kLastPointerFieldOffset = kFeedbackMetadataOffset;
#endif

#if V8_HOST_ARCH_32_BIT

@@ -7344,6 +7352,7 @@ class JSFunction: public JSObject {
  inline void set_code(Code* code);
  inline void set_code_no_write_barrier(Code* code);
  inline void ReplaceCode(Code* code);
  static void EnsureLiterals(Handle<JSFunction> function);

  // Tells whether this function inlines the given shared function info.
  bool Inlines(SharedFunctionInfo* candidate);

@@ -7364,6 +7373,12 @@ class JSFunction: public JSObject {
  // Tells whether or not the function is on the concurrent recompilation queue.
  inline bool IsInOptimizationQueue();

  // Unconditionally clear the type feedback vector (including vector ICs).
  void ClearTypeFeedbackInfo();

  // Clear the type feedback vector with a more subtle policy at GC time.
  void ClearTypeFeedbackInfoAtGCTime();

  // Completes inobject slack tracking on initial map if it is active.
  inline void CompleteInobjectSlackTrackingIfActive();

@@ -7378,6 +7393,8 @@ class JSFunction: public JSObject {
  // access to.
  DECL_ACCESSORS(literals, LiteralsArray)

  inline TypeFeedbackVector* feedback_vector();

  // The initial map for an object created by this constructor.
  inline Map* initial_map();
  static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,

@@ -1390,9 +1390,9 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
  SetInternalReference(obj, entry,
                       "optimized_code_map", shared->optimized_code_map(),
                       SharedFunctionInfo::kOptimizedCodeMapOffset);
  SetInternalReference(obj, entry,
                       "feedback_vector", shared->feedback_vector(),
                       SharedFunctionInfo::kFeedbackVectorOffset);
  SetInternalReference(obj, entry, "feedback_metadata",
                       shared->feedback_metadata(),
                       SharedFunctionInfo::kFeedbackMetadataOffset);
}


@@ -53,12 +53,10 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
      any_ic_changed_(false) {
}


static void GetICCounts(SharedFunctionInfo* shared,
                        int* ic_with_type_info_count, int* ic_generic_count,
                        int* ic_total_count, int* type_info_percentage,
                        int* generic_percentage) {
  Code* shared_code = shared->code();
static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
                        int* ic_generic_count, int* ic_total_count,
                        int* type_info_percentage, int* generic_percentage) {
  Code* shared_code = function->shared()->code();
  *ic_total_count = 0;
  *ic_generic_count = 0;
  *ic_with_type_info_count = 0;

@@ -71,7 +69,7 @@ static void GetICCounts(SharedFunctionInfo* shared,
  }

  // Harvest vector-ics as well
  TypeFeedbackVector* vector = shared->feedback_vector();
  TypeFeedbackVector* vector = function->feedback_vector();
  int with = 0, gen = 0;
  vector->ComputeCounts(&with, &gen);
  *ic_with_type_info_count += with;

@@ -94,8 +92,8 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
  PrintF(" for recompilation, reason: %s", reason);
  if (FLAG_type_info_threshold > 0) {
    int typeinfo, generic, total, type_percentage, generic_percentage;
    GetICCounts(function->shared(), &typeinfo, &generic, &total,
                &type_percentage, &generic_percentage);
    GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                &generic_percentage);
    PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total,
           type_percentage);
    PrintF(", generic ICs: %d/%d (%d%%)", generic, total, generic_percentage);

@@ -223,7 +221,7 @@ void RuntimeProfiler::OptimizeNow() {

    if (ticks >= kProfilerTicksBeforeOptimization) {
      int typeinfo, generic, total, type_percentage, generic_percentage;
      GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
      GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                  &generic_percentage);
      if (type_percentage >= FLAG_type_info_threshold &&
          generic_percentage <= FLAG_generic_ic_threshold) {

@@ -246,7 +244,7 @@ void RuntimeProfiler::OptimizeNow() {
      // If no IC was patched since the last tick and this function is very
      // small, optimistically optimize it now.
      int typeinfo, generic, total, type_percentage, generic_percentage;
      GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
      GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                  &generic_percentage);
      if (type_percentage >= FLAG_type_info_threshold &&
          generic_percentage <= FLAG_generic_ic_threshold) {

@@ -34,12 +34,28 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
  // Compile the target function.
  DCHECK(function->shared()->allows_lazy_compilation());

  // There is one special case where we have optimized code but we
  // couldn't find a literals array for the native context. That's with
  // FLAG_turbo_cache_shared_code.
  if (FLAG_turbo_cache_shared_code) {
    SharedFunctionInfo* shared = function->shared();
    CodeAndLiterals result;
    result = shared->SearchOptimizedCodeMap(*isolate->native_context(),
                                            BailoutId::None());
    if (result.code != nullptr) {
      function->ReplaceCode(result.code);
      JSFunction::EnsureLiterals(function);
      return result.code;
    }
  }

  Handle<Code> code;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code,
                                     Compiler::GetLazyCode(function));
  DCHECK(code->IsJavaScriptCode());

  function->ReplaceCode(*code);
  JSFunction::EnsureLiterals(function);
  return *code;
}

@@ -71,6 +87,7 @@ Object* CompileOptimized(Isolate* isolate, Handle<JSFunction> function,

  DCHECK(function->code()->kind() == Code::FUNCTION ||
         function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
         function->code()->is_interpreter_entry_trampoline() ||
         function->IsInOptimizationQueue());
  return function->code();
}

@@ -168,7 +168,8 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
  target_shared->ReplaceCode(source_shared->code());
  target_shared->set_scope_info(source_shared->scope_info());
  target_shared->set_length(source_shared->length());
  target_shared->set_feedback_vector(source_shared->feedback_vector());
  target_shared->set_feedback_metadata(source_shared->feedback_metadata());
  target_shared->set_num_literals(source_shared->num_literals());
  target_shared->set_internal_formal_parameter_count(
      source_shared->internal_formal_parameter_count());
  target_shared->set_start_position_and_type(

@@ -187,16 +188,13 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
  target->ReplaceCode(source_shared->code());
  DCHECK(target->next_function_link()->IsUndefined());

  // Make sure we get a fresh copy of the literal vector to avoid cross
  // context contamination.
  Handle<Context> context(source->context());
  target->set_context(*context);

  int number_of_literals = source->NumberOfLiterals();
  Handle<LiteralsArray> literals =
      LiteralsArray::New(isolate, handle(target_shared->feedback_vector()),
                         number_of_literals, TENURED);
  target->set_literals(*literals);
  // Make sure we get a fresh copy of the literal vector to avoid cross
  // context contamination, and that the literal vector makes its way into
  // the target_shared optimized code map.
  JSFunction::EnsureLiterals(target);

  if (isolate->logger()->is_logging_code_events() ||
      isolate->cpu_profiler()->is_profiling()) {

@@ -722,6 +722,13 @@ RUNTIME_FUNCTION(Runtime_NewRestParam) {
                           rest_index);
}

RUNTIME_FUNCTION(Runtime_InstallLiterals) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
  JSFunction::EnsureLiterals(function);
  return *function;
}

RUNTIME_FUNCTION(Runtime_NewClosure) {
  HandleScope scope(isolate);

@@ -232,7 +232,7 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
  function->shared()->ClearTypeFeedbackInfo();
  function->ClearTypeFeedbackInfo();
  Code* unoptimized = function->shared()->code();
  if (unoptimized->kind() == Code::FUNCTION) {
    unoptimized->ClearInlineCaches();

@@ -539,7 +539,6 @@ namespace internal {
  F(RegExpExecReThrow, 4, 1) \
  F(IsRegExp, 1, 1)


#define FOR_EACH_INTRINSIC_SCOPES(F) \
  F(ThrowConstAssignError, 0, 1) \
  F(DeclareGlobals, 2, 1) \

@@ -552,6 +551,7 @@ namespace internal {
  F(NewRestArguments_Generic, 2, 1) \
  F(NewSloppyArguments, 3, 1) \
  F(NewStrictArguments, 3, 1) \
  F(InstallLiterals, 1, 1) \
  F(NewRestParam, 3, 1) \
  F(NewClosure, 1, 1) \
  F(NewClosure_Tenured, 1, 1) \

@@ -568,7 +568,6 @@ namespace internal {
  F(ArgumentsLength, 0, 1) \
  F(Arguments, 1, 1)


#define FOR_EACH_INTRINSIC_SIMD(F) \
  F(IsSimdValue, 1, 1) \
  F(SimdSameValue, 2, 1) \

@@ -1797,8 +1797,15 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,

  // Clear literal boilerplates.
  if (obj->IsJSFunction()) {
    FixedArray* literals = JSFunction::cast(obj)->literals();
    for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
    LiteralsArray* literals = JSFunction::cast(obj)->literals();
    for (int i = 0; i < literals->literals_count(); i++) {
      literals->set_undefined(i);
    }
    // TODO(mvstanton): remove this line when the vector moves to the closure.
    // We need to clear the vector so the serializer doesn't try to serialize
    // the vector in the startup snapshot and the partial snapshot(s).
    literals->set_feedback_vector(
        TypeFeedbackVector::cast(isolate_->heap()->empty_fixed_array()));
  }

  // Object has not yet been serialized. Serialize it here.

@@ -80,17 +80,17 @@ FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
  return metadata()->GetKind(slot);
}


int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) const {
  DCHECK(slot.ToInt() < slot_count());
// static
int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) {
  return kReservedIndexCount + slot.ToInt();
}


// Conversion from an integer index to either a slot or an ic slot. The caller
// should know what kind she expects.
FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) const {
  DCHECK(index >= kReservedIndexCount && index < length());
// static
FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) {
  DCHECK(index >= kReservedIndexCount);
  return FeedbackVectorSlot(index - kReservedIndexCount);
}

|
||||
for (int i = 0; i < slot_count; i++) {
|
||||
metadata->SetKind(FeedbackVectorSlot(i), spec->GetKind(i));
|
||||
}
|
||||
|
||||
// It's important that the TypeFeedbackMetadata have a COW map, since it's
|
||||
// pointed to by both a SharedFunctionInfo and indirectly by closures through
|
||||
// the TypeFeedbackVector. The serializer uses the COW map type to decide
|
||||
// this object belongs in the startup snapshot and not the partial
|
||||
// snapshot(s).
|
||||
metadata->set_map(isolate->heap()->fixed_cow_array_map());
|
||||
|
||||
return metadata;
|
||||
}
|
||||
|
||||
@ -102,6 +110,21 @@ bool TypeFeedbackMetadata::SpecDiffersFrom(
|
||||
return false;
|
||||
}
|
||||
|
||||
bool TypeFeedbackMetadata::DiffersFrom(
|
||||
const TypeFeedbackMetadata* other_metadata) const {
|
||||
if (other_metadata->slot_count() != slot_count()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
int slots = slot_count();
|
||||
for (int i = 0; i < slots; i++) {
|
||||
FeedbackVectorSlot slot(i);
|
||||
if (GetKind(slot) != other_metadata->GetKind(slot)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
|
||||
switch (kind) {
|
||||
@ -246,15 +269,26 @@ void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
|
||||
SharedFunctionInfo::Iterator iterator(isolate);
|
||||
SharedFunctionInfo* shared;
|
||||
while ((shared = iterator.Next())) {
|
||||
TypeFeedbackVector* vector = shared->feedback_vector();
|
||||
vector->ClearKeyedStoreICs(shared);
|
||||
if (!shared->OptimizedCodeMapIsCleared()) {
|
||||
FixedArray* optimized_code_map = shared->optimized_code_map();
|
||||
int length = optimized_code_map->length();
|
||||
for (int i = SharedFunctionInfo::kEntriesStart; i < length;
|
||||
i += SharedFunctionInfo::kEntryLength) {
|
||||
WeakCell* cell = WeakCell::cast(
|
||||
optimized_code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
|
||||
if (cell->value()->IsLiteralsArray()) {
|
||||
TypeFeedbackVector* vector =
|
||||
LiteralsArray::cast(cell->value())->feedback_vector();
|
||||
vector->ClearKeyedStoreICs(shared);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
|
||||
Isolate* isolate = GetIsolate();
|
||||
|
||||
Code* host = shared->code();
|
||||
Object* uninitialized_sentinel =
|
||||
TypeFeedbackVector::RawUninitializedSentinel(isolate);
|
||||
|
@ -136,6 +136,8 @@ class TypeFeedbackMetadata : public FixedArray {
|
||||
|
||||
bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
|
||||
|
||||
bool DiffersFrom(const TypeFeedbackMetadata* other_metadata) const;
|
||||
|
||||
// Returns number of slots in the vector.
|
||||
inline int slot_count() const;
|
||||
|
||||
@ -194,12 +196,12 @@ class TypeFeedbackVector : public FixedArray {
|
||||
inline TypeFeedbackMetadata* metadata() const;
|
||||
|
||||
// Conversion from a slot to an integer index to the underlying array.
|
||||
inline int GetIndex(FeedbackVectorSlot slot) const;
|
||||
static inline int GetIndex(FeedbackVectorSlot slot);
|
||||
static int GetIndexFromSpec(const FeedbackVectorSpec* spec,
|
||||
FeedbackVectorSlot slot);
|
||||
|
||||
// Conversion from an integer index to the underlying array to a slot.
|
||||
inline FeedbackVectorSlot ToSlot(int index) const;
|
||||
static inline FeedbackVectorSlot ToSlot(int index);
|
||||
inline Object* Get(FeedbackVectorSlot slot) const;
|
||||
inline void Set(FeedbackVectorSlot slot, Object* value,
|
||||
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
|
||||
|
@ -913,6 +913,132 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
|
||||
|
||||
|
||||
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- rdx : new target (preserved for callee)
|
||||
// -- rdi : target function (preserved for callee)
|
||||
// -----------------------------------
|
||||
// First lookup code, maybe we don't need to compile!
|
||||
Label gotta_call_runtime;
|
||||
Label maybe_call_runtime;
|
||||
Label try_shared;
|
||||
Label loop_top, loop_bottom;
|
||||
|
||||
Register closure = rdi;
|
||||
Register map = r8;
|
||||
Register index = r9;
|
||||
__ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
|
||||
__ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
|
||||
__ cmpl(index, Immediate(2));
|
||||
__ j(less, &gotta_call_runtime);
|
||||
|
||||
// Find literals.
|
||||
// r14 : native context
|
||||
// r9 : length / index
|
||||
// r8 : optimized code map
|
||||
// rdx : new target
|
||||
// rdi : closure
|
||||
Register native_context = r14;
|
||||
__ movp(native_context, NativeContextOperand());
|
||||
|
||||
__ bind(&loop_top);
|
||||
// Native context match?
|
||||
Register temp = r11;
|
||||
__ movp(temp, FieldOperand(map, index, times_pointer_size,
|
||||
SharedFunctionInfo::OffsetToPreviousContext()));
|
||||
__ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
|
||||
__ cmpp(temp, native_context);
|
||||
__ j(not_equal, &loop_bottom);
|
||||
// OSR id set to none?
|
||||
__ movp(temp, FieldOperand(map, index, times_pointer_size,
|
||||
SharedFunctionInfo::OffsetToPreviousOsrAstId()));
|
||||
__ SmiToInteger32(temp, temp);
|
||||
const int bailout_id = BailoutId::None().ToInt();
|
||||
__ cmpl(temp, Immediate(bailout_id));
|
||||
__ j(not_equal, &loop_bottom);
|
||||
// Literals available?
|
||||
__ movp(temp, FieldOperand(map, index, times_pointer_size,
|
||||
SharedFunctionInfo::OffsetToPreviousLiterals()));
|
||||
__ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
|
||||
__ JumpIfSmi(temp, &gotta_call_runtime);
|
||||
|
||||
// Save the literals in the closure.
|
||||
__ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
|
||||
__ movp(rax, index);
|
||||
__ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, rax,
|
||||
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
||||
|
||||
// Code available?
|
||||
Register entry = rcx;
|
||||
__ movp(entry,
|
||||
FieldOperand(map, index, times_pointer_size,
|
||||
SharedFunctionInfo::OffsetToPreviousCachedCode()));
|
||||
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
|
||||
__ JumpIfSmi(entry, &maybe_call_runtime);
|
||||
|
||||
// Found literals and code. Get them into the closure and return.
|
||||
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
|
||||
|
||||
Label install_optimized_code_and_tailcall;
|
||||
__ bind(&install_optimized_code_and_tailcall);
|
||||
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
|
||||
|
||||
// Link the closure into the optimized function list.
|
||||
// rcx : code entry (entry)
|
||||
// r14 : native context
|
||||
// rdx : new target
|
||||
// rdi : closure
|
||||
__ movp(rbx,
|
||||
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
|
||||
__ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx);
|
||||
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, rax,
|
||||
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
||||
const int function_list_offset =
|
||||
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
|
||||
__ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
|
||||
closure);
|
||||
// Save closure before the write barrier.
|
||||
__ movp(rbx, closure);
|
||||
__ RecordWriteContextSlot(native_context, function_list_offset, closure, rax,
|
||||
kDontSaveFPRegs);
|
||||
__ movp(closure, rbx);
|
||||
__ jmp(entry);
|
||||
|
||||
__ bind(&loop_bottom);
|
||||
__ subl(index, Immediate(SharedFunctionInfo::kEntryLength));
|
||||
__ cmpl(index, Immediate(1));
|
||||
__ j(greater, &loop_top);
|
||||
|
||||
// We found neither literals nor code.
|
||||
__ jmp(&gotta_call_runtime);
|
||||
|
||||
__ bind(&maybe_call_runtime);
|
||||
|
||||
// Last possibility. Check the context free optimized code map entry.
|
||||
__ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
|
||||
SharedFunctionInfo::kSharedCodeIndex));
|
||||
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
|
||||
__ JumpIfSmi(entry, &try_shared);
|
||||
|
||||
// Store code entry in the closure.
|
||||
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
|
||||
__ jmp(&install_optimized_code_and_tailcall);
|
||||
|
||||
__ bind(&try_shared);
|
||||
// Is the full code valid?
|
||||
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
|
||||
__ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
|
||||
__ andl(rbx, Immediate(Code::KindField::kMask));
|
||||
__ shrl(rbx, Immediate(Code::KindField::kShift));
|
||||
__ cmpl(rbx, Immediate(Code::BUILTIN));
|
||||
__ j(equal, &gotta_call_runtime);
|
||||
// Yes, install the full code.
|
||||
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
|
||||
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
|
||||
__ jmp(entry);
|
||||
|
||||
__ bind(&gotta_call_runtime);
|
||||
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
|
||||
GenerateTailCallToReturnedCode(masm);
|
||||
}
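
For orientation, the search this builtin performs over the optimized code map can be sketched in plain C++. Everything below is an invented stand-in for V8's internal layouts (the entry shape, the field types, the no-OSR sentinel), not real API:

#include <cstddef>

// Hypothetical flattened view of one optimized code map entry.
struct CodeMapEntry {
  const void* native_context;  // WeakCell value, compared to the caller's
  int osr_ast_id;              // kNoOsrAstId means regular (non-OSR) code
  const void* literals;        // LiteralsArray; feedback vector in slot 0
  const void* cached_code;     // optimized Code object; may be cleared
};

const int kNoOsrAstId = -1;  // stand-in for BailoutId::None().ToInt()

// Scan newest-to-oldest, the same direction the index walks above.
// Returns the matching entry, or nullptr when the builtin would fall
// through to the runtime (no match, or literals already cleared).
const CodeMapEntry* FindCachedEntry(const CodeMapEntry* entries, size_t count,
                                    const void* native_context) {
  for (size_t i = count; i > 0; --i) {
    const CodeMapEntry& e = entries[i - 1];
    if (e.native_context != native_context) continue;  // loop_bottom
    if (e.osr_ast_id != kNoOsrAstId) continue;         // loop_bottom
    if (e.literals == nullptr) return nullptr;         // gotta_call_runtime
    return &e;  // literals found; caller still checks cached_code
  }
  return nullptr;  // neither literals nor code: gotta_call_runtime
}

When the returned entry also has code, the builtin installs both on the closure, links it into the context's optimized function list, and tail-calls the code; when the code slot is cleared, it degrades through maybe_call_runtime and try_shared exactly as the labels read.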

@ -4245,8 +4245,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {

void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  movp(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  movp(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
+  movp(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
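
Read as field accesses, the change swaps the middle hop of the vector load: the vector is now reached through the closure's literals array instead of the SharedFunctionInfo. A minimal C++ sketch with invented struct layouts (the real objects are tagged heap values, not raw pointers):

struct TypeFeedbackVector;

struct LiteralsArray {
  TypeFeedbackVector* feedback_vector;  // held in the array's first slot
};

struct JSFunction {
  LiteralsArray* literals;
};

// Two dependent loads, mirroring the two movp instructions above:
// closure -> literals -> feedback vector.
TypeFeedbackVector* LoadFeedbackVector(const JSFunction* fn) {
  return fn->literals->feedback_vector;
}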

@ -586,6 +586,10 @@

['ignition == True and arch == arm64', {
  'test-decls/CrossScriptReferencesHarmony': [SKIP],

+  'test-decls/CrossScriptReferences_Simple2': [SKIP],
+  'test-log/EquivalenceOfLoggingAndTraversal': [SKIP],

  'test-js-arm64-variables/lookup_slots': [SKIP],
  'test-spaces/SizeOfFirstPageIsLargeEnough': [SKIP],
  'test-heap/AddInstructionChangesNewSpacePromotion': [SKIP],

@ -182,6 +182,7 @@ class FunctionTester : public InitializedHandleScope {
    }
    CHECK(Compiler::Analyze(info.parse_info()));
    CHECK(Compiler::EnsureDeoptimizationSupport(&info));
+    JSFunction::EnsureLiterals(function);

    Pipeline pipeline(&info);
    Handle<Code> code = pipeline.GenerateCode();

@ -235,6 +236,7 @@ class FunctionTester : public InitializedHandleScope {
                   Handle<Code>(function->shared()->code()));
    CHECK(Compiler::Analyze(info.parse_info()));
    CHECK(Compiler::EnsureDeoptimizationSupport(&info));
+    JSFunction::EnsureLiterals(function);

    Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
    CHECK(!code.is_null());

@ -214,6 +214,7 @@ TEST(ContextLoadedFromActivation) {
  i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
  jsfun->set_code(T.function->code());
  jsfun->set_shared(T.function->shared());
+  jsfun->set_literals(T.function->literals());
  CHECK(context->Global()
            ->Set(context, v8_str("foo"), v8::Utils::CallableToLocal(jsfun))
            .FromJust());

@ -238,6 +239,7 @@ TEST(BuiltinLoadedFromActivation) {
  i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
  jsfun->set_code(T.function->code());
  jsfun->set_shared(T.function->shared());
+  jsfun->set_literals(T.function->literals());
  CHECK(context->Global()
            ->Set(context, v8_str("foo"), v8::Utils::CallableToLocal(jsfun))
            .FromJust());

@ -3627,7 +3627,7 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
      v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
          CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));

-  Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
  FeedbackVectorHelper feedback_helper(feedback_vector);

  int expected_slots = 2;

@ -3664,7 +3664,7 @@ static Code* FindFirstIC(Code* code, Code::Kind kind) {
static void CheckVectorIC(Handle<JSFunction> f, int slot_index,
                          InlineCacheState desired_state) {
  Handle<TypeFeedbackVector> vector =
-      Handle<TypeFeedbackVector>(f->shared()->feedback_vector());
+      Handle<TypeFeedbackVector>(f->feedback_vector());
  FeedbackVectorHelper helper(vector);
  FeedbackVectorSlot slot = helper.slot(slot_index);
  if (vector->GetKind(slot) == FeedbackVectorSlotKind::LOAD_IC) {

@ -3678,15 +3678,6 @@ static void CheckVectorIC(Handle<JSFunction> f, int slot_index,
}

-static void CheckVectorICCleared(Handle<JSFunction> f, int slot_index) {
-  Handle<TypeFeedbackVector> vector =
-      Handle<TypeFeedbackVector>(f->shared()->feedback_vector());
-  FeedbackVectorSlot slot(slot_index);
-  LoadICNexus nexus(vector, slot);
-  CHECK(IC::IsCleared(&nexus));
-}


TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
  if (i::FLAG_always_opt) return;
  CcTest::InitializeVM();

@ -3701,7 +3692,7 @@ TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
      v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
          CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));

-  Handle<TypeFeedbackVector> vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> vector(f->feedback_vector());
  CHECK(vector->Get(FeedbackVectorSlot(0))->IsWeakCell());

  SimulateIncrementalMarking(CcTest::heap());

@ -3711,44 +3702,6 @@ TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
}


-TEST(IncrementalMarkingClearsMonomorphicConstructor) {
-  if (i::FLAG_always_opt) return;
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  v8::HandleScope scope(CcTest::isolate());
-  v8::Local<v8::Value> fun1;
-  v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
-
-  {
-    LocalContext env;
-    CompileRun("function fun() { this.x = 1; };");
-    fun1 = env->Global()->Get(env.local(), v8_str("fun")).ToLocalChecked();
-  }
-
-  // Prepare function f that contains a monomorphic constructor for object
-  // originating from a different native context.
-  CHECK(CcTest::global()->Set(ctx, v8_str("fun1"), fun1).FromJust());
-  CompileRun(
-      "function fun() { this.x = 1; };"
-      "function f(o) { return new o(); } f(fun1); f(fun1);");
-  Handle<JSFunction> f = Handle<JSFunction>::cast(
-      v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
-          CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
-
-  Handle<TypeFeedbackVector> vector(f->shared()->feedback_vector());
-  CHECK(vector->Get(FeedbackVectorSlot(0))->IsWeakCell());
-
-  // Fire context dispose notification.
-  CcTest::isolate()->ContextDisposedNotification();
-  SimulateIncrementalMarking(CcTest::heap());
-  CcTest::heap()->CollectAllGarbage();
-
-  CHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(isolate),
-           vector->Get(FeedbackVectorSlot(0)));
-}


TEST(IncrementalMarkingPreservesMonomorphicIC) {
  if (i::FLAG_always_opt) return;
  CcTest::InitializeVM();

@ -3771,38 +3724,6 @@ TEST(IncrementalMarkingPreservesMonomorphicIC) {
}


-TEST(IncrementalMarkingClearsMonomorphicIC) {
-  if (i::FLAG_always_opt) return;
-  CcTest::InitializeVM();
-  v8::HandleScope scope(CcTest::isolate());
-  v8::Local<v8::Value> obj1;
-  v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
-
-  {
-    LocalContext env;
-    CompileRun("function fun() { this.x = 1; }; var obj = new fun();");
-    obj1 = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
-  }
-
-  // Prepare function f that contains a monomorphic IC for object
-  // originating from a different native context.
-  CHECK(CcTest::global()->Set(ctx, v8_str("obj1"), obj1).FromJust());
-  CompileRun("function f(o) { return o.x; } f(obj1); f(obj1);");
-  Handle<JSFunction> f = Handle<JSFunction>::cast(
-      v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
-          CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
-
-  CheckVectorIC(f, 0, MONOMORPHIC);
-
-  // Fire context dispose notification.
-  CcTest::isolate()->ContextDisposedNotification();
-  SimulateIncrementalMarking(CcTest::heap());
-  CcTest::heap()->CollectAllGarbage();
-
-  CheckVectorICCleared(f, 0);
-}


TEST(IncrementalMarkingPreservesPolymorphicIC) {
  if (i::FLAG_always_opt) return;
  CcTest::InitializeVM();

@ -3840,8 +3761,7 @@ TEST(IncrementalMarkingPreservesPolymorphicIC) {
  CheckVectorIC(f, 0, POLYMORPHIC);
}


-TEST(IncrementalMarkingClearsPolymorphicIC) {
+TEST(ContextDisposeDoesntClearPolymorphicIC) {
  if (i::FLAG_always_opt) return;
  CcTest::InitializeVM();
  v8::HandleScope scope(CcTest::isolate());

@ -3876,7 +3796,7 @@ TEST(IncrementalMarkingClearsPolymorphicIC) {
  SimulateIncrementalMarking(CcTest::heap());
  CcTest::heap()->CollectAllGarbage();

-  CheckVectorICCleared(f, 0);
+  CheckVectorIC(f, 0, POLYMORPHIC);
}

@ -4292,7 +4212,8 @@ TEST(Regress513507) {
    if (!code->is_optimized_code()) return;
  }

-  Handle<TypeFeedbackVector> vector = handle(shared->feedback_vector());
+  Handle<TypeFeedbackVector> vector =
+      TypeFeedbackVector::New(isolate, handle(shared->feedback_metadata()));
  Handle<LiteralsArray> lit =
      LiteralsArray::New(isolate, vector, shared->num_literals(), TENURED);
  Handle<Context> context(isolate->context());
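
Since the vector no longer hangs off the SharedFunctionInfo, the regression tests now build a fresh vector from the shared metadata and wrap it in a literals array before populating the optimized code map. A rough sketch of that pairing, with invented shapes (slot 0 of the literals array holds the vector, per the commit description):

struct TypeFeedbackVector {};

// Invented layout: the feedback vector occupies the first slot, the
// remaining num_literal_slots hold boilerplate literals.
struct LiteralsArray {
  TypeFeedbackVector* feedback_vector;  // slot 0
  int num_literal_slots;
};

// Mirrors the test setup above: vector first (stand-in for
// TypeFeedbackVector::New from the metadata), then the wrapping array.
LiteralsArray MakeLiteralsArray(TypeFeedbackVector* vector, int num_literals) {
  return LiteralsArray{vector, num_literals};
}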

@ -4349,7 +4270,8 @@ TEST(Regress514122) {
    if (!code->is_optimized_code()) return;
  }

-  Handle<TypeFeedbackVector> vector = handle(shared->feedback_vector());
+  Handle<TypeFeedbackVector> vector =
+      TypeFeedbackVector::New(isolate, handle(shared->feedback_metadata()));
  Handle<LiteralsArray> lit =
      LiteralsArray::New(isolate, vector, shared->num_literals(), TENURED);
  Handle<Context> context(isolate->context());

@ -5053,7 +4975,7 @@ TEST(WeakFunctionInConstructor) {
  // cleared. Now, verify that one additional call with a new function
  // allows monomorphicity.
  Handle<TypeFeedbackVector> feedback_vector = Handle<TypeFeedbackVector>(
-      createObj->shared()->feedback_vector(), CcTest::i_isolate());
+      createObj->feedback_vector(), CcTest::i_isolate());
  for (int i = 0; i < 20; i++) {
    Object* slot_value = feedback_vector->Get(FeedbackVectorSlot(0));
    CHECK(slot_value->IsWeakCell());

@ -5254,12 +5176,11 @@ Handle<JSFunction> GetFunctionByName(Isolate* isolate, const char* name) {
  return Handle<JSFunction>::cast(obj);
}


-void CheckIC(Code* code, Code::Kind kind, SharedFunctionInfo* shared,
-             int slot_index, InlineCacheState state) {
+void CheckIC(Handle<JSFunction> function, Code::Kind kind, int slot_index,
+             InlineCacheState state) {
  if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC ||
      kind == Code::CALL_IC) {
-    TypeFeedbackVector* vector = shared->feedback_vector();
+    TypeFeedbackVector* vector = function->feedback_vector();
    FeedbackVectorSlot slot(slot_index);
    if (kind == Code::LOAD_IC) {
      LoadICNexus nexus(vector, slot);

@ -5272,7 +5193,7 @@ void CheckIC(Code* code, Code::Kind kind, SharedFunctionInfo* shared,
      CHECK_EQ(nexus.StateFromFeedback(), state);
    }
  } else {
-    Code* ic = FindFirstIC(code, kind);
+    Code* ic = FindFirstIC(function->code(), kind);
    CHECK(ic->is_inline_cache_stub());
    CHECK(ic->ic_state() == state);
  }

@ -5303,12 +5224,12 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
    CompileRun("(testIC())");
  }
  heap->CollectAllGarbage();
-  CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, MONOMORPHIC);
+  CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun("(testIC())");
  }
-  CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, MONOMORPHIC);
+  CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
}

@ -5339,12 +5260,12 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
    CompileRun("(testIC())");
  }
  heap->CollectAllGarbage();
-  CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, POLYMORPHIC);
+  CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun("(testIC())");
  }
-  CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, POLYMORPHIC);
+  CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
}

@ -148,7 +148,7 @@ class InterpreterTester {
      function->shared()->set_function_data(*bytecode_.ToHandleChecked());
    }
    if (!feedback_vector_.is_null()) {
-      function->shared()->set_feedback_vector(
+      function->literals()->set_feedback_vector(
          *feedback_vector_.ToHandleChecked());
    }
    return function;

@ -306,7 +306,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
  // We shouldn't have deoptimization support. We want to recompile and
  // verify that our feedback vector preserves information.
  CHECK(!f->shared()->has_deoptimization_support());
-  Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());

  // Verify that we gathered feedback.
  CHECK(!feedback_vector->is_empty());

@ -321,7 +321,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
  // of the full code.
  CHECK(f->IsOptimized());
  CHECK(f->shared()->has_deoptimization_support());
-  object = f->shared()->feedback_vector()->Get(slot_for_a);
+  object = f->feedback_vector()->Get(slot_for_a);
  CHECK(object->IsWeakCell() &&
        WeakCell::cast(object)->value()->IsJSFunction());
}

@ -352,18 +352,16 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {

  // Not compiled, and so no feedback vector allocated yet.
  CHECK(!f->shared()->is_compiled());
-  CHECK(f->shared()->feedback_vector()->is_empty());
+  CHECK(f->feedback_vector()->is_empty());

  CompileRun("morphing_call();");

  // Now a feedback vector is allocated.
  CHECK(f->shared()->is_compiled());
-  CHECK(!f->shared()->feedback_vector()->is_empty());
+  CHECK(!f->feedback_vector()->is_empty());
}


-// Test that optimized code for different closures is actually shared
-// immediately by the FastNewClosureStub when run in the same context.
+// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing1) {
  FLAG_stress_compaction = false;
  FLAG_allow_natives_syntax = true;

@ -382,8 +380,8 @@ TEST(OptimizedCodeSharing1) {
      "%DebugPrint(closure0());"
      "%OptimizeFunctionOnNextCall(closure0);"
      "%DebugPrint(closure0());"
-      "var closure1 = MakeClosure();"
-      "var closure2 = MakeClosure();");
+      "var closure1 = MakeClosure(); closure1();"
+      "var closure2 = MakeClosure(); closure2();");
  Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
      v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
          env->Global()

@ -400,9 +398,7 @@ TEST(OptimizedCodeSharing1) {
  }
}


-// Test that optimized code for different closures is actually shared
-// immediately by the FastNewClosureStub when run different contexts.
+// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing2) {
  if (FLAG_stress_compaction) return;
  FLAG_allow_natives_syntax = true;

@ -453,8 +449,8 @@ TEST(OptimizedCodeSharing2) {
      "%DebugPrint(closure0());"
      "%OptimizeFunctionOnNextCall(closure0);"
      "%DebugPrint(closure0());"
-      "var closure1 = MakeClosure();"
-      "var closure2 = MakeClosure();");
+      "var closure1 = MakeClosure(); closure1();"
+      "var closure2 = MakeClosure(); closure2();");
  Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
      v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
          env->Global()

@ -472,9 +468,7 @@ TEST(OptimizedCodeSharing2) {
  }
}


-// Test that optimized code for different closures is actually shared
-// immediately by the FastNewClosureStub without context-dependent entries.
+// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing3) {
  if (FLAG_stress_compaction) return;
  FLAG_allow_natives_syntax = true;

@ -528,8 +522,8 @@ TEST(OptimizedCodeSharing3) {
      "%DebugPrint(closure0());"
      "%OptimizeFunctionOnNextCall(closure0);"
      "%DebugPrint(closure0());"
-      "var closure1 = MakeClosure();"
-      "var closure2 = MakeClosure();");
+      "var closure1 = MakeClosure(); closure1();"
+      "var closure2 = MakeClosure(); closure2();");
  Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
      v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
          env->Global()

@ -208,7 +208,7 @@ TEST(VectorCallICStates) {
  Handle<JSFunction> f = GetFunction("f");
  // There should be one IC.
  Handle<TypeFeedbackVector> feedback_vector =
-      Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+      Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
  FeedbackVectorSlot slot(0);
  CallICNexus nexus(feedback_vector, slot);
  CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());

@ -249,7 +249,7 @@ TEST(VectorLoadICStates) {
  Handle<JSFunction> f = GetFunction("f");
  // There should be one IC.
  Handle<TypeFeedbackVector> feedback_vector =
-      Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+      Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
  FeedbackVectorSlot slot(0);
  LoadICNexus nexus(feedback_vector, slot);
  CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());

@ -308,7 +308,7 @@ TEST(VectorLoadICSlotSharing) {
  Handle<JSFunction> f = GetFunction("f");
  // There should be one IC slot.
  Handle<TypeFeedbackVector> feedback_vector =
-      Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+      Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
  FeedbackVectorHelper helper(feedback_vector);
  CHECK_EQ(1, helper.slot_count());
  FeedbackVectorSlot slot(0);

@ -332,7 +332,7 @@ TEST(VectorLoadICOnSmi) {
  Handle<JSFunction> f = GetFunction("f");
  // There should be one IC.
  Handle<TypeFeedbackVector> feedback_vector =
-      Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+      Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
  FeedbackVectorSlot slot(0);
  LoadICNexus nexus(feedback_vector, slot);
  CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());

@ -397,7 +397,7 @@ TEST(ReferenceContextAllocatesNoSlots) {

  // There should be two LOAD_ICs, one for a and one for y at the end.
  Handle<TypeFeedbackVector> feedback_vector =
-      handle(f->shared()->feedback_vector(), isolate);
+      handle(f->feedback_vector(), isolate);
  FeedbackVectorHelper helper(feedback_vector);
  CHECK_EQ(4, helper.slot_count());
  CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::STORE_IC);

@ -416,7 +416,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
  Handle<JSFunction> f = GetFunction("testprop");

  // There should be one LOAD_IC, for the load of a.
-  Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
  FeedbackVectorHelper helper(feedback_vector);
  CHECK_EQ(2, helper.slot_count());
}

@ -433,7 +433,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
  Handle<JSFunction> f = GetFunction("testpropfunc");

  // There should be 2 LOAD_ICs and 2 CALL_ICs.
-  Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
  FeedbackVectorHelper helper(feedback_vector);
  CHECK_EQ(5, helper.slot_count());
  CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::CALL_IC);

@ -455,7 +455,7 @@ TEST(ReferenceContextAllocatesNoSlots) {

  // There should be 1 LOAD_ICs for the load of a, and one KEYED_LOAD_IC for
  // the load of x[0] in the return statement.
-  Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
  FeedbackVectorHelper helper(feedback_vector);
  CHECK_EQ(3, helper.slot_count());
  CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);

@ -474,7 +474,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
  Handle<JSFunction> f = GetFunction("testcompound");

  // There should be 3 LOAD_ICs, for load of a and load of x.old and x.young.
-  Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
  FeedbackVectorHelper helper(feedback_vector);
  CHECK_EQ(6, helper.slot_count());
  CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);

@ -504,7 +504,7 @@ TEST(VectorStoreICBasic) {
      "f(a);");
  Handle<JSFunction> f = GetFunction("f");
  // There should be one IC slot.
-  Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
  FeedbackVectorHelper helper(feedback_vector);
  CHECK_EQ(1, helper.slot_count());
  FeedbackVectorSlot slot(0);

@ -767,6 +767,9 @@
  'throw-and-catch-function': [FAIL],
  'with-leave': [FAIL],

+  # TODO(mstarzinger,4674): Support exception handlers in BytecodeGraphBuilder.
+  'regress/regress-568765': [SKIP],

  'arguments-load-across-eval': [SKIP],
  'arguments-read-and-assignment': [SKIP],
  'array-constructor': [PASS, SLOW],

@ -687,16 +687,14 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
                 IsParameter(Linkage::kInterpreterRegisterFileParameter),
                 IsIntPtrConstant(
                     InterpreterFrameConstants::kFunctionFromRegisterPointer));
-    Matcher<Node*> load_shared_function_info_matcher =
-        m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
-                 IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
-                                  kHeapObjectTag));
+    Matcher<Node*> load_literals_matcher = m.IsLoad(
+        MachineType::AnyTagged(), load_function_matcher,
+        IsIntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag));

-    EXPECT_THAT(
-        feedback_vector,
-        m.IsLoad(MachineType::AnyTagged(), load_shared_function_info_matcher,
-                 IsIntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
-                                  kHeapObjectTag)));
+    EXPECT_THAT(feedback_vector,
+                m.IsLoad(MachineType::AnyTagged(), load_literals_matcher,
+                         IsIntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
+                                          kHeapObjectTag)));
  }
}