Visit the Optimized Code Map on first call rather than closure creation.

This is useful for escape analysis, and helps upcoming changes to
type feedback gathering.

BUG=

Review URL: https://codereview.chromium.org/1670143002

Cr-Commit-Position: refs/heads/master@{#35395}
Author: mvstanton, 2016-04-11 10:27:11 -07:00 (committed by Commit bot)
Parent: bf50532928
Commit: 9336f4cc6d
15 changed files with 904 additions and 229 deletions
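
For orientation before the per-architecture diffs: FastNewClosureStub no longer searches the optimized code map when a closure is created; a new closure simply starts out pointing at the CompileLazy builtin, and that builtin now performs the search on the first call. The standalone C++ model below sketches only the decision logic of the new Generate_CompileLazy builtins, following the (native context, optimized code, literals, osr-ast-id) entry layout described in the hydrogen code this CL removes. It is not code from the CL; the types, the helper name CompileLazyLookup, and the Action enum are stand-ins.

#include <vector>

// One optimized-code-map entry; in V8 these are weak cells, modeled here as
// plain pointers that are null once the referenced object has been collected.
struct Entry {
  const void* context;   // native context the code was compiled for
  const void* code;      // optimized code, null if the weak cell was cleared
  const void* literals;  // literals array, null if the weak cell was cleared
  int osr_ast_id;        // BailoutId::None() for non-OSR entries
};

struct OptimizedCodeMapModel {
  const void* shared_code = nullptr;  // context-independent kSharedCodeIndex slot
  std::vector<Entry> entries;
};

enum class Action {
  kInstallOptimized,  // tail-call cached (or context-free shared) optimized code
  kInstallSfiCode,    // fall back to the SharedFunctionInfo's code object
  kCallRuntime        // Runtime::kCompileLazy
};

Action CompileLazyLookup(const OptimizedCodeMapModel& map,
                         const void* native_context, int bailout_id_none,
                         bool sfi_code_is_builtin) {
  // Walk the map backwards (newest entry first), like the assembly loop.
  for (auto it = map.entries.rbegin(); it != map.entries.rend(); ++it) {
    if (it->context != native_context) continue;       // wrong native context
    if (it->osr_ast_id != bailout_id_none) continue;    // OSR entry, skip it
    if (it->literals == nullptr) return Action::kCallRuntime;  // literals gone
    // (At this point the builtin stores the literals into the closure.)
    if (it->code != nullptr) return Action::kInstallOptimized;
    // Matching entry, but its code was collected: try the context-free slot,
    // then the SharedFunctionInfo's own code, unless that is just a builtin.
    if (map.shared_code != nullptr) return Action::kInstallOptimized;
    return sfi_code_is_builtin ? Action::kCallRuntime : Action::kInstallSfiCode;
  }
  return Action::kCallRuntime;  // no entry for this native context
}

The per-architecture assembly below implements this flow plus the bookkeeping the model omits: storing the literals, linking the closure into the native context's optimized-function list, and the associated write barriers.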


@@ -1228,6 +1228,159 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
// -- r3 : new target (preserved for callee)
// -- r1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
Register argument_count = r0;
Register closure = r1;
Register new_target = r3;
__ push(argument_count);
__ push(new_target);
__ push(closure);
Register map = argument_count;
Register index = r2;
__ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(map,
FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
__ cmp(index, Operand(Smi::FromInt(2)));
__ b(lt, &gotta_call_runtime);
// Find literals.
// r3 : native context
// r2 : length / index
// r0 : optimized code map
// stack[0] : new target
// stack[4] : closure
Register native_context = r3;
__ ldr(native_context, NativeContextMemOperand());
__ bind(&loop_top);
Register temp = r1;
Register array_pointer = r5;
// Does the native context match?
__ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index));
__ ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousContext));
__ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ b(ne, &loop_bottom);
// OSR id set to none?
__ ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousOsrAstId));
const int bailout_id = BailoutId::None().ToInt();
__ cmp(temp, Operand(Smi::FromInt(bailout_id)));
__ b(ne, &loop_bottom);
// Literals available?
__ ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
__ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ JumpIfSmi(temp, &gotta_call_runtime);
// Save the literals in the closure.
__ ldr(r4, MemOperand(sp, 0));
__ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
__ push(index);
__ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(index);
// Code available?
Register entry = r4;
__ ldr(entry,
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, r5);
// Link the closure into the optimized function list.
// r4 : code entry
// r3 : native context
// r1 : closure
__ ldr(r5,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ str(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(r5, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, r0,
kLRHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, r5);
__ pop(new_target);
__ pop(argument_count);
__ Jump(entry);
__ bind(&loop_bottom);
__ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ cmp(index, Operand(Smi::FromInt(1)));
__ b(gt, &loop_top);
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&maybe_call_runtime);
__ pop(closure);
// Last possibility. Check the context free optimized code map entry.
__ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
__ pop(new_target);
__ pop(argument_count);
// Is the full code valid?
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
__ and_(r5, r5, Operand(Code::KindField::kMask));
__ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
__ cmp(r5, Operand(Code::BUILTIN));
__ b(eq, &gotta_call_runtime_no_stack);
// Yes, install the full code.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, r5);
__ Jump(entry);
__ bind(&gotta_call_runtime);
__ pop(closure);
__ pop(new_target);
__ pop(argument_count);
__ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}


@@ -1177,6 +1177,138 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
// -- x3 : new target (preserved for callee)
// -- x1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
Register closure = x1;
Register new_target = x3;
Register map = x13;
Register index = x2;
__ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(map,
FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
__ Cmp(index, Operand(2));
__ B(lt, &gotta_call_runtime);
// Find literals.
// x3 : native context
// x2 : length / index
// x13 : optimized code map
// stack[0] : new target
// stack[4] : closure
Register native_context = x4;
__ Ldr(native_context, NativeContextMemOperand());
__ Bind(&loop_top);
Register temp = x5;
Register array_pointer = x6;
// Does the native context match?
__ Add(array_pointer, map, Operand(index, LSL, kPointerSizeLog2));
__ Ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousContext));
__ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Cmp(temp, native_context);
__ B(ne, &loop_bottom);
// OSR id set to none?
__ Ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousOsrAstId));
const int bailout_id = BailoutId::None().ToInt();
__ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
__ B(ne, &loop_bottom);
// Literals available?
__ Ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
__ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ JumpIfSmi(temp, &gotta_call_runtime);
// Save the literals in the closure.
__ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
__ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Code available?
Register entry = x7;
__ Ldr(entry,
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
Label install_optimized_code_and_tailcall;
__ Bind(&install_optimized_code_and_tailcall);
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
// Link the closure into the optimized function list.
// x7 : code entry
// x4 : native context
// x1 : closure
__ Ldr(x8,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ Str(x8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, x8, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ Str(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ Mov(x5, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, x5, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Jump(entry);
__ Bind(&loop_bottom);
__ Sub(index, index, Operand(SharedFunctionInfo::kEntryLength));
__ Cmp(index, Operand(1));
__ B(gt, &loop_top);
// We found neither literals nor code.
__ B(&gotta_call_runtime);
__ Bind(&maybe_call_runtime);
// Last possibility. Check the context free optimized code map entry.
__ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ B(&install_optimized_code_and_tailcall);
__ Bind(&try_shared);
// Is the full code valid?
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
__ and_(x5, x5, Operand(Code::KindField::kMask));
__ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
__ Cmp(x5, Operand(Code::BUILTIN));
__ B(eq, &gotta_call_runtime);
// Yes, install the full code.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
__ Jump(entry);
__ Bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}


@@ -97,26 +97,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
// BuildCheckAndInstallOptimizedCode emits code to install the optimized
// function found in the optimized code map at map_index in js_function, if
// the function at map_index matches the given native_context. Builder is
// left in the "Then()" state after the install.
void BuildCheckAndInstallOptimizedCode(HValue* js_function,
HValue* native_context,
IfBuilder* builder,
HValue* optimized_map,
HValue* map_index);
void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
HValue* code_object, HValue* literals);
void BuildInstallCode(HValue* js_function, HValue* shared_info);
HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
HValue* iterator,
int field_offset);
void BuildInstallFromOptimizedCodeMap(HValue* js_function,
HValue* shared_info,
HValue* native_context);
HValue* BuildToString(HValue* input, bool convert);
HValue* BuildToPrimitive(HValue* input, HValue* input_map);
@@ -2013,182 +1993,6 @@ HValue* CodeStubGraphBuilder<ToObjectStub>::BuildCodeStub() {
Handle<Code> ToObjectStub::GenerateCode() { return DoGenerateCode(this); }
void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* js_function,
HValue* native_context,
IfBuilder* builder,
HValue* optimized_map,
HValue* map_index) {
HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
HValue* context_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kContextOffset);
context_slot = Add<HLoadNamedField>(context_slot, nullptr,
HObjectAccess::ForWeakCellValue());
HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
HValue* code_object = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
code_object = Add<HLoadNamedField>(code_object, nullptr,
HObjectAccess::ForWeakCellValue());
builder->If<HCompareObjectEqAndBranch>(native_context,
context_slot);
builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
builder->And();
builder->IfNot<HCompareObjectEqAndBranch>(code_object,
graph()->GetConstant0());
builder->Then();
HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
map_index, SharedFunctionInfo::kLiteralsOffset);
literals = Add<HLoadNamedField>(literals, nullptr,
HObjectAccess::ForWeakCellValue());
IfBuilder maybe_deopt(this);
maybe_deopt.If<HCompareObjectEqAndBranch>(literals, graph()->GetConstant0());
maybe_deopt.ThenDeopt(Deoptimizer::kLiteralsWereDisposed);
maybe_deopt.End();
BuildInstallOptimizedCode(js_function, native_context, code_object, literals);
// The builder continues in the "then" after this function.
}
void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(HValue* js_function,
HValue* native_context,
HValue* code_object,
HValue* literals) {
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized());
// TODO(fschneider): Idea: store proper code pointers in the optimized code
// map and either unmangle them on marking or do nothing as the whole map is
// discarded on major GC anyway.
Add<HStoreCodeEntry>(js_function, code_object);
Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
literals);
// Now link a function into a list of optimized functions.
HValue* optimized_functions_list = Add<HLoadNamedField>(
native_context, nullptr,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
optimized_functions_list);
// This store is the only one that should have a write barrier.
Add<HStoreNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
js_function);
}
void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
HValue* shared_info) {
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
graph()->GetConstantUndefined());
HValue* code_object = Add<HLoadNamedField>(shared_info, nullptr,
HObjectAccess::ForCodeOffset());
Add<HStoreCodeEntry>(js_function, code_object);
}
HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
HValue* optimized_map,
HValue* iterator,
int field_offset) {
// By making sure to express these loads in the form [<hvalue> + constant]
// the keyed load can be hoisted.
DCHECK(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
HValue* field_slot = iterator;
if (field_offset > 0) {
HValue* field_offset_value = Add<HConstant>(field_offset);
field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
}
HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
nullptr, nullptr, FAST_ELEMENTS);
return field_entry;
}
void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* js_function,
HValue* shared_info,
HValue* native_context) {
Counters* counters = isolate()->counters();
Factory* factory = isolate()->factory();
IfBuilder is_optimized(this);
HInstruction* optimized_map = Add<HLoadNamedField>(
shared_info, nullptr, HObjectAccess::ForOptimizedCodeMap());
HValue* null_constant = Add<HConstant>(0);
is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
is_optimized.Then();
{
BuildInstallCode(js_function, shared_info);
}
is_optimized.Else();
{
AddIncrementCounter(counters->fast_new_closure_try_optimized());
// The {optimized_map} points to fixed array of 4-element entries:
// (native context, optimized code, literals, ast-id).
// Iterate through the {optimized_map} backwards. After the loop, if no
// matching optimized code was found, install unoptimized code.
// for(i = map.length() - SharedFunctionInfo::kEntryLength;
// i >= SharedFunctionInfo::kEntriesStart;
// i -= SharedFunctionInfo::kEntryLength) { ... }
HValue* first_entry_index =
Add<HConstant>(SharedFunctionInfo::kEntriesStart);
HValue* shared_function_entry_length =
Add<HConstant>(SharedFunctionInfo::kEntryLength);
LoopBuilder loop_builder(this, context(), LoopBuilder::kPostDecrement,
shared_function_entry_length);
HValue* array_length = Add<HLoadNamedField>(
optimized_map, nullptr, HObjectAccess::ForFixedArrayLength());
HValue* start_pos =
AddUncasted<HSub>(array_length, shared_function_entry_length);
HValue* slot_iterator =
loop_builder.BeginBody(start_pos, first_entry_index, Token::GTE);
{
IfBuilder done_check(this);
BuildCheckAndInstallOptimizedCode(js_function, native_context,
&done_check, optimized_map,
slot_iterator);
// Fall out of the loop
loop_builder.Break();
}
loop_builder.EndBody();
// If {slot_iterator} is less than the first entry index, then we failed to
// find a context-dependent code and try context-independent code next.
IfBuilder no_optimized_code_check(this);
no_optimized_code_check.If<HCompareNumericAndBranch>(
slot_iterator, first_entry_index, Token::LT);
no_optimized_code_check.Then();
{
IfBuilder shared_code_check(this);
HValue* shared_code =
Add<HLoadNamedField>(optimized_map, nullptr,
HObjectAccess::ForOptimizedCodeMapSharedCode());
shared_code = Add<HLoadNamedField>(shared_code, nullptr,
HObjectAccess::ForWeakCellValue());
shared_code_check.IfNot<HCompareObjectEqAndBranch>(
shared_code, graph()->GetConstant0());
shared_code_check.Then();
{
// Store the context-independent optimized code.
HValue* literals = Add<HConstant>(factory->empty_fixed_array());
BuildInstallOptimizedCode(js_function, native_context, shared_code,
literals);
}
shared_code_check.Else();
{
// Store the unoptimized code.
BuildInstallCode(js_function, shared_info);
}
}
}
}
template<>
HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
Counters* counters = isolate()->counters();
@@ -2228,10 +2032,13 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
context());
// Initialize the code pointer in the function to be the one found in the
// shared function info object. But first check if there is an optimized
// version for our context.
BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
Handle<Code> lazy_builtin(
isolate()->builtins()->builtin(Builtins::kCompileLazy));
HConstant* lazy = Add<HConstant>(lazy_builtin);
Add<HStoreCodeEntry>(js_function, lazy);
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
graph()->GetConstantUndefined());
return js_function;
}


@@ -3688,11 +3688,7 @@ ElementsTransitionAndStoreStub::GetCallInterfaceDescriptor() const {
return VectorStoreTransitionDescriptor(isolate());
}
void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(Runtime::FunctionForId(Runtime::kNewClosure)->entry);
}
void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {}
void FastNewContextStub::InitializeDescriptor(CodeStubDescriptor* d) {}


@@ -891,6 +891,16 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
TRACE_EVENT0("v8", "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
if (FLAG_turbo_cache_shared_code) {
CodeAndLiterals result;
result = function->shared()->SearchOptimizedCodeMap(
*isolate->native_context(), BailoutId::None());
if (result.code != nullptr) {
return Handle<Code>(result.code);
}
}
// If the debugger is active, do not compile with turbofan unless we can
// deopt from turbofan code.
if (FLAG_turbo_asm && function->shared()->asm_function() &&
@@ -1121,6 +1131,7 @@ bool Compiler::ParseAndAnalyze(ParseInfo* info) {
bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
if (function->is_compiled()) return true;
MaybeHandle<Code> maybe_code = GetLazyCode(function);
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {


@@ -842,6 +842,154 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
// -- edx : new target (preserved for callee)
// -- edi : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
Register closure = edi;
Register new_target = edx;
Register argument_count = eax;
__ push(argument_count);
__ push(new_target);
__ push(closure);
Register map = argument_count;
Register index = ebx;
__ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
__ cmp(index, Immediate(Smi::FromInt(2)));
__ j(less, &gotta_call_runtime);
// Find literals.
// edx : native context
// ebx : length / index
// eax : optimized code map
// stack[0] : new target
// stack[4] : closure
Register native_context = edx;
__ mov(native_context, NativeContextOperand());
__ bind(&loop_top);
Register temp = edi;
// Does the native context match?
__ mov(temp, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousContext));
__ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ j(not_equal, &loop_bottom);
// OSR id set to none?
__ mov(temp, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousOsrAstId));
const int bailout_id = BailoutId::None().ToInt();
__ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
__ j(not_equal, &loop_bottom);
// Literals available?
__ mov(temp, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousLiterals));
__ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ JumpIfSmi(temp, &gotta_call_runtime);
// Save the literals in the closure.
__ mov(ecx, Operand(esp, 0));
__ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
__ push(index);
__ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ pop(index);
// Code available?
Register entry = ecx;
__ mov(entry, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, eax);
// Link the closure into the optimized function list.
// ecx : code entry
// edx : native context
// edi : closure
__ mov(ebx,
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
closure);
// Save closure before the write barrier.
__ mov(ebx, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
kDontSaveFPRegs);
__ mov(closure, ebx);
__ pop(new_target);
__ pop(argument_count);
__ jmp(entry);
__ bind(&loop_bottom);
__ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ cmp(index, Immediate(Smi::FromInt(1)));
__ j(greater, &loop_top);
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&maybe_call_runtime);
__ pop(closure);
// Last possibility. Check the context free optimized code map entry.
__ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
__ pop(new_target);
__ pop(argument_count);
// Is the full code valid?
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
__ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
__ and_(ebx, Code::KindField::kMask);
__ shr(ebx, Code::KindField::kShift);
__ cmp(ebx, Immediate(Code::BUILTIN));
__ j(equal, &gotta_call_runtime_no_stack);
// Yes, install the full code.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, ebx);
__ jmp(entry);
__ bind(&gotta_call_runtime);
__ pop(closure);
__ pop(new_target);
__ pop(argument_count);
__ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}


@@ -1226,6 +1226,154 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
// -- a3 : new target (preserved for callee)
// -- a1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
Register argument_count = a0;
Register closure = a1;
Register new_target = a3;
__ push(argument_count);
__ push(new_target);
__ push(closure);
Register map = a0;
Register index = a2;
__ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
__ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
// Find literals.
// a3 : native context
// a2 : length / index
// a0 : optimized code map
// stack[0] : new target
// stack[4] : closure
Register native_context = a3;
__ lw(native_context, NativeContextMemOperand());
__ bind(&loop_top);
Register temp = a1;
Register array_pointer = t1;
// Does the native context match?
__ sll(at, index, kPointerSizeLog2 - kSmiTagSize);
__ Addu(array_pointer, map, Operand(at));
__ lw(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousContext));
__ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Branch(&loop_bottom, ne, temp, Operand(native_context));
// OSR id set to none?
__ lw(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousOsrAstId));
const int bailout_id = BailoutId::None().ToInt();
__ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
// Literals available?
__ lw(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
__ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ JumpIfSmi(temp, &gotta_call_runtime);
// Save the literals in the closure.
__ lw(t0, MemOperand(sp, 0));
__ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
__ push(index);
__ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index,
kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(index);
// Code available?
Register entry = t0;
__ lw(entry,
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, t1);
// Link the closure into the optimized function list.
// t0 : code entry
// a3 : native context
// a1 : closure
__ lw(t1,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0,
kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ sw(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(t1, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
kRAHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, t1);
__ pop(new_target);
__ pop(argument_count);
__ Jump(entry);
__ bind(&loop_bottom);
__ Subu(index, index,
Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&maybe_call_runtime);
__ pop(closure);
// Last possibility. Check the context free optimized code map entry.
__ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
__ pop(new_target);
__ pop(argument_count);
// Is the full code valid?
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
__ And(t1, t1, Operand(Code::KindField::kMask));
__ srl(t1, t1, Code::KindField::kShift);
__ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
// Yes, install the full code.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, t1);
__ Jump(entry);
__ bind(&gotta_call_runtime);
__ pop(closure);
__ pop(new_target);
__ pop(argument_count);
__ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}


@@ -1215,6 +1215,154 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
// -- a3 : new target (preserved for callee)
// -- a1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
Register argument_count = a0;
Register closure = a1;
Register new_target = a3;
__ push(argument_count);
__ push(new_target);
__ push(closure);
Register map = a0;
Register index = a2;
__ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
__ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
// Find literals.
// a3 : native context
// a2 : length / index
// a0 : optimized code map
// stack[0] : new target
// stack[4] : closure
Register native_context = a3;
__ ld(native_context, NativeContextMemOperand());
__ bind(&loop_top);
Register temp = a1;
Register array_pointer = a5;
// Does the native context match?
__ SmiScale(at, index, kPointerSizeLog2);
__ Daddu(array_pointer, map, Operand(at));
__ ld(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousContext));
__ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Branch(&loop_bottom, ne, temp, Operand(native_context));
// OSR id set to none?
__ ld(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousOsrAstId));
const int bailout_id = BailoutId::None().ToInt();
__ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
// Literals available?
__ ld(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
__ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ JumpIfSmi(temp, &gotta_call_runtime);
// Save the literals in the closure.
__ ld(a4, MemOperand(sp, 0));
__ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
__ push(index);
__ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(index);
// Code available?
Register entry = a4;
__ ld(entry,
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, a5);
// Link the closure into the optimized function list.
// a4 : code entry
// a3 : native context
// a1 : closure
__ ld(a5,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ sd(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(a5, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
kRAHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, a5);
__ pop(new_target);
__ pop(argument_count);
__ Jump(entry);
__ bind(&loop_bottom);
__ Dsubu(index, index,
Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&maybe_call_runtime);
__ pop(closure);
// Last possibility. Check the context free optimized code map entry.
__ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
__ pop(new_target);
__ pop(argument_count);
// Is the full code valid?
__ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ ld(a5, FieldMemOperand(entry, Code::kFlagsOffset));
__ And(a5, a5, Operand(Code::KindField::kMask));
__ dsrl(a5, a5, Code::KindField::kShift);
__ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
// Yes, install the full code.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, a5);
__ Jump(entry);
__ bind(&gotta_call_runtime);
__ pop(closure);
__ pop(new_target);
__ pop(argument_count);
__ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
@@ -1224,7 +1372,6 @@ void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}


@@ -5746,7 +5746,6 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
set_compiler_hints(hints);
}
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
kNeedsHomeObject)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)


@@ -6688,6 +6688,18 @@ class SharedFunctionInfo: public HeapObject {
static const int kNotFound = -1;
// Helpers for assembly code that does a backwards walk of the optimized code
// map.
static const int kOffsetToPreviousContext =
FixedArray::kHeaderSize + kPointerSize * (kContextOffset - kEntryLength);
static const int kOffsetToPreviousCachedCode =
FixedArray::kHeaderSize +
kPointerSize * (kCachedCodeOffset - kEntryLength);
static const int kOffsetToPreviousLiterals =
FixedArray::kHeaderSize + kPointerSize * (kLiteralsOffset - kEntryLength);
static const int kOffsetToPreviousOsrAstId =
FixedArray::kHeaderSize + kPointerSize * (kOsrAstIdOffset - kEntryLength);
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
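
In the Generate_CompileLazy builtins, the backwards walk keeps `index` pointing one slot past the entry being inspected and reads that entry's fields through the kOffsetToPrevious* constants added above. A standalone check of that arithmetic, using stand-in values rather than V8's real constants (the pointer size, header size, in-entry slot number, and the assumption that entries start at slot 1 are all illustrative):

#include <cstdint>

constexpr int kPointerSize = 8;    // assuming a 64-bit build
constexpr int kHeaderSize = 16;    // stand-in for FixedArray::kHeaderSize
constexpr int kEntryLength = 4;    // context, code, literals, osr-ast-id
constexpr int kContextOffset = 0;  // assumed in-entry slot of the context cell

// Same shape as the definition above: reaches back one entry from `index`.
constexpr int kOffsetToPreviousContext =
    kHeaderSize + kPointerSize * (kContextOffset - kEntryLength);

// Untagged address of the context cell of the entry that ends at `index`.
uintptr_t ContextCellAddress(uintptr_t map_untagged, int index) {
  return map_untagged + index * kPointerSize + kOffsetToPreviousContext;
}

int main() {
  // With the first entry assumed to occupy slots 1..4 (slot 0 holding the
  // context-free shared code cell), index == 5 lands on slot 1: 16 + 8 == 24.
  return ContextCellAddress(/*map_untagged=*/0, /*index=*/5) == 24 ? 0 : 1;
}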


@@ -904,6 +904,134 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
// -- rdx : new target (preserved for callee)
// -- rdi : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
Register closure = rdi;
Register map = r8;
Register index = r9;
__ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
__ cmpl(index, Immediate(2));
__ j(less, &gotta_call_runtime);
// Find literals.
// r14 : native context
// r9 : length / index
// r8 : optimized code map
// rdx : new target
// rdi : closure
Register native_context = r14;
__ movp(native_context, NativeContextOperand());
__ bind(&loop_top);
// Native context match?
Register temp = r11;
__ movp(temp, FieldOperand(map, index, times_pointer_size,
SharedFunctionInfo::kOffsetToPreviousContext));
__ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmpp(temp, native_context);
__ j(not_equal, &loop_bottom);
// OSR id set to none?
__ movp(temp, FieldOperand(map, index, times_pointer_size,
SharedFunctionInfo::kOffsetToPreviousOsrAstId));
__ SmiToInteger32(temp, temp);
const int bailout_id = BailoutId::None().ToInt();
__ cmpl(temp, Immediate(bailout_id));
__ j(not_equal, &loop_bottom);
// Literals available?
__ movp(temp, FieldOperand(map, index, times_pointer_size,
SharedFunctionInfo::kOffsetToPreviousLiterals));
__ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ JumpIfSmi(temp, &gotta_call_runtime);
// Save the literals in the closure.
__ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
__ movp(r15, index);
__ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r15,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Code available?
Register entry = rcx;
__ movp(entry, FieldOperand(map, index, times_pointer_size,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, r15);
// Link the closure into the optimized function list.
// rcx : code entry (entry)
// r14 : native context
// rdx : new target
// rdi : closure
__ movp(rbx,
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx);
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, r15,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
closure);
// Save closure before the write barrier.
__ movp(rbx, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, r15,
kDontSaveFPRegs);
__ movp(closure, rbx);
__ jmp(entry);
__ bind(&loop_bottom);
__ subl(index, Immediate(SharedFunctionInfo::kEntryLength));
__ cmpl(index, Immediate(1));
__ j(greater, &loop_top);
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&maybe_call_runtime);
// Last possibility. Check the context free optimized code map entry.
__ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
// Is the full code valid?
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
__ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
__ andl(rbx, Immediate(Code::KindField::kMask));
__ shrl(rbx, Immediate(Code::KindField::kShift));
__ cmpl(rbx, Immediate(Code::BUILTIN));
__ j(equal, &gotta_call_runtime);
// Yes, install the full code.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, r15);
__ jmp(entry);
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}


@@ -489,7 +489,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
// easier.
DCHECK(js_function.is(rdi));
DCHECK(code_entry.is(rcx));
DCHECK(scratch.is(rax));
DCHECK(scratch.is(r15));
// Since a code entry (value) is always in old space, we don't need to update
// remembered set. If incremental marking is off, there is nothing for us to
@@ -537,13 +537,13 @@
DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
movp(arg_reg_1, js_function); // rcx gets rdi.
movp(arg_reg_2, dst); // rdx gets rax.
movp(arg_reg_2, dst); // rdx gets r15.
} else {
// AMD64 calling convention.
DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
// rdi is already loaded with js_function.
movp(arg_reg_2, dst); // rsi gets rax.
movp(arg_reg_2, dst); // rsi gets r15.
}
Move(arg_reg_3, ExternalReference::isolate_address(isolate()));


@@ -528,6 +528,9 @@
# code.
'test-api/TurboAsmDisablesNeuter': [FAIL],
# TODO(mvstanton,4900): CHECK(!g_function->is_compiled());
'test-heap/TestUseOfIncrementalBarrierOnCompileLazy': [FAIL],
# TODO(rmcilroy,4837): We don't set a LoadContextSlot for a function as
# immutable in the BytecodeGraphBuilder, therefore no inlining happens.
'test-run-inlining/InlineLoopGuardedTwice': [FAIL],


@@ -1546,10 +1546,7 @@ TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
Handle<Object> g_value =
Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked();
Handle<JSFunction> g_function = Handle<JSFunction>::cast(g_value);
// TODO(mvstanton): change to check that g is *not* compiled when optimized
// cache
// map lookup moves to the compile lazy builtin.
CHECK(g_function->is_compiled());
CHECK(!g_function->is_compiled());
SimulateIncrementalMarking(heap);
CompileRun("%OptimizeFunctionOnNextCall(f); f();");


@@ -364,9 +364,7 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {
CHECK(!f->shared()->feedback_vector()->is_empty());
}
// Test that optimized code for different closures is actually shared
// immediately by the FastNewClosureStub when run in the same context.
// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing1) {
FLAG_stress_compaction = false;
FLAG_allow_natives_syntax = true;
@@ -385,8 +383,8 @@ TEST(OptimizedCodeSharing1) {
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
"var closure1 = MakeClosure();"
"var closure2 = MakeClosure();");
"var closure1 = MakeClosure(); closure1();"
"var closure2 = MakeClosure(); closure2();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
@@ -403,9 +401,7 @@ }
}
}
// Test that optimized code for different closures is actually shared
// immediately by the FastNewClosureStub when run different contexts.
// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing2) {
if (FLAG_stress_compaction) return;
FLAG_allow_natives_syntax = true;
@@ -456,8 +452,8 @@ TEST(OptimizedCodeSharing2) {
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
"var closure1 = MakeClosure();"
"var closure2 = MakeClosure();");
"var closure1 = MakeClosure(); closure1();"
"var closure2 = MakeClosure(); closure2();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
@@ -475,9 +471,7 @@ }
}
}
// Test that optimized code for different closures is actually shared
// immediately by the FastNewClosureStub without context-dependent entries.
// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing3) {
if (FLAG_stress_compaction) return;
FLAG_allow_natives_syntax = true;
@@ -531,8 +525,8 @@ TEST(OptimizedCodeSharing3) {
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
"var closure1 = MakeClosure();"
"var closure2 = MakeClosure();");
"var closure1 = MakeClosure(); closure1();"
"var closure2 = MakeClosure(); closure2();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()