[compiler] Drive optimizations with feedback vector (reland)

For interpreted functions, use the optimized code slot in the feedback
vector to store an optimization marker (optimize/in optimization queue)
rather than changing the JSFunction's code object. Then, adapt the
self-healing mechanism to also dispatch based on this optimization
marker. Similarly, replace SFI marking with optimization marker checks
in CompileLazy.
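
As a rough mental model (a sketch with stand-in C++ types, not V8's actual
tagged representation), the optimized code slot now holds one of two things:

  // Sketch only: V8 keeps this as a single tagged value in the feedback
  // vector's kOptimizedCodeIndex slot, not as a std::variant.
  #include <variant>

  enum class OptimizationMarker {
    kNone,                        // no optimization requested
    kCompileOptimized,            // synchronous optimization requested
    kCompileOptimizedConcurrent,  // concurrent optimization requested
    kInOptimizationQueue,         // a concurrent compile job is already queued
  };

  struct Code {                   // stand-in for an optimized code object
    bool marked_for_deoptimization = false;
  };

  struct WeakCell {               // value is cleared when the code object dies
    Code* value = nullptr;
  };

  // The slot is either a Smi-encoded marker or a weak cell to optimized code.
  using OptimizedCodeSlot = std::variant<OptimizationMarker, WeakCell>;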

This allows JSFunctions to share optimization information (replacing
shared function marking) without leaking this information across native
contexts. Non I+TF functions (asm.js or --no-turbo) use a
CheckOptimizationMarker shim which generalises the old
CompileOptimized/InOptimizationQueue builtins and also checks the same
optimization marker as CompileLazy and InterpreterEntryTrampoline.
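
The check these entry points share (MaybeTailCallOptimizedCodeSlot in the
per-architecture builtins below) boils down to the following hedged,
platform-independent sketch; the runtime-call and stack-check hooks
(CallRuntime_*, TailCallOptimizedCode, StackLimitHit) are illustrative
stand-ins rather than V8 API:

  // Minimal stand-ins (mirroring the sketch above) so this compiles alone.
  enum class OptimizationMarker {
    kNone,
    kCompileOptimized,
    kCompileOptimizedConcurrent,
    kInOptimizationQueue,
  };
  struct Code { bool marked_for_deoptimization = false; };

  struct OptimizedCodeSlot {    // simplified view of the feedback vector slot
    bool is_marker = true;      // Smi marker vs. weak cell
    OptimizationMarker marker = OptimizationMarker::kNone;
    Code* weak_code = nullptr;  // nullptr once the weak cell is cleared
  };

  // Hypothetical hooks standing in for the tail calls the builtins emit.
  void CallRuntime_CompileOptimized(bool /*concurrent*/) {}
  void CallRuntime_TryInstallOptimizedCode() {}
  void CallRuntime_EvictOptimizedCodeSlot() {}
  void TailCallOptimizedCode(Code* /*code*/) {}
  bool StackLimitHit() { return false; }

  // Returns true if control was dispatched; false means "fall through" to
  // the caller's normal path (interpreter entry, lazy compilation, or the
  // SFI code in the CheckOptimizationMarker shim).
  bool MaybeTailCallOptimizedCodeSlot(const OptimizedCodeSlot& slot) {
    if (slot.is_marker) {
      switch (slot.marker) {
        case OptimizationMarker::kNone:
          return false;  // no optimization trigger
        case OptimizationMarker::kCompileOptimized:
          CallRuntime_CompileOptimized(/*concurrent=*/false);
          return true;
        case OptimizationMarker::kCompileOptimizedConcurrent:
          CallRuntime_CompileOptimized(/*concurrent=*/true);
          return true;
        case OptimizationMarker::kInOptimizationQueue:
          // Only poll for a finished concurrent job when we would take an
          // interrupt anyway, to keep the common path cheap.
          if (StackLimitHit()) {
            CallRuntime_TryInstallOptimizedCode();
            return true;
          }
          return false;
      }
      return false;  // unreachable
    }
    // The slot is a weak cell to optimized code.
    if (slot.weak_code == nullptr) return false;   // cell was cleared
    if (slot.weak_code->marked_for_deoptimization) {
      CallRuntime_EvictOptimizedCodeSlot();        // clear the stale slot
      return true;
    }
    // Self-heal: install the code in the closure, then run it.
    TailCallOptimizedCode(slot.weak_code);
    return true;
  }

In the architecture-specific builtins this logic is emitted as assembly, and
the fall-through outcome is what lets InterpreterEntryTrampoline continue into
frame setup and CheckOptimizationMarker tail-call the SFI code.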

This is a reland of https://chromium-review.googlesource.com/c/509716

Change-Id: I02b790544596562373da4c9c9f6afde5fb3bcffe
Reviewed-on: https://chromium-review.googlesource.com/535460
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45997}
Leszek Swirski 2017-06-15 15:53:38 +01:00 committed by Commit Bot
parent fea10e322f
commit 24b7026d73
32 changed files with 1431 additions and 873 deletions


@@ -67,6 +67,7 @@ namespace internal {
V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedBooleanValue, "Expected boolean value") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kExpectedFixedDoubleArrayMap, \
"Expected a fixed double array map in fast shallow clone array literal") \
V(kExpectedFunctionObject, "Expected function object in register") \
@@ -75,6 +76,8 @@ namespace internal {
V(kExpectedNativeContext, "Expected native context") \
V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedNonNullContext, "Expected non-null context") \
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \


@@ -427,23 +427,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(r2);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -1008,6 +991,119 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ add(sp, sp, args_count, LeaveCC);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ cmp(smi_entry, Operand(Smi::FromEnum(marker)));
__ b(ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee if needed, and caller)
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(
!AreAliased(feedback_vector, r0, r1, r3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = r1;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ ldr(optimized_code_entry,
FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, we interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ cmp(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
__ b(eq, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ ldr(scratch2, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ Jump(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1026,34 +1122,31 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = r1;
Register feedback_vector = r2;
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = r4;
__ ldr(r0, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
__ ldr(r0, FieldMemOperand(r0, Cell::kValueOffset));
__ ldr(
optimized_code_entry,
FieldMemOperand(r0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
__ ldr(r2, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
__ SmiTst(r2);
__ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
__ SmiTst(r4);
__ b(ne, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1066,15 +1159,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
__ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
__ ldr(r9, FieldMemOperand(
r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ ldr(r9,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ str(r9, FieldMemOperand(
r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ str(r9,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1149,11 +1242,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
__ ldr(r9, FieldMemOperand(r2, DebugInfo::kFlagsOffset));
__ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
__ SmiUntag(r9);
__ tst(r9, Operand(DebugInfo::kHasBreakInfo));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, DebugInfo::kDebugBytecodeArrayOffset), ne);
FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
__ b(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -1161,36 +1254,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
__ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(r1, r4, r5);
__ str(r4, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, r4, r5);
__ Jump(r4);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ ldr(r5, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r1, r6, r5,
r2);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1421,6 +1490,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
// -- r3 : new target (preserved for callee)
// -- r1 : target function (preserved for callee)
// -----------------------------------
Register closure = r1;
// Get the feedback vector.
Register feedback_vector = r2;
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
__ Assert(ne, BailoutReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1429,42 +1525,24 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = r1;
Register index = r2;
Register feedback_vector = r2;
// Do we have a valid feedback vector?
__ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
// Is optimized code available in the feedback vector?
Register entry = r4;
__ ldr(entry, FieldMemOperand(
index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ ldr(r5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
__ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &gotta_call_runtime);
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r6, r5, r2);
__ Jump(entry);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// We found no optimized code.
__ bind(&try_shared);
Register entry = r4;
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ ldr(r5, FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r5, Operand(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ b(ne, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1482,15 +1560,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)


@@ -428,22 +428,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Br(x2);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However, not
// checking may delay installing ready functions, and always checking would be
// quite expensive. A good compromise is to first check against stack limit as
// a cue for an interrupt signal.
Label ok;
__ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
__ B(hs, &ok);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ Bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -1031,6 +1015,117 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Drop(args_count, 1);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee if needed, and caller)
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(
!AreAliased(feedback_vector, x0, x1, x3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = x1;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ Ldr(optimized_code_entry,
FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, we interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ CompareAndBranch(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ Cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
__ B(hs, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ Ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ TestAndBranchIfAnySet(scratch2, 1 << Code::kMarkedForDeoptimizationBit,
&found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ Jump(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1049,31 +1144,28 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = x1;
Register feedback_vector = x2;
// Load the feedback vector from the closure.
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, x1);
__ Push(lr, fp, cp, closure);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = x7;
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
__ Ldr(x0, FieldMemOperand(x0, Cell::kValueOffset));
__ Ldr(
optimized_code_entry,
FieldMemOperand(x0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
__ Ldr(x11, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
@@ -1089,7 +1181,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
__ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(x10, FieldMemOperand(
x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -1183,35 +1275,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
__ Ldr(x7, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x7, FieldMemOperand(x7, SharedFunctionInfo::kCodeOffset));
__ Add(x7, x7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(x1, x7, x5);
__ Str(x7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, x7, x5);
__ Jump(x7);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ Ldr(w8, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
&gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, x1, x4, x5,
x13);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1448,6 +1517,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
// -- x3 : new target (preserved for callee)
// -- x1 : target function (preserved for callee)
// -----------------------------------
Register closure = x1;
// Get the feedback vector.
Register feedback_vector = x2;
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
__ Assert(ne, BailoutReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1456,50 +1552,29 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = x1;
Register index = x2;
Register feedback_vector = x2;
// Do we have a valid feedback vector?
__ Ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
// Is optimized code available in the feedback vector?
Register entry = x7;
__ Ldr(entry, FieldMemOperand(
index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ Ldr(w8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
__ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
&gotta_call_runtime);
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, x4, x5, x13);
__ Jump(entry);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// We found no optimized code.
Register temp = x5;
__ Bind(&try_shared);
Register entry = x7;
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ Ldr(temp.W(),
FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
__ TestAndBranchIfAnySet(temp.W(),
SharedFunctionInfo::MarkedForTierUpBit::kMask,
&gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(temp, masm->CodeObject());
__ Cmp(entry, temp);
__ Move(x5, masm->CodeObject());
__ Cmp(entry, x5);
__ B(eq, &gotta_call_runtime);
// Install the SFI's code entry.
@@ -1512,15 +1587,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)


@@ -137,23 +137,27 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
}
{
// If the feedback vector has optimized code, check whether it is marked
// for deopt and, if so, clear it.
Label optimized_code_ok(this);
// for deopt and, if so, clear the slot.
Label optimized_code_ok(this), clear_optimized_code(this);
Node* literals = LoadObjectField(literals_cell, Cell::kValueOffset);
GotoIfNot(IsFeedbackVector(literals), &optimized_code_ok);
Node* optimized_code_cell =
Node* optimized_code_cell_slot =
LoadFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex);
GotoIf(TaggedIsSmi(optimized_code_cell_slot), &optimized_code_ok);
Node* optimized_code =
LoadWeakCellValue(optimized_code_cell, &optimized_code_ok);
LoadWeakCellValue(optimized_code_cell_slot, &clear_optimized_code);
Node* code_flags = LoadObjectField(
optimized_code, Code::kKindSpecificFlags1Offset, MachineType::Uint32());
Node* marked_for_deopt =
DecodeWord32<Code::MarkedForDeoptimizationField>(code_flags);
GotoIf(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok);
Branch(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok,
&clear_optimized_code);
// Code is marked for deopt, clear the optimized code slot.
// Cell is empty or code is marked for deopt, clear the optimized code slot.
BIND(&clear_optimized_code);
StoreFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex,
EmptyWeakCellConstant(), SKIP_WRITE_BARRIER);
SmiConstant(Smi::kZero), SKIP_WRITE_BARRIER);
Goto(&optimized_code_ok);
BIND(&optimized_code_ok);


@@ -149,9 +149,7 @@ namespace internal {
ASM(InterpreterOnStackReplacement) \
\
/* Code life-cycle */ \
ASM(CompileOptimized) \
ASM(CompileOptimizedConcurrent) \
ASM(InOptimizationQueue) \
ASM(CheckOptimizationMarker) \
ASM(InstantiateAsmJs) \
ASM(MarkCodeAsToBeExecutedOnce) \
ASM(MarkCodeAsExecutedOnce) \


@@ -92,24 +92,6 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ jmp(ebx);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -668,6 +650,121 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ push(return_pc);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ cmp(smi_entry, Immediate(Smi::FromEnum(marker)));
__ j(not_equal, &no_match, Label::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee if needed, and caller)
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, eax, edx, edi, scratch));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = edi;
Register optimized_code_entry = scratch;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ mov(optimized_code_entry,
FieldOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, we interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is an optimization marker.
// Fall through if no optimization trigger.
__ cmp(optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
__ j(equal, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(equal, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ mov(optimized_code_entry,
FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ push(eax);
__ push(edx);
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax, feedback_vector);
__ pop(edx);
__ pop(eax);
__ jmp(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -685,9 +782,20 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = edi;
Register feedback_vector = ebx;
// Load the feedback vector from the closure.
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
// MANUAL indicates that the scope shouldn't actually generate code to set
// up the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
@@ -695,19 +803,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = ecx;
__ mov(ebx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
__ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
__ mov(optimized_code_entry,
FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ mov(optimized_code_entry,
FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
@@ -727,11 +822,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ EmitLoadFeedbackVector(ecx);
__ add(
FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
Immediate(Smi::FromInt(1)));
__ add(FieldOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
Immediate(Smi::FromInt(1)));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -806,10 +900,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
__ push(ebx); // feedback_vector == ebx, so save it.
__ mov(ecx, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
__ mov(ebx, FieldOperand(ecx, DebugInfo::kFlagsOffset));
__ SmiUntag(ebx);
__ test(ebx, Immediate(DebugInfo::kHasBreakInfo));
__ pop(ebx);
__ j(zero, &bytecode_array_loaded);
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(ecx, DebugInfo::kDebugBytecodeArrayOffset));
@@ -829,31 +925,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
__ RecordWriteCodeEntryField(edi, ecx, ebx);
__ jmp(ecx);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ push(edx);
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, edi, edx,
eax, ebx);
__ pop(edx);
__ leave();
__ jmp(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
__ leave();
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1224,6 +1295,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
// -- edx : new target (preserved for callee)
// -- edi : target function (preserved for callee)
// -----------------------------------
Register closure = edi;
// Get the feedback vector.
Register feedback_vector = ebx;
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
__ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1232,46 +1330,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = edi;
Register new_target = edx;
Register argument_count = eax;
Register feedback_vector = ebx;
// Do we have a valid feedback vector?
__ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
__ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
// Is optimized code available in the feedback vector?
Register entry = ecx;
__ mov(entry,
FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ test(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Code is good, get it into the closure and tail call.
__ push(argument_count);
__ push(new_target);
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, edx, eax, ebx);
__ pop(new_target);
__ pop(argument_count);
__ jmp(entry);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// We found no optimized code.
__ bind(&try_shared);
Register entry = ecx;
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ test(FieldOperand(entry, SharedFunctionInfo::kCompilerHintsOffset),
Immediate(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1286,19 +1361,9 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ jmp(entry);
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)


@@ -423,22 +423,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -999,6 +983,115 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Addu(sp, sp, args_count);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee if needed, and caller)
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ lw(optimized_code_entry,
FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, we interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ Branch(&fallthrough, eq, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ Assert(
eq, kExpectedOptimizationSentinel, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&fallthrough, hs, sp, Operand(at));
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ lw(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ lw(scratch2, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ And(scratch2, scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, scratch2, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ Jump(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1017,29 +1110,27 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = a1;
Register feedback_vector = a2;
// Load the feedback vector from the closure.
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(a1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = t0;
__ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ lw(optimized_code_entry,
FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ lw(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ lw(t0, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
@@ -1055,15 +1146,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
__ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ lw(t0, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ lw(t0,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
__ sw(t0, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ sw(t0,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1152,35 +1243,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCodeOffset));
__ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(a1, t0, t1);
__ sw(t0, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, t0, t1);
__ Jump(t0);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ lw(t1,
FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
__ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, t1,
t2);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1414,6 +1482,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
// -- a3 : new target (preserved for callee)
// -- a1 : target function (preserved for callee)
// -----------------------------------
Register closure = a1;
// Get the feedback vector.
Register feedback_vector = a2;
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
Operand(at));
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@ -1422,41 +1518,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = a1;
Register index = a2;
Register feedback_vector = a2;
// Do we have a valid feedback vector?
__ lw(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
// Is optimized code available in the feedback vector?
Register entry = t0;
__ lw(entry, FieldMemOperand(
index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ lw(t1, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
__ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, t1, t2);
__ Jump(entry);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// We found no optimized code.
__ bind(&try_shared);
Register entry = t0;
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ lw(t1, FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
__ And(t1, t1, Operand(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@ -1473,15 +1551,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)


@ -426,22 +426,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ LoadRoot(a4, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(a4));
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@ -1000,6 +984,115 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Daddu(sp, sp, args_count);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee if needed, and caller)
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ Ld(optimized_code_entry,
FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ Branch(&fallthrough, eq, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ Assert(
eq, kExpectedOptimizationSentinel, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&fallthrough, hs, sp, Operand(t0));
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ Ld(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ Lw(a5, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ Jump(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
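For orientation, here is a hedged, plain-C++ model of the dispatch that MaybeTailCallOptimizedCodeSlot implements above. The OptimizationMarker values mirror the real enum, but the SlotModel/Action types and the stack_limit_hit flag are the editor's illustrative stand-ins for the tagged slot and the SP-vs-stack-limit check; they are not V8 API and not part of this change.
// Editor's illustrative sketch, not part of this change.
enum class OptimizationMarker {
  kNone,
  kCompileOptimized,
  kCompileOptimizedConcurrent,
  kInOptimizationQueue
};
// Models the feedback vector's optimized code slot: either a Smi marker or a
// WeakCell to a code object (possibly cleared or marked for deoptimization).
struct SlotModel {
  bool is_smi_marker;
  OptimizationMarker marker;    // valid when is_smi_marker
  bool cell_cleared;            // valid when !is_smi_marker
  bool code_marked_for_deopt;   // valid when !is_smi_marker && !cell_cleared
};
enum class Action {
  kFallthrough,           // no marker, no code: continue with bytecode/SFI code
  kCompileNotConcurrent,  // Runtime::kCompileOptimized_NotConcurrent
  kCompileConcurrent,     // Runtime::kCompileOptimized_Concurrent
  kTryInstallOptimized,   // Runtime::kTryInstallOptimizedCode
  kEvictOptimizedCode,    // Runtime::kEvictOptimizedCodeSlot
  kTailCallOptimizedCode  // self-heal the closure, jump to optimized code
};
Action Dispatch(const SlotModel& slot, bool stack_limit_hit) {
  if (slot.is_smi_marker) {
    switch (slot.marker) {
      case OptimizationMarker::kNone:
        return Action::kFallthrough;
      case OptimizationMarker::kCompileOptimized:
        return Action::kCompileNotConcurrent;
      case OptimizationMarker::kCompileOptimizedConcurrent:
        return Action::kCompileConcurrent;
      case OptimizationMarker::kInOptimizationQueue:
        // Only poke the optimization queue when an interrupt is due anyway;
        // the cheap stack-limit comparison stands in for that cue.
        return stack_limit_hit ? Action::kTryInstallOptimized
                               : Action::kFallthrough;
    }
  }
  if (slot.cell_cleared) return Action::kFallthrough;
  if (slot.code_marked_for_deopt) return Action::kEvictOptimizedCode;
  return Action::kTailCallOptimizedCode;
}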
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@ -1018,29 +1111,27 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = a1;
Register feedback_vector = a2;
// Load the feedback vector from the closure.
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(a1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = a4;
__ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ Ld(optimized_code_entry,
FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ld(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ Ld(a4, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
@ -1056,15 +1147,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
__ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ Ld(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ld(a4,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
__ Sd(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Sd(a4,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@ -1153,35 +1244,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
__ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
__ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(a1, a4, a5);
__ Sd(a4, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, a4, a5);
__ Jump(a4);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ Lw(a5,
FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, a5,
t0);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@ -1416,6 +1484,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
// -- a3 : new target (preserved for callee)
// -- a1 : target function (preserved for callee)
// -----------------------------------
Register closure = a1;
// Get the feedback vector.
Register feedback_vector = a2;
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
Operand(at));
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@ -1424,41 +1520,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = a1;
Register index = a2;
Register feedback_vector = a2;
// Do we have a valid feedback vector?
__ Ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ld(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
// Is optimized code available in the feedback vector?
Register entry = a4;
__ Ld(entry, FieldMemOperand(
index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ Lw(a5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, a5, t0);
__ Jump(entry);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// We found no optimized code.
__ bind(&try_shared);
Register entry = a4;
__ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ Lwu(a5, FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
__ And(a5, a5, Operand(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@ -1475,15 +1553,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)


@ -98,22 +98,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ jmp(rbx);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@ -747,6 +731,117 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ PushReturnAddressFrom(return_pc);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ SmiCompare(smi_entry, Smi::FromEnum(marker));
__ j(not_equal, &no_match, Label::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee if needed, and caller)
// -- rdx : new target (preserved for callee if needed, and caller)
// -- rdi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, rax, rdx, rdi, scratch1, scratch2,
scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = rdi;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ movp(optimized_code_entry,
FieldOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ SmiCompare(optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kNone));
__ j(equal, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ SmiCompare(optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
__ Assert(equal, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ movp(optimized_code_entry,
FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ testl(
FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ jmp(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@ -764,6 +859,17 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = rdi;
Register feedback_vector = rbx;
// Load the feedback vector from the closure.
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@ -774,22 +880,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rdi); // Callee's JS function.
__ Push(rdx); // Callee's new target.
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = rcx;
__ movp(rbx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
__ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
__ movp(rbx,
FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ movp(optimized_code_entry, FieldOperand(rbx, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
__ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
@ -805,11 +899,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
__ movp(rcx, FieldOperand(rcx, Cell::kValueOffset));
__ SmiAddConstant(
FieldOperand(rcx, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
FieldOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
Smi::FromInt(1));
// Check function data field is actually a BytecodeArray object.
@ -905,28 +998,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
__ RecordWriteCodeEntryField(rdi, rcx, r15);
__ jmp(rcx);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ leave();
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ testl(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, rdi, r14,
r15, rbx);
__ jmp(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(
@ -1198,6 +1269,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
// -- rdx : new target (preserved for callee)
// -- rdi : target function (preserved for callee)
// -----------------------------------
Register closure = rdi;
// Get the feedback vector.
Register feedback_vector = rbx;
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
__ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@ -1206,40 +1304,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = rdi;
Register feedback_vector = rbx;
// Do we have a valid feedback vector?
__ movp(rbx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
__ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
// Is optimized code available in the feedback vector?
Register entry = rcx;
__ movp(entry,
FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ testl(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r14, r15, rbx);
__ jmp(entry);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// We found no optimized code.
__ bind(&try_shared);
Register entry = rcx;
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ testl(FieldOperand(entry, SharedFunctionInfo::kCompilerHintsOffset),
Immediate(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
@ -1257,15 +1338,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)


@ -22,6 +22,9 @@ void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
if (restore_function_code) {
Handle<JSFunction> function = job->info()->closure();
function->ReplaceCode(function->shared()->code());
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
// TODO(mvstanton): We can't call ensureliterals here due to allocation,
// but we probably shouldn't call ReplaceCode either, as this
// sometimes runs on the worker thread!
@ -196,7 +199,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
CompilationInfo* info = job->info();
Handle<JSFunction> function(*info->closure());
if (function->IsOptimized()) {
if (function->HasOptimizedCode()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Aborting compilation for ");
function->ShortPrint();


@ -395,12 +395,12 @@ bool UseAsmWasm(DeclarationScope* scope, Handle<SharedFunctionInfo> shared_info,
return scope->asm_module();
}
bool UseCompilerDispatcher(Compiler::ConcurrencyMode inner_function_mode,
bool UseCompilerDispatcher(ConcurrencyMode inner_function_mode,
CompilerDispatcher* dispatcher,
DeclarationScope* scope,
Handle<SharedFunctionInfo> shared_info,
bool is_debug, bool will_serialize) {
return inner_function_mode == Compiler::CONCURRENT &&
return inner_function_mode == ConcurrencyMode::kConcurrent &&
dispatcher->IsEnabled() && !is_debug && !will_serialize &&
!UseAsmWasm(scope, shared_info, is_debug);
}
@ -550,8 +550,8 @@ bool GenerateUnoptimizedCode(CompilationInfo* info) {
bool CompileUnoptimizedInnerFunctions(
Compiler::EagerInnerFunctionLiterals* literals,
Compiler::ConcurrencyMode inner_function_mode,
std::shared_ptr<Zone> parse_zone, CompilationInfo* outer_info) {
ConcurrencyMode inner_function_mode, std::shared_ptr<Zone> parse_zone,
CompilationInfo* outer_info) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileUnoptimizedInnerFunctions");
Isolate* isolate = outer_info->isolate();
@ -618,14 +618,14 @@ bool InnerFunctionIsAsmModule(
}
bool CompileUnoptimizedCode(CompilationInfo* info,
Compiler::ConcurrencyMode inner_function_mode) {
ConcurrencyMode inner_function_mode) {
Isolate* isolate = info->isolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
Compiler::EagerInnerFunctionLiterals inner_literals;
{
base::Optional<CompilationHandleScope> compilation_handle_scope;
if (inner_function_mode == Compiler::CONCURRENT) {
if (inner_function_mode == ConcurrencyMode::kConcurrent) {
compilation_handle_scope.emplace(info);
}
if (!Compiler::Analyze(info, &inner_literals)) {
@ -639,11 +639,11 @@ bool CompileUnoptimizedCode(CompilationInfo* info,
// builder doesn't do parsing when visiting function declarations.
if (info->scope()->IsAsmModule() ||
InnerFunctionIsAsmModule(&inner_literals)) {
inner_function_mode = Compiler::NOT_CONCURRENT;
inner_function_mode = ConcurrencyMode::kNotConcurrent;
}
std::shared_ptr<Zone> parse_zone;
if (inner_function_mode == Compiler::CONCURRENT) {
if (inner_function_mode == ConcurrencyMode::kConcurrent) {
// Seal the parse zone so that it can be shared by parallel inner function
// compilation jobs.
DCHECK_NE(info->parse_info()->zone(), info->zone());
@ -680,7 +680,7 @@ void EnsureSharedFunctionInfosArrayOnScript(CompilationInfo* info) {
}
MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
CompilationInfo* info, Compiler::ConcurrencyMode inner_function_mode) {
CompilationInfo* info, ConcurrencyMode inner_function_mode) {
RuntimeCallTimerScope runtimeTimer(
info->isolate(), &RuntimeCallStats::CompileGetUnoptimizedCode);
VMState<COMPILER> state(info->isolate());
@ -688,12 +688,13 @@ MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
// Parse and update ParseInfo with the results.
{
if (!parsing::ParseAny(info->parse_info(), info->isolate(),
inner_function_mode != Compiler::CONCURRENT)) {
if (!parsing::ParseAny(
info->parse_info(), info->isolate(),
inner_function_mode != ConcurrencyMode::kConcurrent)) {
return MaybeHandle<Code>();
}
if (inner_function_mode == Compiler::CONCURRENT) {
if (inner_function_mode == ConcurrencyMode::kConcurrent) {
ParseHandleScope parse_handles(info->parse_info(), info->isolate());
info->parse_info()->ReopenHandlesInNewHandleScope();
info->parse_info()->ast_value_factory()->Internalize(info->isolate());
@ -745,13 +746,27 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
return MaybeHandle<Code>();
}
void ClearOptimizedCodeCache(CompilationInfo* info) {
Handle<JSFunction> function = info->closure();
if (info->osr_ast_id().IsNone()) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
vector->ClearOptimizedCode();
}
}
void InsertCodeIntoOptimizedCodeCache(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
// Function context specialization folds-in the function context,
// so no sharing can occur.
if (info->is_function_context_specializing()) return;
if (info->is_function_context_specializing()) {
// Native context specialized code is not shared, so make sure the optimized
// code cache is clear.
ClearOptimizedCodeCache(info);
return;
}
// Frame specialization implies function context specialization.
DCHECK(!info->is_frame_specializing());
@ -810,16 +825,6 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
if (FLAG_mark_optimizing_shared_functions &&
info->closure()->shared()->has_concurrent_optimization_job()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation job already running for ");
info->shared_info()->ShortPrint();
PrintF(".\n");
}
return false;
}
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
@ -854,7 +859,6 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
info->closure()->shared()->set_has_concurrent_optimization_job(true);
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queued ");
@ -865,7 +869,7 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
}
MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
Compiler::ConcurrencyMode mode,
ConcurrencyMode mode,
BailoutId osr_ast_id = BailoutId::None(),
JavaScriptFrame* osr_frame = nullptr) {
Isolate* isolate = function->GetIsolate();
@ -875,6 +879,12 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
DCHECK_IMPLIES(ignition_osr, !osr_ast_id.IsNone());
DCHECK_IMPLIES(ignition_osr, FLAG_ignition_osr);
// Make sure we clear the optimization marker on the function so that we
// don't try to re-optimize.
if (function->HasOptimizationMarker()) {
function->ClearOptimizationMarker();
}
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, osr_ast_id)
.ToHandle(&cached_code)) {
@ -958,7 +968,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// allocated in a deferred handle scope that is detached and handed off to
// the background thread when we return.
base::Optional<CompilationHandleScope> compilation;
if (mode == Compiler::CONCURRENT) {
if (mode == ConcurrencyMode::kConcurrent) {
compilation.emplace(info);
}
@ -969,10 +979,17 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
info->ReopenHandlesInNewHandleScope();
parse_info->ReopenHandlesInNewHandleScope();
if (mode == Compiler::CONCURRENT) {
if (mode == ConcurrencyMode::kConcurrent) {
if (GetOptimizedCodeLater(job.get())) {
job.release(); // The background recompile job owns this now.
return isolate->builtins()->InOptimizationQueue();
// Set the optimization marker and return a code object which checks it.
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
if (function->IsInterpreted()) {
return isolate->builtins()->InterpreterEntryTrampoline();
} else {
return isolate->builtins()->CheckOptimizationMarker();
}
}
} else {
if (GetOptimizedCodeNow(job.get())) return info->code();
@ -982,13 +999,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
MaybeHandle<Code> GetOptimizedCodeMaybeLater(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
return GetOptimizedCode(function, isolate->concurrent_recompilation_enabled()
? Compiler::CONCURRENT
: Compiler::NOT_CONCURRENT);
}
CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
@ -1004,11 +1014,6 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
// Reset profiler ticks, function is no longer considered hot.
shared->set_profiler_ticks(0);
shared->set_has_concurrent_optimization_job(false);
// Shared function no longer needs to be tiered up.
shared->set_marked_for_tier_up(false);
DCHECK(!shared->HasBreakInfo());
// 1) Optimization on the concurrent thread may have failed.
@ -1042,6 +1047,10 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
}
info->closure()->ReplaceCode(shared->code());
// Clear the InOptimizationQueue marker, if it exists.
if (info->closure()->IsInOptimizationQueue()) {
info->closure()->ClearOptimizationMarker();
}
return CompilationJob::FAILED;
}
@ -1056,8 +1065,11 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
if (function->shared()->is_compiled()) {
// Function has already been compiled, get the optimized code if possible,
// otherwise return baseline code.
// Function has already been compiled. Normally we'd expect the CompileLazy
// builtin to catch cases where we already have compiled code or optimized
// code, but there are paths that call the CompileLazy runtime function
// directly (e.g. failed asm.js compilations), so we include a check for
// those.
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, BailoutId::None())
.ToHandle(&cached_code)) {
@ -1066,26 +1078,10 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
function->ShortPrint();
PrintF(" during unoptimized compile]\n");
}
DCHECK(function->shared()->is_compiled());
return cached_code;
}
if (function->shared()->marked_for_tier_up()) {
DCHECK(FLAG_mark_shared_functions_for_tier_up);
function->shared()->set_marked_for_tier_up(false);
if (FLAG_trace_opt) {
PrintF("[optimizing method ");
function->ShortPrint();
PrintF(" eagerly (shared function marked for tier up)]\n");
}
Handle<Code> code;
if (GetOptimizedCodeMaybeLater(function).ToHandle(&code)) {
return code;
}
}
// TODO(leszeks): Either handle optimization markers here, or DCHECK that
// there aren't any.
return Handle<Code>(function->shared()->code());
} else {
@ -1103,16 +1099,21 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
script->preparsed_scope_data());
}
}
Compiler::ConcurrencyMode inner_function_mode =
FLAG_compiler_dispatcher_eager_inner ? Compiler::CONCURRENT
: Compiler::NOT_CONCURRENT;
ConcurrencyMode inner_function_mode = FLAG_compiler_dispatcher_eager_inner
? ConcurrencyMode::kConcurrent
: ConcurrencyMode::kNotConcurrent;
Handle<Code> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result, GetUnoptimizedCode(&info, inner_function_mode), Code);
if (FLAG_always_opt && !info.shared_info()->HasAsmWasmData()) {
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
PrintF(" because --always-opt]\n");
}
Handle<Code> opt_code;
if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent)
.ToHandle(&opt_code)) {
result = opt_code;
}
@ -1130,9 +1131,9 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
ParseInfo* parse_info = info->parse_info();
Compiler::ConcurrencyMode inner_function_mode =
FLAG_compiler_dispatcher_eager_inner ? Compiler::CONCURRENT
: Compiler::NOT_CONCURRENT;
ConcurrencyMode inner_function_mode = FLAG_compiler_dispatcher_eager_inner
? ConcurrencyMode::kConcurrent
: ConcurrencyMode::kNotConcurrent;
RuntimeCallTimerScope runtimeTimer(
isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
@ -1144,12 +1145,13 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
{ VMState<COMPILER> state(info->isolate());
if (parse_info->literal() == nullptr) {
if (!parsing::ParseProgram(parse_info, info->isolate(),
inner_function_mode != Compiler::CONCURRENT)) {
if (!parsing::ParseProgram(
parse_info, info->isolate(),
inner_function_mode != ConcurrencyMode::kConcurrent)) {
return Handle<SharedFunctionInfo>::null();
}
if (inner_function_mode == Compiler::CONCURRENT) {
if (inner_function_mode == ConcurrencyMode::kConcurrent) {
ParseHandleScope parse_handles(parse_info, info->isolate());
parse_info->ReopenHandlesInNewHandleScope();
parse_info->ast_value_factory()->Internalize(info->isolate());
@ -1306,6 +1308,12 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared()->is_compiled());
DCHECK(function->is_compiled());
DCHECK_IMPLIES(function->HasOptimizationMarker(),
function->IsInOptimizationQueue());
DCHECK_IMPLIES(function->HasOptimizationMarker(),
function->ChecksOptimizationMarker());
DCHECK_IMPLIES(function->IsInOptimizationQueue(),
mode == ConcurrencyMode::kConcurrent);
return true;
}
@ -1318,7 +1326,7 @@ bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
CompilationInfo info(parse_info.zone(), &parse_info, isolate,
Handle<JSFunction>::null());
info.MarkAsDebug();
if (GetUnoptimizedCode(&info, Compiler::NOT_CONCURRENT).is_null()) {
if (GetUnoptimizedCode(&info, ConcurrencyMode::kNotConcurrent).is_null()) {
isolate->clear_pending_exception();
return false;
}
@ -1371,7 +1379,8 @@ bool Compiler::EnsureBytecode(CompilationInfo* info) {
CompilerDispatcher* dispatcher = info->isolate()->compiler_dispatcher();
if (dispatcher->IsEnqueued(info->shared_info())) {
if (!dispatcher->FinishNow(info->shared_info())) return false;
} else if (GetUnoptimizedCode(info, Compiler::NOT_CONCURRENT).is_null()) {
} else if (GetUnoptimizedCode(info, ConcurrencyMode::kNotConcurrent)
.is_null()) {
return false;
}
}
@ -1855,7 +1864,8 @@ MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
JavaScriptFrame* osr_frame) {
DCHECK(!osr_ast_id.IsNone());
DCHECK_NOT_NULL(osr_frame);
return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
return GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent, osr_ast_id,
osr_frame);
}
CompilationJob* Compiler::PrepareUnoptimizedCompilationJob(
@ -1889,7 +1899,15 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!function->shared()->HasAsmWasmData() &&
function->shared()->is_compiled()) {
function->MarkForOptimization();
// TODO(mvstanton): pass pretenure flag to EnsureLiterals.
JSFunction::EnsureLiterals(function);
if (!function->IsOptimized()) {
// Only mark for optimization if we don't already have optimized code.
if (!function->HasOptimizedCode()) {
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
}
}
}
if (shared->is_compiled()) {


@ -40,7 +40,6 @@ class ThreadedListZoneEntry;
class V8_EXPORT_PRIVATE Compiler : public AllStatic {
public:
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
// ===========================================================================
// The following family of methods ensures a given function is compiled. The


@ -523,9 +523,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
if (function != nullptr && function->IsOptimized() &&
(compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
!compiled_code_->deopt_already_counted())) {
if (compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
!compiled_code_->deopt_already_counted()) {
// If the function is optimized, and we haven't counted that deopt yet, then
// increment the function's deopt count so that we can avoid optimising
// functions that deopt too often.
@ -534,7 +533,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
// Soft deopts shouldn't count against the overall deoptimization count
// that can eventually lead to disabling optimization for a function.
isolate->counters()->soft_deopts_executed()->Increment();
} else {
} else if (function != nullptr) {
function->shared()->increment_deopt_count();
}
}


@ -96,9 +96,10 @@ class V8_EXPORT_PRIVATE StackGuard final {
V(API_INTERRUPT, ApiInterrupt, 4) \
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5)
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
inline void Request##Name() { RequestInterrupt(NAME); } \
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
inline bool CheckAndClear##Name() { return CheckAndClearInterrupt(NAME); } \
inline void Request##Name() { RequestInterrupt(NAME); } \
inline void Clear##Name() { ClearInterrupt(NAME); }
INTERRUPT_LIST(V)
#undef V


@ -114,13 +114,30 @@ void FeedbackVector::clear_invocation_count() {
set(kInvocationCountIndex, Smi::kZero);
}
Object* FeedbackVector::optimized_code_cell() const {
return get(kOptimizedCodeIndex);
}
Code* FeedbackVector::optimized_code() const {
WeakCell* cell = WeakCell::cast(get(kOptimizedCodeIndex));
Object* slot = optimized_code_cell();
if (slot->IsSmi()) return nullptr;
WeakCell* cell = WeakCell::cast(slot);
return cell->cleared() ? nullptr : Code::cast(cell->value());
}
OptimizationMarker FeedbackVector::optimization_marker() const {
Object* slot = optimized_code_cell();
if (!slot->IsSmi()) return OptimizationMarker::kNone;
Smi* value = Smi::cast(slot);
return static_cast<OptimizationMarker>(value->value());
}
bool FeedbackVector::has_optimized_code() const {
return !WeakCell::cast(get(kOptimizedCodeIndex))->cleared();
return optimized_code() != nullptr;
}
bool FeedbackVector::has_optimization_marker() const {
return optimization_marker() != OptimizationMarker::kNone;
}
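A minimal usage sketch of these accessors follows; the ClassifyOptimizedCodeSlot helper and the SlotState enum are the editor's illustration (assuming the surrounding v8::internal headers) and are not part of this change.
// Editor's illustrative sketch, not part of this change.
enum class SlotState { kHasOptimizedCode, kMarkerPending, kEmpty };
SlotState ClassifyOptimizedCodeSlot(FeedbackVector* vector) {
  if (vector->has_optimized_code()) {
    // The slot is a WeakCell that still points at live optimized code.
    return SlotState::kHasOptimizedCode;
  }
  if (vector->has_optimization_marker()) {
    // The slot is a Smi marker: a tier-up request recorded but not fulfilled.
    return SlotState::kMarkerPending;
  }
  // The slot holds OptimizationMarker::kNone or a cleared WeakCell.
  return SlotState::kEmpty;
}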
// Conversion from an integer index to either a slot or an ic slot.


@ -202,7 +202,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set_map_no_write_barrier(isolate->heap()->feedback_vector_map());
array->set(kSharedFunctionInfoIndex, *shared);
array->set(kOptimizedCodeIndex, *factory->empty_weak_cell());
array->set(kOptimizedCodeIndex, Smi::FromEnum(OptimizationMarker::kNone));
array->set(kInvocationCountIndex, Smi::kZero);
// Ensure we can skip the write barrier
@ -305,28 +305,38 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
vector->set(kOptimizedCodeIndex, *cell);
}
void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
set(kOptimizedCodeIndex, Smi::FromEnum(marker));
}
void FeedbackVector::ClearOptimizedCode() {
set(kOptimizedCodeIndex, GetIsolate()->heap()->empty_weak_cell());
set(kOptimizedCodeIndex, Smi::FromEnum(OptimizationMarker::kNone));
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo* shared, const char* reason) {
WeakCell* cell = WeakCell::cast(get(kOptimizedCodeIndex));
if (!cell->cleared()) {
Code* code = Code::cast(cell->value());
if (code->marked_for_deoptimization()) {
if (FLAG_trace_deopt) {
PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
reason);
shared->ShortPrint();
PrintF("]\n");
}
if (!code->deopt_already_counted()) {
shared->increment_deopt_count();
code->set_deopt_already_counted(true);
}
ClearOptimizedCode();
Object* slot = get(kOptimizedCodeIndex);
if (slot->IsSmi()) return;
WeakCell* cell = WeakCell::cast(slot);
if (cell->cleared()) {
ClearOptimizedCode();
return;
}
Code* code = Code::cast(cell->value());
if (code->marked_for_deoptimization()) {
if (FLAG_trace_deopt) {
PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
reason);
shared->ShortPrint();
PrintF("]\n");
}
if (!code->deopt_already_counted()) {
shared->increment_deopt_count();
code->set_deopt_already_counted(true);
}
ClearOptimizedCode();
}
}


@ -325,13 +325,17 @@ class FeedbackVector : public FixedArray {
inline int invocation_count() const;
inline void clear_invocation_count();
inline Object* optimized_code_cell() const;
inline Code* optimized_code() const;
inline OptimizationMarker optimization_marker() const;
inline bool has_optimized_code() const;
inline bool has_optimization_marker() const;
void ClearOptimizedCode();
void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo* shared,
const char* reason);
static void SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code);
void SetOptimizationMarker(OptimizationMarker marker);
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) {


@ -270,10 +270,6 @@ DEFINE_IMPLICATION(future, turbo)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
"mark shared functions for tier up")
DEFINE_BOOL(mark_optimizing_shared_functions, true,
"mark shared functions if they are concurrently optimizing")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")


@ -1416,6 +1416,31 @@ inline std::ostream& operator<<(std::ostream& os,
return os;
}
enum class OptimizationMarker {
kNone,
kCompileOptimized,
kCompileOptimizedConcurrent,
kInOptimizationQueue
};
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
switch (marker) {
case OptimizationMarker::kNone:
return os << "OptimizationMarker::kNone";
case OptimizationMarker::kCompileOptimized:
return os << "OptimizationMarker::kCompileOptimized";
case OptimizationMarker::kCompileOptimizedConcurrent:
return os << "OptimizationMarker::kCompileOptimizedConcurrent";
case OptimizationMarker::kInOptimizationQueue:
return os << "OptimizationMarker::kInOptimizationQueue";
}
UNREACHABLE();
return os;
}
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
} // namespace internal
} // namespace v8


@ -3872,6 +3872,13 @@ inline bool Code::is_interpreter_trampoline_builtin() {
this == *builtins->InterpreterEnterBytecodeDispatch();
}
inline bool Code::checks_optimization_marker() {
Builtins* builtins = GetIsolate()->builtins();
return this == *builtins->CompileLazy() ||
this == *builtins->InterpreterEntryTrampoline() ||
this == *builtins->CheckOptimizationMarker();
}
inline bool Code::has_unwinding_info() const {
return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
}
@ -4792,25 +4799,45 @@ bool JSFunction::IsOptimized() {
return code()->kind() == Code::OPTIMIZED_FUNCTION;
}
bool JSFunction::HasOptimizedCode() {
return IsOptimized() ||
(has_feedback_vector() && feedback_vector()->has_optimized_code());
}
bool JSFunction::HasOptimizationMarker() {
return has_feedback_vector() && feedback_vector()->has_optimization_marker();
}
void JSFunction::ClearOptimizationMarker() {
DCHECK(has_feedback_vector());
DCHECK(!feedback_vector()->has_optimized_code());
feedback_vector()->SetOptimizationMarker(OptimizationMarker::kNone);
}
bool JSFunction::IsInterpreted() {
return code()->is_interpreter_trampoline_builtin();
}
bool JSFunction::ChecksOptimizationMarker() {
return code()->checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
return code() == GetIsolate()->builtins()->builtin(
Builtins::kCompileOptimized);
return has_feedback_vector() && feedback_vector()->optimization_marker() ==
OptimizationMarker::kCompileOptimized;
}
bool JSFunction::IsMarkedForConcurrentOptimization() {
return code() == GetIsolate()->builtins()->builtin(
Builtins::kCompileOptimizedConcurrent);
return has_feedback_vector() &&
feedback_vector()->optimization_marker() ==
OptimizationMarker::kCompileOptimizedConcurrent;
}
bool JSFunction::IsInOptimizationQueue() {
return code() == GetIsolate()->builtins()->builtin(
Builtins::kInOptimizationQueue);
return has_feedback_vector() && feedback_vector()->optimization_marker() ==
OptimizationMarker::kInOptimizationQueue;
}
@ -4871,20 +4898,24 @@ void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
reason);
shared()->ShortPrint();
ShortPrint();
PrintF("]\n");
}
feedback_vector()->ClearOptimizedCode();
}
}
void JSFunction::ReplaceCode(Code* code) {
bool was_optimized = IsOptimized();
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
DCHECK(has_feedback_vector());
DCHECK(ChecksOptimizationMarker());
DCHECK(!HasOptimizedCode());
if (was_optimized && is_optimized) {
ClearOptimizedCodeSlot("Replacing with another optimized code");
}
feedback_vector()->SetOptimizationMarker(marker);
}
void JSFunction::ReplaceCode(Code* code) {
bool was_optimized = this->code()->kind() == Code::OPTIMIZED_FUNCTION;
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
set_code(code);
@ -4892,8 +4923,7 @@ void JSFunction::ReplaceCode(Code* code) {
// context based on the state change.
if (!was_optimized && is_optimized) {
context()->native_context()->AddOptimizedFunction(this);
}
if (was_optimized && !is_optimized) {
} else if (was_optimized && !is_optimized) {
// TODO(titzer): linear in the number of optimized functions; fix!
context()->native_context()->RemoveOptimizedFunction(this);
}
@ -4988,9 +5018,7 @@ Object* JSFunction::prototype() {
bool JSFunction::is_compiled() {
Builtins* builtins = GetIsolate()->builtins();
return code() != builtins->builtin(Builtins::kCompileLazy) &&
code() != builtins->builtin(Builtins::kCompileOptimized) &&
code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
return code() != builtins->builtin(Builtins::kCompileLazy);
}
ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
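
Taken together, the new predicates and the DCHECKs in SetOptimizationMarker and ClearOptimizationMarker above describe a small state machine over the feedback vector's optimized-code slot: a marker may only be set while no optimized code is cached, and installing optimized code consumes the marker. A standalone toy model of those invariants (the names and the two-field layout are illustrative assumptions; the real vector keeps code and marker in a single slot):

#include <cassert>

enum class OptimizationMarker {
  kNone,
  kCompileOptimized,
  kCompileOptimizedConcurrent,
  kInOptimizationQueue
};

struct ToyFeedbackVector {
  OptimizationMarker marker = OptimizationMarker::kNone;
  const void* optimized_code = nullptr;  // stands in for the code cell

  bool has_optimized_code() const { return optimized_code != nullptr; }
  bool has_optimization_marker() const {
    return marker != OptimizationMarker::kNone;
  }

  void SetOptimizationMarker(OptimizationMarker m) {
    assert(!has_optimized_code());  // mirrors DCHECK(!HasOptimizedCode())
    marker = m;
  }

  void InstallOptimizedCode(const void* code) {
    optimized_code = code;
    marker = OptimizationMarker::kNone;  // compilation consumes the marker
  }

  void ClearOptimizationMarker() {
    assert(!has_optimized_code());  // mirrors the DCHECK above
    marker = OptimizationMarker::kNone;
  }
};

int main() {
  ToyFeedbackVector vector;
  vector.SetOptimizationMarker(OptimizationMarker::kCompileOptimizedConcurrent);
  static const int kFakeCode = 0;
  vector.InstallOptimizedCode(&kFakeCode);
  assert(!vector.has_optimization_marker());
  return 0;
}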


@ -11958,46 +11958,43 @@ bool JSFunction::Inlines(SharedFunctionInfo* candidate) {
return false;
}
void JSFunction::MarkForOptimization() {
Isolate* isolate = GetIsolate();
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() ||
!shared()->optimization_disabled());
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
if (FLAG_mark_shared_functions_for_tier_up) {
shared()->set_marked_for_tier_up(true);
}
}
void JSFunction::AttemptConcurrentOptimization() {
void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
Isolate* isolate = GetIsolate();
if (!isolate->concurrent_recompilation_enabled() ||
isolate->bootstrapper()->IsActive()) {
MarkForOptimization();
return;
}
DCHECK(!IsInOptimizationQueue());
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() ||
!shared()->optimization_disabled());
DCHECK(isolate->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
ShortPrint();
PrintF(" for concurrent recompilation.\n");
mode = ConcurrencyMode::kNotConcurrent;
}
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
// No write barrier required, since the builtin is part of the root set.
if (FLAG_mark_shared_functions_for_tier_up) {
// TODO(leszeks): The compilation isn't concurrent if we trigger it using
// this bit.
shared()->set_marked_for_tier_up(true);
DCHECK(!IsOptimized());
DCHECK(!HasOptimizedCode());
DCHECK(shared()->allows_lazy_compilation() ||
!shared()->optimization_disabled());
if (mode == ConcurrencyMode::kConcurrent) {
if (IsInOptimizationQueue()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Not marking ");
ShortPrint();
PrintF(" -- already in optimization queue.\n");
}
return;
}
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
ShortPrint();
PrintF(" for concurrent recompilation.\n");
}
}
if (!IsInterpreted()) {
// For the non-I+TF path, install a shim which checks the optimization marker.

// No write barrier required, since the builtin is part of the root set.
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
}
SetOptimizationMarker(mode == ConcurrencyMode::kConcurrent
? OptimizationMarker::kCompileOptimizedConcurrent
: OptimizationMarker::kCompileOptimized);
}
// static


@ -1587,6 +1587,13 @@ class Smi: public Object {
return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
}
template <typename E,
typename = typename std::enable_if<std::is_enum<E>::value>::type>
static inline Smi* FromEnum(E value) {
STATIC_ASSERT(sizeof(E) <= sizeof(int));
return FromInt(static_cast<int>(value));
}
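
Smi::FromEnum lets builtins compare a tagged marker loaded from the feedback vector against an enum constant without extra conversions. A compile-time sketch (C++14) of what the helper amounts to, using assumed tag constants rather than V8's real Smi configuration:

#include <cstdint>
#include <type_traits>

constexpr intptr_t kSmiTag = 0;        // assumption for illustration
constexpr intptr_t kSmiShiftBits = 1;  // assumption for illustration

constexpr intptr_t SmiFromInt(int value) {
  return (static_cast<intptr_t>(value) << kSmiShiftBits) | kSmiTag;
}

template <typename E,
          typename = typename std::enable_if<std::is_enum<E>::value>::type>
constexpr intptr_t SmiFromEnum(E value) {
  static_assert(sizeof(E) <= sizeof(int), "enum must fit in an int");
  return SmiFromInt(static_cast<int>(value));
}

enum class OptimizationMarker { kNone, kCompileOptimized };
static_assert(SmiFromEnum(OptimizationMarker::kCompileOptimized) ==
                  SmiFromInt(1),
              "markers compare as tagged integers");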
// Returns whether value can be represented in a Smi.
static inline bool IsValid(intptr_t value) {
bool result = Internals::IsValidSmi(value);
@ -3687,6 +3694,10 @@ class Code: public HeapObject {
// Testers for interpreter builtins.
inline bool is_interpreter_trampoline_builtin();
// Tells whether the code checks the optimization marker in the function's
// feedback vector.
inline bool checks_optimization_marker();
// [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
// object was generated by either the hydrogen or the TurboFan optimizing
// compiler (but it may not be an optimized function).
@ -5032,13 +5043,27 @@ class JSFunction: public JSObject {
// optimized.
inline bool IsInterpreted();
// Tells whether or not this function has been optimized.
// Tells whether or not this function checks its optimization marker in its
// feedback vector.
inline bool ChecksOptimizationMarker();
// Tells whether or not this function holds optimized code.
//
// Note: Returning false does not necessarily mean that this function hasn't
// been optimized, as it may have optimized code in its feedback vector.
inline bool IsOptimized();
// Tells whether or not this function has optimized code available to it,
// either because it is optimized or because it has optimized code in its
// feedback vector.
inline bool HasOptimizedCode();
// Tells whether or not this function has a (non-zero) optimization marker.
inline bool HasOptimizationMarker();
// Mark this function for lazy recompilation. The function will be recompiled
// the next time it is executed.
void MarkForOptimization();
void AttemptConcurrentOptimization();
void MarkForOptimization(ConcurrencyMode mode);
// Tells whether or not the function is already marked for lazy recompilation.
inline bool IsMarkedForOptimization();
@ -5050,6 +5075,12 @@ class JSFunction: public JSObject {
// Clears the optimized code slot in the function's feedback vector.
inline void ClearOptimizedCodeSlot(const char* reason);
// Sets the optimization marker in the function's feedback vector.
inline void SetOptimizationMarker(OptimizationMarker marker);
// Clears the optimization marker in the function's feedback vector.
inline void ClearOptimizationMarker();
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();


@ -86,11 +86,6 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function,
SharedFunctionInfo::IsAsmFunctionBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
SharedFunctionInfo::IsDeclarationBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
SharedFunctionInfo::MarkedForTierUpBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
has_concurrent_optimization_job,
SharedFunctionInfo::HasConcurrentOptimizationJobBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
SharedFunctionInfo::NeedsHomeObjectBit)
@ -229,8 +224,7 @@ ACCESSORS(SharedFunctionInfo, outer_scope_info, HeapObject,
bool SharedFunctionInfo::is_compiled() const {
Builtins* builtins = GetIsolate()->builtins();
DCHECK(code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent));
DCHECK(code() != builtins->builtin(Builtins::kCompileOptimized));
DCHECK(code() != builtins->builtin(Builtins::kCheckOptimizationMarker));
return code() != builtins->builtin(Builtins::kCompileLazy);
}


@ -305,12 +305,6 @@ class SharedFunctionInfo : public HeapObject {
// Whether this function was created from a FunctionDeclaration.
DECL_BOOLEAN_ACCESSORS(is_declaration)
// Whether this function was marked to be tiered up.
DECL_BOOLEAN_ACCESSORS(marked_for_tier_up)
// Whether this function has a concurrent compilation job running.
DECL_BOOLEAN_ACCESSORS(has_concurrent_optimization_job)
// Indicates that asm->wasm conversion failed and should not be re-attempted.
DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
@ -481,24 +475,22 @@ class SharedFunctionInfo : public HeapObject {
#undef START_POSITION_AND_TYPE_BIT_FIELDS
// Bit positions in |compiler_hints|.
#define COMPILER_HINTS_BIT_FIELDS(V, _) \
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
V(FunctionKindBits, FunctionKind, 10, _) \
V(MarkedForTierUpBit, bool, 1, _) \
V(HasDuplicateParametersBit, bool, 1, _) \
V(AllowLazyCompilationBit, bool, 1, _) \
V(OptimizationDisabledBit, bool, 1, _) \
V(UsesArgumentsBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
V(ForceInlineBit, bool, 1, _) \
V(IsAsmFunctionBit, bool, 1, _) \
V(MustUseIgnitionTurboBit, bool, 1, _) \
V(IsDeclarationBit, bool, 1, _) \
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(HasConcurrentOptimizationJobBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 4, _) \
/* Bits 28-31 are unused. */
#define COMPILER_HINTS_BIT_FIELDS(V, _) \
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
V(FunctionKindBits, FunctionKind, 10, _) \
V(HasDuplicateParametersBit, bool, 1, _) \
V(AllowLazyCompilationBit, bool, 1, _) \
V(OptimizationDisabledBit, bool, 1, _) \
V(UsesArgumentsBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
V(ForceInlineBit, bool, 1, _) \
V(IsAsmFunctionBit, bool, 1, _) \
V(MustUseIgnitionTurboBit, bool, 1, _) \
V(IsDeclarationBit, bool, 1, _) \
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 4, _) \
/* Bits 26-31 are unused. */
DEFINE_BIT_FIELDS(COMPILER_HINTS_BIT_FIELDS)
#undef COMPILER_HINTS_BIT_FIELDS


@ -150,7 +150,7 @@ void RuntimeProfiler::Optimize(JSFunction* function,
OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
function->AttemptConcurrentOptimization();
function->MarkForOptimization(ConcurrencyMode::kConcurrent);
}
void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
@ -218,7 +218,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
} else if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
function->HasOptimizedCode())) {
// Attempt OSR if we are still running unoptimized code even though the
// function has long been marked or even already been optimized.
int ticks = shared->profiler_ticks();
@ -343,7 +343,7 @@ bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
function->HasOptimizedCode())) {
// Attempt OSR if we are still running interpreted code even though the
// function has long been marked or even already been optimized.
int64_t allowance =


@ -51,7 +51,7 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
return isolate->StackOverflow();
}
if (!Compiler::CompileOptimized(function, Compiler::CONCURRENT)) {
if (!Compiler::CompileOptimized(function, ConcurrencyMode::kConcurrent)) {
return isolate->heap()->exception();
}
DCHECK(function->is_compiled());
@ -67,7 +67,7 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
return isolate->StackOverflow();
}
if (!Compiler::CompileOptimized(function, Compiler::NOT_CONCURRENT)) {
if (!Compiler::CompileOptimized(function, ConcurrencyMode::kNotConcurrent)) {
return isolate->heap()->exception();
}
DCHECK(function->is_compiled());
@ -79,7 +79,8 @@ RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
DCHECK(function->is_compiled());
DCHECK(function->shared()->is_compiled());
function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "Runtime_EvictOptimizedCodeSlot");
return function->code();
@ -354,22 +355,17 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
ast_id.ToInt(), data->OsrPcOffset()->value());
}
if (result->is_turbofanned()) {
// When we're waiting for concurrent optimization, set to compile on
// the next call - otherwise we'd run unoptimized once more
// and potentially compile for OSR another time as well.
if (function->IsMarkedForConcurrentOptimization()) {
if (FLAG_trace_osr) {
PrintF("[OSR - Re-marking ");
function->PrintName();
PrintF(" for non-concurrent optimization]\n");
}
function->ReplaceCode(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
DCHECK(result->is_turbofanned());
if (!function->HasOptimizedCode()) {
// If we're not already optimized, set to optimize non-concurrently on
// the next call, otherwise we'd run unoptimized once more and
// potentially compile for OSR again.
if (FLAG_trace_osr) {
PrintF("[OSR - Re-marking ");
function->PrintName();
PrintF(" for non-concurrent optimization]\n");
}
} else {
// Crankshafted OSR code can be installed into the function.
function->ReplaceCode(*result);
function->SetOptimizationMarker(OptimizationMarker::kCompileOptimized);
}
return *result;
}
@ -401,7 +397,11 @@ RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
return isolate->StackOverflow();
}
isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
// Only try to install optimized functions if the interrupt was InstallCode.
if (isolate->stack_guard()->CheckAndClearInstallCode()) {
isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
}
return (function->IsOptimized()) ? function->code()
: function->shared()->code();
}
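
Gating the install on CheckAndClearInstallCode means that stack-check interrupts raised for other reasons no longer trigger speculative installs. A minimal sketch of such a check-and-clear bit, assuming it behaves like an atomic exchange (the real StackGuard is more involved):

#include <atomic>
#include <iostream>

std::atomic<bool> install_code_requested{false};

// Returns true at most once per request: the read and the clear happen as a
// single atomic exchange, so two checkers cannot both observe the request.
bool CheckAndClearInstallCode() {
  return install_code_requested.exchange(false);
}

int main() {
  install_code_requested.store(true);
  std::cout << CheckAndClearInstallCode() << " "    // 1: request observed
            << CheckAndClearInstallCode() << "\n";  // 0: already cleared
  return 0;
}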


@ -122,7 +122,6 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
return isolate->heap()->undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
function->shared()->set_marked_for_tier_up(false);
// If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
@ -234,22 +233,41 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
}
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
function->MarkForOptimization();
if (FLAG_trace_opt) {
PrintF("[manually marking ");
function->ShortPrint();
PrintF(" for optimization]\n");
if (function->IsOptimized()) {
return isolate->heap()->undefined_value();
}
// If the function has optimized code, ensure that we check for it and return.
if (function->HasOptimizedCode()) {
if (!function->IsInterpreted()) {
// For the non-I+TF path, install a shim which checks the optimization marker.
function->ReplaceCode(
isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
}
DCHECK(function->ChecksOptimizationMarker());
return isolate->heap()->undefined_value();
}
ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
isolate->concurrent_recompilation_enabled()) {
function->AttemptConcurrentOptimization();
concurrency_mode = ConcurrencyMode::kConcurrent;
}
}
if (FLAG_trace_opt) {
PrintF("[manually marking ");
function->ShortPrint();
PrintF(" for %s optimization]\n",
concurrency_mode == ConcurrencyMode::kConcurrent ? "concurrent"
: "non-concurrent");
}
// TODO(mvstanton): pass pretenure flag to EnsureLiterals.
JSFunction::EnsureLiterals(function);
function->MarkForOptimization(concurrency_mode);
return isolate->heap()->undefined_value();
}
@ -272,6 +290,17 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
// Ensure that the function is marked for non-concurrent optimization, so that
// subsequent runs don't also optimize.
if (!function->HasOptimizedCode()) {
if (FLAG_trace_osr) {
PrintF("[OSR - OptimizeOsr marking ");
function->ShortPrint();
PrintF(" for non-concurrent optimization]\n");
}
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
}
// Make the profiler arm all back edges in unoptimized code.
if (it.frame()->type() == StackFrame::JAVA_SCRIPT ||
it.frame()->type() == StackFrame::INTERPRETED) {


@ -26,7 +26,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -67,7 +67,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kAbort), R(21), U8(1),
/* 40 S> */ B(LdaUndefined),
@ -357,7 +357,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -398,7 +398,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kAbort), R(21), U8(1),
/* 40 S> */ B(LdaUndefined),
@ -705,7 +705,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -746,7 +746,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kAbort), R(21), U8(1),
/* 40 S> */ B(LdaUndefined),


@ -653,7 +653,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -834,7 +834,7 @@ bytecodes: [
B(RestoreGeneratorState), R(10),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
B(LdaSmi), I8(-2),
@ -882,7 +882,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 35 E> */ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(17),
B(CallRuntime), U16(Runtime::kAbort), R(17), U8(1),
/* 30 S> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
@ -1263,7 +1263,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -1301,7 +1301,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 40 E> */ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(22),
B(CallRuntime), U16(Runtime::kAbort), R(22), U8(1),
/* 35 S> */ B(LdaNamedProperty), R(5), U8(3), U8(9),


@ -23,7 +23,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), I8(-2),
@ -78,7 +78,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), I8(-2),
@ -156,7 +156,7 @@ bytecodes: [
B(RestoreGeneratorState), R(10),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
B(LdaSmi), I8(-2),
@ -200,7 +200,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 30 E> */ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(16),
B(CallRuntime), U16(Runtime::kAbort), R(16), U8(1),
/* 25 S> */ B(LdaNamedProperty), R(4), U8(7), U8(10),
@ -369,7 +369,7 @@ bytecodes: [
B(RestoreGeneratorState), R(9),
B(Star), R(10),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
B(LdaSmi), I8(-2),
@ -417,7 +417,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(10),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
B(StackCheck),


@ -23,7 +23,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -86,7 +86,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -151,7 +151,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -237,7 +237,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -321,7 +321,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -407,7 +407,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -491,7 +491,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -559,7 +559,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -639,7 +639,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -702,7 +702,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -766,7 +766,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),


@ -282,7 +282,7 @@ bytecodes: [
B(RestoreGeneratorState), R(7),
B(Star), R(8),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(9),
B(CallRuntime), U16(Runtime::kAbort), R(9), U8(1),
B(LdaSmi), I8(-2),
@ -377,7 +377,7 @@ bytecodes: [
B(RestoreGeneratorState), R(6),
B(Star), R(7),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(8),
B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
B(LdaSmi), I8(-2),
@ -414,7 +414,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 54 E> */ B(TestEqualStrictNoFeedback), R(7),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(8),
B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
B(StackCheck),
@ -440,7 +440,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(7),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(8),
B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
B(LdaSmi), I8(1),
@ -635,7 +635,7 @@ bytecodes: [
B(RestoreGeneratorState), R(7),
B(Star), R(8),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(9),
B(CallRuntime), U16(Runtime::kAbort), R(9), U8(1),
B(LdaSmi), I8(-2),
@ -662,7 +662,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 59 E> */ B(TestEqualStrictNoFeedback), R(8),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(StackCheck),
@ -688,7 +688,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(8),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
B(LdaSmi), I8(81),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(1),