Revert "[compiler] Drive optimizations with feedback vector"

This reverts commit e39c9e020f.

Reason for revert: Breaks https://build.chromium.org/p/client.v8/builders/V8%20Linux%20-%20debug/builds/15561

Original change's description:
> [compiler] Drive optimizations with feedback vector
> 
> For interpreted functions, use the optimized code slot in the feedback vector
> to store an optimization marker (optimize/in optimization queue) rather than
> changing the JSFunction's code object. Then, adapt the self-healing mechanism
> to also dispatch based on this optimization marker. Similarly, replace SFI
> marking with optimization marker checks in CompileLazy.
> 
> This allows JSFunctions to share optimization information (replacing shared
> function marking) without leaking this information across native contexts. Non
> I+TF functions (asm.js or --no-turbo) use a CheckOptimizationMarker shim which
> generalises the old CompileOptimized/InOptimizationQueue builtins and also
> checks the same optimization marker as CompileLazy and
> InterpreterEntryTrampoline.
> 
> Change-Id: I6826bdde7ab9a919cdb6b69bc0ebc6174bcb91ae
> Reviewed-on: https://chromium-review.googlesource.com/509716
> Commit-Queue: Leszek Swirski <leszeks@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#45901}

TBR=rmcilroy@chromium.org,mstarzinger@chromium.org,leszeks@chromium.org
No-Presubmit: true
No-Tree-Checks: true
No-Try: true

Change-Id: Ib6c2b4d90fc5f659a6dcaf3fd30321507ca9cb94
Reviewed-on: https://chromium-review.googlesource.com/532916
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45903}
Leszek Swirski 2017-06-13 13:24:05 +00:00 committed by Commit Bot
parent 9cae2e8c97
commit 58978da698
32 changed files with 855 additions and 1407 deletions
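
For orientation before the per-architecture hunks below: the reverted change made the interpreter entry and CompileLazy paths dispatch on the feedback vector's optimized code slot, which holds either a Smi optimization marker or a weak cell pointing at optimized code. The following is a rough, self-contained C++ sketch of that dispatch, not the actual builtin; the names OptimizedCodeSlot, Action and DispatchOnOptimizedCodeSlot are invented for illustration only.

#include <variant>

// Markers the optimized code slot can hold (stored as Smis in V8; listed
// here as a plain enum for the sketch).
enum class OptimizationMarker {
  kNone,
  kCompileOptimized,
  kCompileOptimizedConcurrent,
  kInOptimizationQueue,
};

// Stand-in for a code object; the only property the dispatch cares about.
struct Code {
  bool marked_for_deoptimization = false;
};

// The slot holds either a marker or a (possibly cleared) weak pointer to
// optimized code. A cleared weak cell is modelled as a null Code*.
using OptimizedCodeSlot = std::variant<OptimizationMarker, Code*>;

// What the entry trampoline / CompileLazy shim ends up doing.
enum class Action {
  kRunBytecode,            // fall through to the interpreter / SFI code
  kCompileNotConcurrent,   // tail call Runtime::kCompileOptimized_NotConcurrent
  kCompileConcurrent,      // tail call Runtime::kCompileOptimized_Concurrent
  kTryInstallOptimized,    // tail call Runtime::kTryInstallOptimizedCode
  kEvictOptimizedCode,     // tail call Runtime::kEvictOptimizedCodeSlot
  kTailCallOptimizedCode,  // self-heal the closure and jump to optimized code
};

Action DispatchOnOptimizedCodeSlot(const OptimizedCodeSlot& slot,
                                   bool hit_stack_limit) {
  if (const auto* marker = std::get_if<OptimizationMarker>(&slot)) {
    switch (*marker) {
      case OptimizationMarker::kNone:
        return Action::kRunBytecode;
      case OptimizationMarker::kCompileOptimized:
        return Action::kCompileNotConcurrent;
      case OptimizationMarker::kCompileOptimizedConcurrent:
        return Action::kCompileConcurrent;
      case OptimizationMarker::kInOptimizationQueue:
        // Only poll for a finished concurrent job when an interrupt check
        // would fire anyway; otherwise keep running bytecode.
        return hit_stack_limit ? Action::kTryInstallOptimized
                               : Action::kRunBytecode;
    }
  }
  Code* code = std::get<Code*>(slot);
  if (code == nullptr) return Action::kRunBytecode;  // weak cell was cleared
  if (code->marked_for_deoptimization) return Action::kEvictOptimizedCode;
  return Action::kTailCallOptimizedCode;
}

This is the behaviour implemented per architecture by the MaybeTailCallOptimizedCodeSlot helpers that this revert deletes; the revert restores the older scheme in which the entry trampoline and CompileLazy only look for a weak cell of optimized code and the SFI tier-up bit.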

View File

@ -67,7 +67,6 @@ namespace internal {
V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedBooleanValue, "Expected boolean value") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kExpectedFixedDoubleArrayMap, \
"Expected a fixed double array map in fast shallow clone array literal") \
V(kExpectedFunctionObject, "Expected function object in register") \
@ -76,8 +75,6 @@ namespace internal {
V(kExpectedNativeContext, "Expected native context") \
V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedNonNullContext, "Expected non-null context") \
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \

View File

@ -429,6 +429,23 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(r2);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@ -994,119 +1011,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ add(sp, sp, args_count, LeaveCC);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ cmp(smi_entry, Operand(Smi::FromEnum(marker)));
__ b(ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee if needed, and caller)
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(
!AreAliased(feedback_vector, r0, r1, r3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = r1;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ ldr(optimized_code_entry,
FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ cmp(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
__ b(eq, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ ldr(scratch2, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ Jump(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@ -1125,31 +1029,34 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = r1;
Register feedback_vector = r2;
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
__ PushStandardFrame(r1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = r4;
__ ldr(r0, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
__ ldr(r0, FieldMemOperand(r0, Cell::kValueOffset));
__ ldr(
optimized_code_entry,
FieldMemOperand(r0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
__ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
__ SmiTst(r4);
__ ldr(r2, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
__ SmiTst(r2);
__ b(ne, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@ -1162,15 +1069,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ ldr(r9,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
__ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
__ ldr(r9, FieldMemOperand(
r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ str(r9,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ str(r9, FieldMemOperand(
r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@ -1245,11 +1152,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
__ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
__ ldr(r9, FieldMemOperand(r2, DebugInfo::kFlagsOffset));
__ SmiUntag(r9);
__ tst(r9, Operand(DebugInfo::kHasBreakInfo));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
FieldMemOperand(r2, DebugInfo::kDebugBytecodeArrayOffset), ne);
__ b(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@ -1257,12 +1164,36 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
__ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
__ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r4, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, r4, r5);
__ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(r1, r4, r5);
__ Jump(r4);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ ldr(r5, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r1, r6, r5,
r2);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@ -1493,33 +1424,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
// -- r3 : new target (preserved for callee)
// -- r1 : target function (preserved for callee)
// -----------------------------------
Register closure = r1;
// Get the feedback vector.
Register feedback_vector = r2;
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
__ Assert(ne, BailoutReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@ -1528,24 +1432,42 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = r1;
Register feedback_vector = r2;
Register index = r2;
// Do we have a valid feedback vector?
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
__ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Is optimized code available in the feedback vector?
Register entry = r4;
__ ldr(entry, FieldMemOperand(
index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ ldr(r5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
__ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &gotta_call_runtime);
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r6, r5, r2);
__ Jump(entry);
// We found no optimized code.
Register entry = r4;
__ bind(&try_shared);
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ ldr(r5, FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r5, Operand(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ b(ne, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@ -1563,6 +1485,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)

View File

@ -430,6 +430,22 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Br(x2);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However, not
// checking may delay installing ready functions, and always checking would be
// quite expensive. A good compromise is to first check against stack limit as
// a cue for an interrupt signal.
Label ok;
__ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
__ B(hs, &ok);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ Bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@ -1018,117 +1034,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Drop(args_count, 1);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee if needed, and caller)
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(
!AreAliased(feedback_vector, x0, x1, x3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = x1;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ Ldr(optimized_code_entry,
FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ CompareAndBranch(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ Cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
__ B(hs, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ Ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ TestAndBranchIfAnySet(scratch2, 1 << Code::kMarkedForDeoptimizationBit,
&found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ Jump(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@ -1147,28 +1052,31 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = x1;
Register feedback_vector = x2;
// Load the feedback vector from the closure.
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
__ Push(lr, fp, cp, x1);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = x7;
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
__ Ldr(x0, FieldMemOperand(x0, Cell::kValueOffset));
__ Ldr(
optimized_code_entry,
FieldMemOperand(x0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
__ Ldr(x11, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
@ -1184,7 +1092,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(x10, FieldMemOperand(
x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
@ -1278,12 +1186,35 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
__ Ldr(x7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x7, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x7, FieldMemOperand(x7, SharedFunctionInfo::kCodeOffset));
__ Add(x7, x7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Str(x7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, x7, x5);
__ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(x1, x7, x5);
__ Jump(x7);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ Ldr(w8, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
&gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, x1, x4, x5,
x13);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@ -1520,33 +1451,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
// -- x3 : new target (preserved for callee)
// -- x1 : target function (preserved for callee)
// -----------------------------------
Register closure = x1;
// Get the feedback vector.
Register feedback_vector = x2;
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
__ Assert(ne, BailoutReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@ -1555,29 +1459,50 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = x1;
Register feedback_vector = x2;
Register index = x2;
// Do we have a valid feedback vector?
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
__ Ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Is optimized code available in the feedback vector?
Register entry = x7;
__ Ldr(entry, FieldMemOperand(
index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ Ldr(w8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
__ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
&gotta_call_runtime);
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, x4, x5, x13);
__ Jump(entry);
// We found no optimized code.
Register entry = x7;
Register temp = x5;
__ Bind(&try_shared);
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ Ldr(temp.W(),
FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
__ TestAndBranchIfAnySet(temp.W(),
SharedFunctionInfo::MarkedForTierUpBit::kMask,
&gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(x5, masm->CodeObject());
__ Cmp(entry, x5);
__ Move(temp, masm->CodeObject());
__ Cmp(entry, temp);
__ B(eq, &gotta_call_runtime);
// Install the SFI's code entry.
@ -1590,6 +1515,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)

View File

@ -137,27 +137,23 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
}
{
// If the feedback vector has optimized code, check whether it is marked
// for deopt and, if so, clear the slot.
Label optimized_code_ok(this), clear_optimized_code(this);
// for deopt and, if so, clear it.
Label optimized_code_ok(this);
Node* literals = LoadObjectField(literals_cell, Cell::kValueOffset);
GotoIfNot(IsFeedbackVector(literals), &optimized_code_ok);
Node* optimized_code_cell_slot =
Node* optimized_code_cell =
LoadFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex);
GotoIf(TaggedIsSmi(optimized_code_cell_slot), &optimized_code_ok);
Node* optimized_code =
LoadWeakCellValue(optimized_code_cell_slot, &clear_optimized_code);
LoadWeakCellValue(optimized_code_cell, &optimized_code_ok);
Node* code_flags = LoadObjectField(
optimized_code, Code::kKindSpecificFlags1Offset, MachineType::Uint32());
Node* marked_for_deopt =
DecodeWord32<Code::MarkedForDeoptimizationField>(code_flags);
Branch(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok,
&clear_optimized_code);
GotoIf(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok);
// Cell is empty or code is marked for deopt, clear the optimized code slot.
BIND(&clear_optimized_code);
// Code is marked for deopt, clear the optimized code slot.
StoreFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex,
SmiConstant(Smi::kZero), SKIP_WRITE_BARRIER);
EmptyWeakCellConstant(), SKIP_WRITE_BARRIER);
Goto(&optimized_code_ok);
BIND(&optimized_code_ok);

View File

@ -149,7 +149,9 @@ namespace internal {
ASM(InterpreterOnStackReplacement) \
\
/* Code life-cycle */ \
ASM(CheckOptimizationMarker) \
ASM(CompileOptimized) \
ASM(CompileOptimizedConcurrent) \
ASM(InOptimizationQueue) \
ASM(InstantiateAsmJs) \
ASM(MarkCodeAsToBeExecutedOnce) \
ASM(MarkCodeAsExecutedOnce) \

View File

@ -92,6 +92,24 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ jmp(ebx);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@ -651,121 +669,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ push(return_pc);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ cmp(smi_entry, Immediate(Smi::FromEnum(marker)));
__ j(not_equal, &no_match, Label::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee if needed, and caller)
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, eax, edx, edi, scratch));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = edi;
Register optimized_code_entry = scratch;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ mov(optimized_code_entry,
FieldOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is an optimization marker.
// Fall through if no optimization trigger.
__ cmp(optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
__ j(equal, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(equal, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ mov(optimized_code_entry,
FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
Label found_deoptimized_code;
__ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ push(eax);
__ push(edx);
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax, feedback_vector);
__ pop(edx);
__ pop(eax);
__ jmp(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@ -783,20 +686,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = edi;
Register feedback_vector = ebx;
// Load the feedback vector from the closure.
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set
// up the frame (that is done below).
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
@ -804,6 +696,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = ecx;
__ mov(ebx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
__ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
__ mov(optimized_code_entry,
FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ mov(optimized_code_entry,
FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
@ -823,10 +728,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ add(FieldOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
Immediate(Smi::FromInt(1)));
__ EmitLoadFeedbackVector(ecx);
__ add(
FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
Immediate(Smi::FromInt(1)));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@ -901,12 +807,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
__ push(ebx); // feedback_vector == ebx, so save it.
__ mov(ecx, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
__ mov(ebx, FieldOperand(ecx, DebugInfo::kFlagsOffset));
__ SmiUntag(ebx);
__ test(ebx, Immediate(DebugInfo::kHasBreakInfo));
__ pop(ebx);
__ j(zero, &bytecode_array_loaded);
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(ecx, DebugInfo::kDebugBytecodeArrayOffset));
@ -926,6 +830,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
__ RecordWriteCodeEntryField(edi, ecx, ebx);
__ jmp(ecx);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ push(edx);
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, edi, edx,
eax, ebx);
__ pop(edx);
__ leave();
__ jmp(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
__ leave();
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@ -1296,33 +1225,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
// -- edx : new target (preserved for callee)
// -- edi : target function (preserved for callee)
// -----------------------------------
Register closure = edi;
// Get the feedback vector.
Register feedback_vector = ebx;
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
__ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@ -1331,23 +1233,46 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = edi;
Register feedback_vector = ebx;
Register new_target = edx;
Register argument_count = eax;
// Do we have a valid feedback vector?
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
__ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
__ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Is optimized code available in the feedback vector?
Register entry = ecx;
__ mov(entry,
FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ test(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Code is good, get it into the closure and tail call.
__ push(argument_count);
__ push(new_target);
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, edx, eax, ebx);
__ pop(new_target);
__ pop(argument_count);
__ jmp(entry);
// We found no optimized code.
Register entry = ecx;
__ bind(&try_shared);
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ test(FieldOperand(entry, SharedFunctionInfo::kCompilerHintsOffset),
Immediate(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
@ -1362,9 +1287,19 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ jmp(entry);
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)

View File

@ -425,6 +425,22 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@ -986,115 +1002,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Addu(sp, sp, args_count);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee if needed, and caller)
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ lw(optimized_code_entry,
FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimization marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ Branch(&fallthrough, eq, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ Assert(
eq, kExpectedOptimizationSentinel, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&fallthrough, hs, sp, Operand(at));
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ lw(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ lw(scratch2, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ And(scratch2, scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, scratch2, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ Jump(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@ -1113,27 +1020,29 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = a1;
Register feedback_vector = a2;
// Load the feedback vector from the closure.
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
__ PushStandardFrame(a1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = t0;
__ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ lw(optimized_code_entry,
FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ lw(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ lw(t0, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
@ -1149,15 +1058,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
__ lw(t0,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ lw(t0, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
__ sw(t0,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ sw(t0, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@ -1246,12 +1155,35 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
__ lw(t0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCodeOffset));
__ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sw(t0, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, t0, t1);
__ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(a1, t0, t1);
__ Jump(t0);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ lw(t1,
FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
__ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, t1,
t2);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@ -1485,34 +1417,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
// -- a3 : new target (preserved for callee)
// -- a1 : target function (preserved for callee)
// -----------------------------------
Register closure = a1;
// Get the feedback vector.
Register feedback_vector = a2;
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
Operand(at));
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@ -1521,23 +1425,41 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = a1;
Register feedback_vector = a2;
Register index = a2;
// Do we have a valid feedback vector?
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
__ lw(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// Is optimized code available in the feedback vector?
Register entry = t0;
__ lw(entry, FieldMemOperand(
index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ lw(t1, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
__ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, t1, t2);
__ Jump(entry);
// We found no optimized code.
Register entry = t0;
__ bind(&try_shared);
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ lw(t1, FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
__ And(t1, t1, Operand(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@ -1554,6 +1476,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
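
The CompileLazy builtin above performs a fixed sequence of checks before falling back to the runtime: feedback vector present, optimized code in its slot (and not deopt-marked), shared function marked for tier up, and finally whether the SFI already carries real code. The sketch below models that order; all types here are hypothetical stand-ins, not the real V8 classes.

// Stand-in types, not V8 code; this only models the order of checks.
struct Code { bool marked_for_deoptimization = false; };
struct FeedbackVector { Code* optimized_code = nullptr; };  // null models a cleared weak cell
struct SharedFunctionInfo {
  bool marked_for_tier_up = false;
  Code* code = nullptr;          // currently installed SFI code
  Code* compile_lazy = nullptr;  // the CompileLazy builtin itself
};

enum class Action { kTailCallOptimized, kCallRuntime, kTailCallSharedCode };

Action CompileLazy(const FeedbackVector* vector, const SharedFunctionInfo* sfi) {
  if (vector == nullptr) return Action::kCallRuntime;  // no feedback vector yet
  if (Code* entry = vector->optimized_code) {
    if (entry->marked_for_deoptimization) return Action::kCallRuntime;  // runtime clears the stale slot
    return Action::kTailCallOptimized;  // install into the closure and jump
  }
  if (sfi->marked_for_tier_up) return Action::kCallRuntime;  // trigger optimization
  if (sfi->code != sfi->compile_lazy) return Action::kTailCallSharedCode;
  return Action::kCallRuntime;  // actually compile lazily
}
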
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)


@ -428,6 +428,22 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ LoadRoot(a4, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(a4));
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
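
The stack-limit trick in Generate_InOptimizationQueue is easy to miss in assembler form: the builtin only bothers polling for a finished concurrent compilation job when the stack check would fail anyway. A standalone C++ sketch of that decision follows; the types and values are invented for illustration and are not V8 code.

#include <cstdint>
#include <iostream>

enum class Next { kTryInstallOptimizedCode, kRunSharedCode };

// Only poll the optimizing compiler's queue when the stack pointer has
// already dropped below the limit, i.e. when an interrupt is pending anyway.
Next InOptimizationQueue(std::uintptr_t sp, std::uintptr_t stack_limit) {
  if (sp < stack_limit) {
    return Next::kTryInstallOptimizedCode;  // slow path: maybe install now
  }
  return Next::kRunSharedCode;  // fast path: keep running unoptimized code
}

int main() {
  // With sp below the limit, the builtin would take the install-check path.
  std::cout << (InOptimizationQueue(0x1000, 0x2000) ==
                Next::kTryInstallOptimizedCode)
            << "\n";  // prints 1
}
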
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@ -987,115 +1003,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Daddu(sp, sp, args_count);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee if needed, and caller)
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ Ld(optimized_code_entry,
FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ Branch(&fallthrough, eq, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ Assert(
eq, kExpectedOptimizationSentinel, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&fallthrough, hs, sp, Operand(t0));
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ Ld(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ Lw(a5, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ Jump(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
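
MaybeTailCallOptimizedCodeSlot, removed above, dispatches on a slot that is either a Smi optimization marker or a weak cell holding optimized code. A compact C++ model of that dispatch is sketched below; the variant-based slot and all types are stand-ins, not V8's actual representation.

#include <variant>

// Hypothetical stand-ins for the V8 types involved.
enum class OptimizationMarker {
  kNone, kCompileOptimized, kCompileOptimizedConcurrent, kInOptimizationQueue
};
struct Code { bool marked_for_deoptimization = false; };
// The slot holds either a Smi marker or a (possibly cleared) weak cell.
using OptimizedCodeSlot = std::variant<OptimizationMarker, Code*>;

enum class Action {
  kFallThrough, kCompileNotConcurrent, kCompileConcurrent,
  kTryInstallOptimizedCode, kEvictOptimizedCodeSlot, kTailCallOptimizedCode
};

Action Dispatch(const OptimizedCodeSlot& slot, bool stack_limit_hit) {
  if (const auto* marker = std::get_if<OptimizationMarker>(&slot)) {
    switch (*marker) {
      case OptimizationMarker::kNone:
        return Action::kFallThrough;
      case OptimizationMarker::kCompileOptimized:
        return Action::kCompileNotConcurrent;
      case OptimizationMarker::kCompileOptimizedConcurrent:
        return Action::kCompileConcurrent;
      case OptimizationMarker::kInOptimizationQueue:
        // Same stack-limit heuristic as Generate_InOptimizationQueue.
        return stack_limit_hit ? Action::kTryInstallOptimizedCode
                               : Action::kFallThrough;
    }
    return Action::kFallThrough;
  }
  Code* code = std::get<Code*>(slot);
  if (code == nullptr) return Action::kFallThrough;  // weak cell was cleared
  if (code->marked_for_deoptimization) return Action::kEvictOptimizedCodeSlot;
  return Action::kTailCallOptimizedCode;  // after self-healing the closure
}
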
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@ -1114,27 +1021,29 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = a1;
Register feedback_vector = a2;
// Load the feedback vector from the closure.
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
__ PushStandardFrame(a1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = a4;
__ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ Ld(optimized_code_entry,
FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ld(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ Ld(a4, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
@ -1150,15 +1059,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
__ Ld(a4,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ Ld(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
__ Sd(a4,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Sd(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@ -1247,12 +1156,35 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
__ Ld(a4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
__ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Sd(a4, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, a4, a5);
__ Sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(a1, a4, a5);
__ Jump(a4);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ Lw(a5,
FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, a5,
t0);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@ -1487,34 +1419,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
// -- a3 : new target (preserved for callee)
// -- a1 : target function (preserved for callee)
// -----------------------------------
Register closure = a1;
// Get the feedback vector.
Register feedback_vector = a2;
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
Operand(at));
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@ -1523,23 +1427,41 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = a1;
Register feedback_vector = a2;
Register index = a2;
// Do we have a valid feedback vector?
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
__ Ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ld(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// Is optimized code available in the feedback vector?
Register entry = a4;
__ Ld(entry, FieldMemOperand(
index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ Lw(a5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, a5, t0);
__ Jump(entry);
// We found no optimized code.
Register entry = a4;
__ bind(&try_shared);
__ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ Lwu(a5, FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
__ And(a5, a5, Operand(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@ -1556,6 +1478,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)


@ -98,6 +98,22 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ jmp(rbx);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@ -732,117 +748,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ PushReturnAddressFrom(return_pc);
}
// Tail-call |function_id| if |smi_entry| == |marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register smi_entry,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
__ SmiCompare(smi_entry, Smi::FromEnum(marker));
__ j(not_equal, &no_match, Label::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee if needed, and caller)
// -- rdx : new target (preserved for callee if needed, and caller)
// -- rdi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, rax, rdx, rdi, scratch1, scratch2,
scratch3));
Label optimized_code_slot_is_cell, fallthrough;
Register closure = rdi;
Register optimized_code_entry = scratch1;
const int kOptimizedCodeCellOffset =
FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize;
__ movp(optimized_code_entry,
FieldOperand(feedback_vector, kOptimizedCodeCellOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ SmiCompare(optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kNone));
__ j(equal, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue.
if (FLAG_debug_code) {
__ SmiCompare(optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
__ Assert(equal, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
}
{
// Optimized code slot is a WeakCell.
__ bind(&optimized_code_slot_is_cell);
__ movp(optimized_code_entry,
FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfSmi(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ testl(
FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
__ jmp(optimized_code_entry);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@ -860,17 +765,6 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register closure = rdi;
Register feedback_vector = rbx;
// Load the feedback vector from the closure.
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@ -881,10 +775,22 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rdi); // Callee's JS function.
__ Push(rdx); // Callee's new target.
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = rcx;
__ movp(rbx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
__ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
__ movp(rbx,
FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ movp(optimized_code_entry, FieldOperand(rbx, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
__ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
@ -900,10 +806,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
__ movp(rcx, FieldOperand(rcx, Cell::kValueOffset));
__ SmiAddConstant(
FieldOperand(feedback_vector,
FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
FieldOperand(rcx, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
Smi::FromInt(1));
// Check function data field is actually a BytecodeArray object.
@ -999,6 +906,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
__ RecordWriteCodeEntryField(rdi, rcx, r15);
__ jmp(rcx);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ leave();
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ testl(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, rdi, r14,
r15, rbx);
__ jmp(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(
@ -1270,33 +1199,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
// -- rdx : new target (preserved for callee)
// -- rdi : target function (preserved for callee)
// -----------------------------------
Register closure = rdi;
// Get the feedback vector.
Register feedback_vector = rbx;
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
__ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Otherwise, tail call the SFI code.
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@ -1305,23 +1207,40 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
Register closure = rdi;
Register feedback_vector = rbx;
// Do we have a valid feedback vector?
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
__ movp(rbx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
__ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Is optimized code available in the feedback vector?
Register entry = rcx;
__ movp(entry,
FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found code, check if it is marked for deopt, if so call into runtime to
// clear the optimized code slot.
__ testl(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Code is good, get it into the closure and tail call.
ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r14, r15, rbx);
__ jmp(entry);
// We found no optimized code.
Register entry = rcx;
__ bind(&try_shared);
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ testl(FieldOperand(entry, SharedFunctionInfo::kCompilerHintsOffset),
Immediate(SharedFunctionInfo::MarkedForTierUpBit::kMask));
__ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
@ -1339,6 +1258,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)


@ -22,9 +22,6 @@ void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
if (restore_function_code) {
Handle<JSFunction> function = job->info()->closure();
function->ReplaceCode(function->shared()->code());
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
// TODO(mvstanton): We can't call ensureliterals here due to allocation,
// but we probably shouldn't call ReplaceCode either, as this
// sometimes runs on the worker thread!
@ -199,7 +196,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
CompilationInfo* info = job->info();
Handle<JSFunction> function(*info->closure());
if (function->HasOptimizedCode()) {
if (function->IsOptimized()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Aborting compilation for ");
function->ShortPrint();


@ -395,12 +395,12 @@ bool UseAsmWasm(DeclarationScope* scope, Handle<SharedFunctionInfo> shared_info,
return scope->asm_module();
}
bool UseCompilerDispatcher(ConcurrencyMode inner_function_mode,
bool UseCompilerDispatcher(Compiler::ConcurrencyMode inner_function_mode,
CompilerDispatcher* dispatcher,
DeclarationScope* scope,
Handle<SharedFunctionInfo> shared_info,
bool is_debug, bool will_serialize) {
return inner_function_mode == ConcurrencyMode::kConcurrent &&
return inner_function_mode == Compiler::CONCURRENT &&
dispatcher->IsEnabled() && !is_debug && !will_serialize &&
!UseAsmWasm(scope, shared_info, is_debug);
}
@ -550,8 +550,8 @@ bool GenerateUnoptimizedCode(CompilationInfo* info) {
bool CompileUnoptimizedInnerFunctions(
Compiler::EagerInnerFunctionLiterals* literals,
ConcurrencyMode inner_function_mode, std::shared_ptr<Zone> parse_zone,
CompilationInfo* outer_info) {
Compiler::ConcurrencyMode inner_function_mode,
std::shared_ptr<Zone> parse_zone, CompilationInfo* outer_info) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileUnoptimizedInnerFunctions");
Isolate* isolate = outer_info->isolate();
@ -618,14 +618,14 @@ bool InnerFunctionIsAsmModule(
}
bool CompileUnoptimizedCode(CompilationInfo* info,
ConcurrencyMode inner_function_mode) {
Compiler::ConcurrencyMode inner_function_mode) {
Isolate* isolate = info->isolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
Compiler::EagerInnerFunctionLiterals inner_literals;
{
base::Optional<CompilationHandleScope> compilation_handle_scope;
if (inner_function_mode == ConcurrencyMode::kConcurrent) {
if (inner_function_mode == Compiler::CONCURRENT) {
compilation_handle_scope.emplace(info);
}
if (!Compiler::Analyze(info, &inner_literals)) {
@ -639,11 +639,11 @@ bool CompileUnoptimizedCode(CompilationInfo* info,
// builder doesn't do parsing when visiting function declarations.
if (info->scope()->IsAsmModule() ||
InnerFunctionIsAsmModule(&inner_literals)) {
inner_function_mode = ConcurrencyMode::kNotConcurrent;
inner_function_mode = Compiler::NOT_CONCURRENT;
}
std::shared_ptr<Zone> parse_zone;
if (inner_function_mode == ConcurrencyMode::kConcurrent) {
if (inner_function_mode == Compiler::CONCURRENT) {
// Seal the parse zone so that it can be shared by parallel inner function
// compilation jobs.
DCHECK_NE(info->parse_info()->zone(), info->zone());
@ -680,7 +680,7 @@ void EnsureSharedFunctionInfosArrayOnScript(CompilationInfo* info) {
}
MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
CompilationInfo* info, ConcurrencyMode inner_function_mode) {
CompilationInfo* info, Compiler::ConcurrencyMode inner_function_mode) {
RuntimeCallTimerScope runtimeTimer(
info->isolate(), &RuntimeCallStats::CompileGetUnoptimizedCode);
VMState<COMPILER> state(info->isolate());
@ -688,13 +688,12 @@ MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
// Parse and update ParseInfo with the results.
{
if (!parsing::ParseAny(
info->parse_info(), info->isolate(),
inner_function_mode != ConcurrencyMode::kConcurrent)) {
if (!parsing::ParseAny(info->parse_info(), info->isolate(),
inner_function_mode != Compiler::CONCURRENT)) {
return MaybeHandle<Code>();
}
if (inner_function_mode == ConcurrencyMode::kConcurrent) {
if (inner_function_mode == Compiler::CONCURRENT) {
ParseHandleScope parse_handles(info->parse_info(), info->isolate());
info->parse_info()->ReopenHandlesInNewHandleScope();
info->parse_info()->ast_value_factory()->Internalize(info->isolate());
@ -746,27 +745,13 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
return MaybeHandle<Code>();
}
void ClearOptimizedCodeCache(CompilationInfo* info) {
Handle<JSFunction> function = info->closure();
if (info->osr_ast_id().IsNone()) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
vector->ClearOptimizedCode();
}
}
void InsertCodeIntoOptimizedCodeCache(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
// Function context specialization folds-in the function context,
// so no sharing can occur.
if (info->is_function_context_specializing()) {
// Native context specialized code is not shared, so make sure the optimized
// code cache is clear.
ClearOptimizedCodeCache(info);
return;
}
if (info->is_function_context_specializing()) return;
// Frame specialization implies function context specialization.
DCHECK(!info->is_frame_specializing());
@ -825,6 +810,16 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
if (FLAG_mark_optimizing_shared_functions &&
info->closure()->shared()->has_concurrent_optimization_job()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation job already running for ");
info->shared_info()->ShortPrint();
PrintF(".\n");
}
return false;
}
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
@ -859,6 +854,7 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
info->closure()->shared()->set_has_concurrent_optimization_job(true);
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queued ");
@ -869,7 +865,7 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
}
MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
ConcurrencyMode mode,
Compiler::ConcurrencyMode mode,
BailoutId osr_ast_id = BailoutId::None(),
JavaScriptFrame* osr_frame = nullptr) {
Isolate* isolate = function->GetIsolate();
@ -879,12 +875,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
DCHECK_IMPLIES(ignition_osr, !osr_ast_id.IsNone());
DCHECK_IMPLIES(ignition_osr, FLAG_ignition_osr);
// Make sure we clear the optimization marker on the function so that we
// don't try to re-optimize.
if (function->HasOptimizationMarker()) {
function->ClearOptimizationMarker();
}
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, osr_ast_id)
.ToHandle(&cached_code)) {
@ -968,7 +958,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// allocated in a deferred handle scope that is detached and handed off to
// the background thread when we return.
base::Optional<CompilationHandleScope> compilation;
if (mode == ConcurrencyMode::kConcurrent) {
if (mode == Compiler::CONCURRENT) {
compilation.emplace(info);
}
@ -979,17 +969,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
info->ReopenHandlesInNewHandleScope();
parse_info->ReopenHandlesInNewHandleScope();
if (mode == ConcurrencyMode::kConcurrent) {
if (mode == Compiler::CONCURRENT) {
if (GetOptimizedCodeLater(job.get())) {
job.release(); // The background recompile job owns this now.
// Set the optimization marker and return a code object which checks it.
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
if (function->IsInterpreted()) {
return isolate->builtins()->InterpreterEntryTrampoline();
} else {
return isolate->builtins()->CheckOptimizationMarker();
}
return isolate->builtins()->InOptimizationQueue();
}
} else {
if (GetOptimizedCodeNow(job.get())) return info->code();
@ -999,6 +982,13 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
MaybeHandle<Code> GetOptimizedCodeMaybeLater(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
return GetOptimizedCode(function, isolate->concurrent_recompilation_enabled()
? Compiler::CONCURRENT
: Compiler::NOT_CONCURRENT);
}
CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
@ -1014,6 +1004,11 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
// Reset profiler ticks, function is no longer considered hot.
shared->set_profiler_ticks(0);
shared->set_has_concurrent_optimization_job(false);
// Shared function no longer needs to be tiered up.
shared->set_marked_for_tier_up(false);
DCHECK(!shared->HasBreakInfo());
// 1) Optimization on the concurrent thread may have failed.
@ -1047,10 +1042,6 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
}
info->closure()->ReplaceCode(shared->code());
// Clear the InOptimizationQueue marker, if it exists.
if (info->closure()->IsInOptimizationQueue()) {
info->closure()->ClearOptimizationMarker();
}
return CompilationJob::FAILED;
}
@ -1065,11 +1056,8 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
if (function->shared()->is_compiled()) {
// Function has already been compiled. Normally we'd expect the CompileLazy
// builtin to catch cases where we already have compiled code or optimized
// code, but there are paths that call the CompileLazy runtime function
// directly (e.g. failed asm.js compilations), so we include a check for
// those.
// Function has already been compiled, get the optimized code if possible,
// otherwise return baseline code.
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, BailoutId::None())
.ToHandle(&cached_code)) {
@ -1078,10 +1066,26 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
function->ShortPrint();
PrintF(" during unoptimized compile]\n");
}
DCHECK(function->shared()->is_compiled());
return cached_code;
}
// TODO(leszeks): Either handle optimization markers here, or DCHECK that
// there aren't any.
if (function->shared()->marked_for_tier_up()) {
DCHECK(FLAG_mark_shared_functions_for_tier_up);
function->shared()->set_marked_for_tier_up(false);
if (FLAG_trace_opt) {
PrintF("[optimizing method ");
function->ShortPrint();
PrintF(" eagerly (shared function marked for tier up)]\n");
}
Handle<Code> code;
if (GetOptimizedCodeMaybeLater(function).ToHandle(&code)) {
return code;
}
}
return Handle<Code>(function->shared()->code());
} else {
@ -1099,21 +1103,16 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
script->preparsed_scope_data());
}
}
ConcurrencyMode inner_function_mode = FLAG_compiler_dispatcher_eager_inner
? ConcurrencyMode::kConcurrent
: ConcurrencyMode::kNotConcurrent;
Compiler::ConcurrencyMode inner_function_mode =
FLAG_compiler_dispatcher_eager_inner ? Compiler::CONCURRENT
: Compiler::NOT_CONCURRENT;
Handle<Code> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result, GetUnoptimizedCode(&info, inner_function_mode), Code);
if (FLAG_always_opt && !info.shared_info()->HasAsmWasmData()) {
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
PrintF(" because --always-opt]\n");
}
Handle<Code> opt_code;
if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent)
if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
.ToHandle(&opt_code)) {
result = opt_code;
}
@ -1131,9 +1130,9 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
ParseInfo* parse_info = info->parse_info();
ConcurrencyMode inner_function_mode = FLAG_compiler_dispatcher_eager_inner
? ConcurrencyMode::kConcurrent
: ConcurrencyMode::kNotConcurrent;
Compiler::ConcurrencyMode inner_function_mode =
FLAG_compiler_dispatcher_eager_inner ? Compiler::CONCURRENT
: Compiler::NOT_CONCURRENT;
RuntimeCallTimerScope runtimeTimer(
isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
@ -1145,13 +1144,12 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
{ VMState<COMPILER> state(info->isolate());
if (parse_info->literal() == nullptr) {
if (!parsing::ParseProgram(
parse_info, info->isolate(),
inner_function_mode != ConcurrencyMode::kConcurrent)) {
if (!parsing::ParseProgram(parse_info, info->isolate(),
inner_function_mode != Compiler::CONCURRENT)) {
return Handle<SharedFunctionInfo>::null();
}
if (inner_function_mode == ConcurrencyMode::kConcurrent) {
if (inner_function_mode == Compiler::CONCURRENT) {
ParseHandleScope parse_handles(parse_info, info->isolate());
parse_info->ReopenHandlesInNewHandleScope();
parse_info->ast_value_factory()->Internalize(info->isolate());
@ -1308,12 +1306,6 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared()->is_compiled());
DCHECK(function->is_compiled());
DCHECK_IMPLIES(function->HasOptimizationMarker(),
function->IsInOptimizationQueue());
DCHECK_IMPLIES(function->HasOptimizationMarker(),
function->ChecksOptimizationMarker());
DCHECK_IMPLIES(function->IsInOptimizationQueue(),
mode == ConcurrencyMode::kConcurrent);
return true;
}
@ -1326,7 +1318,7 @@ bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
CompilationInfo info(parse_info.zone(), &parse_info, isolate,
Handle<JSFunction>::null());
info.MarkAsDebug();
if (GetUnoptimizedCode(&info, ConcurrencyMode::kNotConcurrent).is_null()) {
if (GetUnoptimizedCode(&info, Compiler::NOT_CONCURRENT).is_null()) {
isolate->clear_pending_exception();
return false;
}
@ -1379,8 +1371,7 @@ bool Compiler::EnsureBytecode(CompilationInfo* info) {
CompilerDispatcher* dispatcher = info->isolate()->compiler_dispatcher();
if (dispatcher->IsEnqueued(info->shared_info())) {
if (!dispatcher->FinishNow(info->shared_info())) return false;
} else if (GetUnoptimizedCode(info, ConcurrencyMode::kNotConcurrent)
.is_null()) {
} else if (GetUnoptimizedCode(info, Compiler::NOT_CONCURRENT).is_null()) {
return false;
}
}
@ -1864,8 +1855,7 @@ MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
JavaScriptFrame* osr_frame) {
DCHECK(!osr_ast_id.IsNone());
DCHECK_NOT_NULL(osr_frame);
return GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent, osr_ast_id,
osr_frame);
return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
}
CompilationJob* Compiler::PrepareUnoptimizedCompilationJob(
@ -1899,15 +1889,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!function->shared()->HasAsmWasmData() &&
function->shared()->is_compiled()) {
// TODO(mvstanton): pass pretenure flag to EnsureLiterals.
JSFunction::EnsureLiterals(function);
if (!function->IsOptimized()) {
// Only mark for optimization if we don't already have optimized code.
if (!function->HasOptimizedCode()) {
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
}
}
function->MarkForOptimization();
}
if (shared->is_compiled()) {


@ -40,6 +40,7 @@ class ThreadedListZoneEntry;
class V8_EXPORT_PRIVATE Compiler : public AllStatic {
public:
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
// ===========================================================================
// The following family of methods ensures a given function is compiled. The


@ -524,8 +524,9 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
if (compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
!compiled_code_->deopt_already_counted()) {
if (function != nullptr && function->IsOptimized() &&
(compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
!compiled_code_->deopt_already_counted())) {
// If the function is optimized, and we haven't counted that deopt yet, then
// increment the function's deopt count so that we can avoid optimising
// functions that deopt too often.
@ -534,7 +535,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
// Soft deopts shouldn't count against the overall deoptimization count
// that can eventually lead to disabling optimization for a function.
isolate->counters()->soft_deopts_executed()->Increment();
} else if (function != nullptr) {
} else {
function->shared()->increment_deopt_count();
}
}
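
The deoptimizer hunk above restores the rule that a deopt is charged to the function at most once per code object, with soft deopts routed to a counter instead of the per-function count. A simplified model of that decision, using invented types:

// Stand-in types; this only models the counting decision, not the deoptimizer.
struct Counters { int soft_deopts_executed = 0; };
struct SharedFunctionInfo { int deopt_count = 0; };
enum class BailoutType { kEager, kLazy, kSoft };

void CountDeopt(bool function_is_optimized, bool already_counted,
                BailoutType type, Counters& counters,
                SharedFunctionInfo& shared) {
  if (!function_is_optimized || already_counted) return;
  if (type == BailoutType::kSoft) {
    counters.soft_deopts_executed++;  // soft deopts don't disable optimization
  } else {
    shared.deopt_count++;  // too many of these disables optimization later
  }
}
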


@ -96,10 +96,9 @@ class V8_EXPORT_PRIVATE StackGuard final {
V(API_INTERRUPT, ApiInterrupt, 4) \
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5)
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
inline bool CheckAndClear##Name() { return CheckAndClearInterrupt(NAME); } \
inline void Request##Name() { RequestInterrupt(NAME); } \
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
inline void Request##Name() { RequestInterrupt(NAME); } \
inline void Clear##Name() { ClearInterrupt(NAME); }
INTERRUPT_LIST(V)
#undef V
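
This StackGuard hunk edits an X-macro: INTERRUPT_LIST(V) instantiates the V(...) definition once per interrupt, so dropping the CheckAndClear##Name line removes that accessor for every interrupt at once. A minimal, self-contained illustration of the pattern; the list, class, and bit values below are made up for the example.

#include <iostream>

// Invented interrupt list for illustration only.
#define EXAMPLE_INTERRUPT_LIST(V) \
  V(GC_REQUEST, GcRequest)        \
  V(API_INTERRUPT, ApiInterrupt)

class ExampleStackGuard {
 public:
#define V(NAME, Name)                                       \
  bool Check##Name() const { return (bits_ & NAME) != 0; }  \
  void Request##Name() { bits_ |= NAME; }                   \
  void Clear##Name() { bits_ &= ~NAME; }
  EXAMPLE_INTERRUPT_LIST(V)
#undef V

 private:
  enum InterruptBit { GC_REQUEST = 1 << 0, API_INTERRUPT = 1 << 1 };
  int bits_ = 0;
};

int main() {
  ExampleStackGuard guard;
  guard.RequestGcRequest();
  std::cout << guard.CheckGcRequest() << "\n";  // prints 1
  guard.ClearGcRequest();
  std::cout << guard.CheckGcRequest() << "\n";  // prints 0
}
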


@ -114,30 +114,13 @@ void FeedbackVector::clear_invocation_count() {
set(kInvocationCountIndex, Smi::kZero);
}
Object* FeedbackVector::optimized_code_cell() const {
return get(kOptimizedCodeIndex);
}
Code* FeedbackVector::optimized_code() const {
Object* slot = optimized_code_cell();
if (slot->IsSmi()) return nullptr;
WeakCell* cell = WeakCell::cast(slot);
WeakCell* cell = WeakCell::cast(get(kOptimizedCodeIndex));
return cell->cleared() ? nullptr : Code::cast(cell->value());
}
OptimizationMarker FeedbackVector::optimization_marker() const {
Object* slot = optimized_code_cell();
if (!slot->IsSmi()) return OptimizationMarker::kNone;
Smi* value = Smi::cast(slot);
return static_cast<OptimizationMarker>(value->value());
}
bool FeedbackVector::has_optimized_code() const {
return optimized_code() != nullptr;
}
bool FeedbackVector::has_optimization_marker() const {
return optimization_marker() != OptimizationMarker::kNone;
return !WeakCell::cast(get(kOptimizedCodeIndex))->cleared();
}
// Conversion from an integer index to either a slot or an ic slot.
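
The accessors removed here rely on the optimized-code slot carrying either a Smi marker or a weak cell. As a rough illustration of that dual encoding, here is a sketch using std::variant in place of the real Smi/heap-object distinction; WeakCell and Code are stand-ins.

#include <variant>

enum class OptimizationMarker {
  kNone, kCompileOptimized, kCompileOptimizedConcurrent, kInOptimizationQueue
};
struct Code {};
struct WeakCell {
  Code* value = nullptr;
  bool cleared() const { return value == nullptr; }
};
using OptimizedCodeSlot = std::variant<OptimizationMarker, WeakCell>;

Code* optimized_code(const OptimizedCodeSlot& slot) {
  if (std::holds_alternative<OptimizationMarker>(slot)) return nullptr;
  const WeakCell& cell = std::get<WeakCell>(slot);
  return cell.cleared() ? nullptr : cell.value;
}

OptimizationMarker optimization_marker(const OptimizedCodeSlot& slot) {
  if (const auto* marker = std::get_if<OptimizationMarker>(&slot)) return *marker;
  return OptimizationMarker::kNone;
}
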


@ -202,7 +202,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set_map_no_write_barrier(isolate->heap()->feedback_vector_map());
array->set(kSharedFunctionInfoIndex, *shared);
array->set(kOptimizedCodeIndex, Smi::FromEnum(OptimizationMarker::kNone));
array->set(kOptimizedCodeIndex, *factory->empty_weak_cell());
array->set(kInvocationCountIndex, Smi::kZero);
// Ensure we can skip the write barrier
@ -305,38 +305,28 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
vector->set(kOptimizedCodeIndex, *cell);
}
void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
set(kOptimizedCodeIndex, Smi::FromEnum(marker));
}
void FeedbackVector::ClearOptimizedCode() {
set(kOptimizedCodeIndex, Smi::FromEnum(OptimizationMarker::kNone));
set(kOptimizedCodeIndex, GetIsolate()->heap()->empty_weak_cell());
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo* shared, const char* reason) {
Object* slot = get(kOptimizedCodeIndex);
if (slot->IsSmi()) return;
WeakCell* cell = WeakCell::cast(slot);
if (cell->cleared()) {
ClearOptimizedCode();
return;
}
Code* code = Code::cast(cell->value());
if (code->marked_for_deoptimization()) {
if (FLAG_trace_deopt) {
PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
reason);
shared->ShortPrint();
PrintF("]\n");
WeakCell* cell = WeakCell::cast(get(kOptimizedCodeIndex));
if (!cell->cleared()) {
Code* code = Code::cast(cell->value());
if (code->marked_for_deoptimization()) {
if (FLAG_trace_deopt) {
PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
reason);
shared->ShortPrint();
PrintF("]\n");
}
if (!code->deopt_already_counted()) {
shared->increment_deopt_count();
code->set_deopt_already_counted(true);
}
ClearOptimizedCode();
}
if (!code->deopt_already_counted()) {
shared->increment_deopt_count();
code->set_deopt_already_counted(true);
}
ClearOptimizedCode();
}
}
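
EvictOptimizedCodeMarkedForDeoptimization clears the slot when its code has been marked for deoptimization, charging the deopt to the shared function only once. A minimal model of that logic with stand-in types:

// Stand-in types; only the eviction decision is modelled.
struct Code {
  bool marked_for_deoptimization = false;
  bool deopt_already_counted = false;
};
struct SharedFunctionInfo { int deopt_count = 0; };
struct FeedbackVector {
  Code* optimized_code = nullptr;  // null models a cleared weak cell

  void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo& shared) {
    if (optimized_code == nullptr) return;
    if (!optimized_code->marked_for_deoptimization) return;
    if (!optimized_code->deopt_already_counted) {
      shared.deopt_count++;  // charge the deopt once per code object
      optimized_code->deopt_already_counted = true;
    }
    optimized_code = nullptr;  // ClearOptimizedCode()
  }
};
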


@ -325,17 +325,13 @@ class FeedbackVector : public FixedArray {
inline int invocation_count() const;
inline void clear_invocation_count();
inline Object* optimized_code_cell() const;
inline Code* optimized_code() const;
inline OptimizationMarker optimization_marker() const;
inline bool has_optimized_code() const;
inline bool has_optimization_marker() const;
void ClearOptimizedCode();
void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo* shared,
const char* reason);
static void SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code);
void SetOptimizationMarker(OptimizationMarker marker);
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) {


@ -270,6 +270,10 @@ DEFINE_IMPLICATION(future, turbo)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
"mark shared functions for tier up")
DEFINE_BOOL(mark_optimizing_shared_functions, true,
"mark shared functions if they are concurrently optimizing")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")


@ -1410,31 +1410,6 @@ inline std::ostream& operator<<(std::ostream& os,
return os;
}
enum class OptimizationMarker {
kNone,
kCompileOptimized,
kCompileOptimizedConcurrent,
kInOptimizationQueue
};
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
switch (marker) {
case OptimizationMarker::kNone:
return os << "OptimizationMarker::kNone";
case OptimizationMarker::kCompileOptimized:
return os << "OptimizationMarker::kCompileOptimized";
case OptimizationMarker::kCompileOptimizedConcurrent:
return os << "OptimizationMarker::kCompileOptimizedConcurrent";
case OptimizationMarker::kInOptimizationQueue:
return os << "OptimizationMarker::kInOptimizationQueue";
}
UNREACHABLE();
return os;
}
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
} // namespace internal
} // namespace v8


@ -3896,13 +3896,6 @@ inline bool Code::is_interpreter_trampoline_builtin() {
this == *builtins->InterpreterEnterBytecodeDispatch();
}
inline bool Code::checks_optimization_marker() {
Builtins* builtins = GetIsolate()->builtins();
return this == *builtins->CompileLazy() ||
this == *builtins->InterpreterEntryTrampoline() ||
this == *builtins->CheckOptimizationMarker();
}
inline bool Code::has_unwinding_info() const {
return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
}
@ -4834,45 +4827,25 @@ bool JSFunction::IsOptimized() {
return code()->kind() == Code::OPTIMIZED_FUNCTION;
}
bool JSFunction::HasOptimizedCode() {
return IsOptimized() ||
(has_feedback_vector() && feedback_vector()->has_optimized_code());
}
bool JSFunction::HasOptimizationMarker() {
return has_feedback_vector() && feedback_vector()->has_optimization_marker();
}
void JSFunction::ClearOptimizationMarker() {
DCHECK(has_feedback_vector());
DCHECK(!feedback_vector()->has_optimized_code());
feedback_vector()->SetOptimizationMarker(OptimizationMarker::kNone);
}
bool JSFunction::IsInterpreted() {
return code()->is_interpreter_trampoline_builtin();
}
bool JSFunction::ChecksOptimizationMarker() {
return code()->checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
return has_feedback_vector() && feedback_vector()->optimization_marker() ==
OptimizationMarker::kCompileOptimized;
return code() == GetIsolate()->builtins()->builtin(
Builtins::kCompileOptimized);
}
bool JSFunction::IsMarkedForConcurrentOptimization() {
return has_feedback_vector() &&
feedback_vector()->optimization_marker() ==
OptimizationMarker::kCompileOptimizedConcurrent;
return code() == GetIsolate()->builtins()->builtin(
Builtins::kCompileOptimizedConcurrent);
}
bool JSFunction::IsInOptimizationQueue() {
return has_feedback_vector() && feedback_vector()->optimization_marker() ==
OptimizationMarker::kInOptimizationQueue;
return code() == GetIsolate()->builtins()->builtin(
Builtins::kInOptimizationQueue);
}
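
The predicates in this hunk switch between two encodings of "marked for optimization": the removed lines read a marker from the feedback vector, while the restored lines compare the installed code object against a specific builtin. Both styles, sketched with invented types:

// Invented types; only the shape of the two checks is shown.
struct Code {};
struct Builtins { Code compile_optimized_concurrent; /* ... */ };
enum class OptimizationMarker {
  kNone, kCompileOptimized, kCompileOptimizedConcurrent, kInOptimizationQueue
};

// Restored style: the request is encoded in which builtin is installed.
bool IsMarkedForConcurrentOptimization(const Code* code, const Builtins& builtins) {
  return code == &builtins.compile_optimized_concurrent;
}

// Removed style: the request is a marker in the function's feedback vector.
bool IsMarkedForConcurrentOptimization(OptimizationMarker marker) {
  return marker == OptimizationMarker::kCompileOptimizedConcurrent;
}
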
@ -4933,32 +4906,29 @@ void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
reason);
ShortPrint();
shared()->ShortPrint();
PrintF("]\n");
}
feedback_vector()->ClearOptimizedCode();
}
}
void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
DCHECK(has_feedback_vector());
DCHECK(ChecksOptimizationMarker());
DCHECK(!HasOptimizedCode());
feedback_vector()->SetOptimizationMarker(marker);
}
void JSFunction::ReplaceCode(Code* code) {
bool was_optimized = this->code()->kind() == Code::OPTIMIZED_FUNCTION;
bool was_optimized = IsOptimized();
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
if (was_optimized && is_optimized) {
ClearOptimizedCodeSlot("Replacing with another optimized code");
}
set_code(code);
// Add/remove the function from the list of optimized functions for this
// context based on the state change.
if (!was_optimized && is_optimized) {
context()->native_context()->AddOptimizedFunction(this);
} else if (was_optimized && !is_optimized) {
}
if (was_optimized && !is_optimized) {
// TODO(titzer): linear in the number of optimized functions; fix!
context()->native_context()->RemoveOptimizedFunction(this);
}
@ -5053,7 +5023,9 @@ Object* JSFunction::prototype() {
bool JSFunction::is_compiled() {
Builtins* builtins = GetIsolate()->builtins();
return code() != builtins->builtin(Builtins::kCompileLazy);
return code() != builtins->builtin(Builtins::kCompileLazy) &&
code() != builtins->builtin(Builtins::kCompileOptimized) &&
code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
}
ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
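After the revert, is_compiled treats a function whose code is any of the compile builtins as not yet compiled. A minimal sketch of the guard pattern that relies on it (illustrative only; it assumes `function` is a Handle<JSFunction> and that Compiler::Compile with the keep-exception flag is available, as elsewhere in this era of the tree):

// Hypothetical call site: compile lazily before touching the code object,
// propagating the pending exception on failure.
if (!function->is_compiled() &&
    !Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
  return isolate->heap()->exception();
}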


@ -11997,43 +11997,46 @@ bool JSFunction::Inlines(SharedFunctionInfo* candidate) {
return false;
}
void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
void JSFunction::MarkForOptimization() {
Isolate* isolate = GetIsolate();
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() ||
!shared()->optimization_disabled());
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
if (FLAG_mark_shared_functions_for_tier_up) {
shared()->set_marked_for_tier_up(true);
}
}
void JSFunction::AttemptConcurrentOptimization() {
Isolate* isolate = GetIsolate();
if (!isolate->concurrent_recompilation_enabled() ||
isolate->bootstrapper()->IsActive()) {
mode = ConcurrencyMode::kNotConcurrent;
MarkForOptimization();
return;
}
DCHECK(!IsInOptimizationQueue());
DCHECK(!IsOptimized());
DCHECK(!HasOptimizedCode());
DCHECK(shared()->allows_lazy_compilation() ||
!shared()->optimization_disabled());
if (mode == ConcurrencyMode::kConcurrent) {
if (IsInOptimizationQueue()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Not marking ");
ShortPrint();
PrintF(" -- already in optimization queue.\n");
}
return;
}
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
ShortPrint();
PrintF(" for concurrent recompilation.\n");
}
DCHECK(isolate->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
ShortPrint();
PrintF(" for concurrent recompilation.\n");
}
if (!IsInterpreted()) {
// For non I+TF path, install a shim which checks the optimization marker.
// No write barrier required, since the builtin is part of the root set.
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
// No write barrier required, since the builtin is part of the root set.
if (FLAG_mark_shared_functions_for_tier_up) {
// TODO(leszeks): The compilation isn't concurrent if we trigger it using
// this bit.
shared()->set_marked_for_tier_up(true);
}
SetOptimizationMarker(mode == ConcurrencyMode::kConcurrent
? OptimizationMarker::kCompileOptimizedConcurrent
: OptimizationMarker::kCompileOptimized);
}
// static
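The revert restores the split between MarkForOptimization and AttemptConcurrentOptimization, so callers pick the entry point themselves. A minimal sketch of that call pattern (RequestOptimization is a hypothetical wrapper, not V8 code):

// Hypothetical wrapper: fall back to synchronous marking when the concurrent
// recompiler is unavailable, mirroring the bootstrapper check above.
void RequestOptimization(JSFunction* function, bool prefer_concurrent) {
  if (prefer_concurrent &&
      function->GetIsolate()->concurrent_recompilation_enabled()) {
    function->AttemptConcurrentOptimization();
  } else {
    function->MarkForOptimization();
  }
}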


@ -1588,13 +1588,6 @@ class Smi: public Object {
return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
}
template <typename E,
typename = typename std::enable_if<std::is_enum<E>::value>::type>
static inline Smi* FromEnum(E value) {
STATIC_ASSERT(sizeof(E) <= sizeof(int));
return FromInt(static_cast<int>(value));
}
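Smi::FromEnum, deleted here, was how enum values such as the optimization marker were packed into a tagged slot. Roughly (a sketch, valid only for enums whose values fit in a Smi):

// Pack an enum into a Smi for storage in a tagged field, then unpack it;
// the feedback-vector marker slot relied on exactly this round trip.
Smi* tagged = Smi::FromEnum(OptimizationMarker::kCompileOptimized);
OptimizationMarker marker = static_cast<OptimizationMarker>(tagged->value());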
// Returns whether value can be represented in a Smi.
static inline bool IsValid(intptr_t value) {
bool result = Internals::IsValidSmi(value);
@ -3714,10 +3707,6 @@ class Code: public HeapObject {
// Testers for interpreter builtins.
inline bool is_interpreter_trampoline_builtin();
// Tells whether the code checks the optimization marker in the function's
// feedback vector.
inline bool checks_optimization_marker();
// [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
// object was generated by either the hydrogen or the TurboFan optimizing
// compiler (but it may not be an optimized function).
@ -5069,27 +5058,13 @@ class JSFunction: public JSObject {
// optimized.
inline bool IsInterpreted();
// Tells whether or not this function checks its optimization marker in its
// feedback vector.
inline bool ChecksOptimizationMarker();
// Tells whether or not this function holds optimized code.
//
// Note: Returning false does not necessarily mean that this function hasn't
// been optimized, as it may have optimized code on its feedback vector.
// Tells whether or not this function has been optimized.
inline bool IsOptimized();
// Tells whether or not this function has optimized code available to it,
// either because it is optimized or because it has optimized code in its
// feedback vector.
inline bool HasOptimizedCode();
// Tells whether or not this function has a (non-zero) optimization marker.
inline bool HasOptimizationMarker();
// Mark this function for lazy recompilation. The function will be recompiled
// the next time it is executed.
void MarkForOptimization(ConcurrencyMode mode);
void MarkForOptimization();
void AttemptConcurrentOptimization();
// Tells whether or not the function is already marked for lazy recompilation.
inline bool IsMarkedForOptimization();
@ -5101,12 +5076,6 @@ class JSFunction: public JSObject {
// Clears the optimized code slot in the function's feedback vector.
inline void ClearOptimizedCodeSlot(const char* reason);
// Sets the optimization marker in the function's feedback vector.
inline void SetOptimizationMarker(OptimizationMarker marker);
// Clears the optimization marker in the function's feedback vector.
inline void ClearOptimizationMarker();
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();


@ -86,6 +86,11 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function,
SharedFunctionInfo::IsAsmFunctionBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
SharedFunctionInfo::IsDeclarationBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
SharedFunctionInfo::MarkedForTierUpBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
has_concurrent_optimization_job,
SharedFunctionInfo::HasConcurrentOptimizationJobBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
SharedFunctionInfo::NeedsHomeObjectBit)
@ -224,7 +229,8 @@ ACCESSORS(SharedFunctionInfo, outer_scope_info, HeapObject,
bool SharedFunctionInfo::is_compiled() const {
Builtins* builtins = GetIsolate()->builtins();
DCHECK(code() != builtins->builtin(Builtins::kCheckOptimizationMarker));
DCHECK(code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent));
DCHECK(code() != builtins->builtin(Builtins::kCompileOptimized));
return code() != builtins->builtin(Builtins::kCompileLazy);
}


@ -305,6 +305,12 @@ class SharedFunctionInfo : public HeapObject {
// Whether this function was created from a FunctionDeclaration.
DECL_BOOLEAN_ACCESSORS(is_declaration)
// Whether this function was marked to be tiered up.
DECL_BOOLEAN_ACCESSORS(marked_for_tier_up)
// Whether this function has a concurrent compilation job running.
DECL_BOOLEAN_ACCESSORS(has_concurrent_optimization_job)
// Indicates that asm->wasm conversion failed and should not be re-attempted.
DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
@ -495,22 +501,24 @@ class SharedFunctionInfo : public HeapObject {
#undef START_POSITION_AND_TYPE_BIT_FIELDS
// Bit positions in |compiler_hints|.
#define COMPILER_HINTS_BIT_FIELDS(V, _) \
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
V(FunctionKindBits, FunctionKind, 10, _) \
V(HasDuplicateParametersBit, bool, 1, _) \
V(AllowLazyCompilationBit, bool, 1, _) \
V(OptimizationDisabledBit, bool, 1, _) \
V(UsesArgumentsBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
V(ForceInlineBit, bool, 1, _) \
V(IsAsmFunctionBit, bool, 1, _) \
V(MustUseIgnitionTurboBit, bool, 1, _) \
V(IsDeclarationBit, bool, 1, _) \
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 4, _) \
/* Bits 26-31 are unused. */
#define COMPILER_HINTS_BIT_FIELDS(V, _) \
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
V(FunctionKindBits, FunctionKind, 10, _) \
V(MarkedForTierUpBit, bool, 1, _) \
V(HasDuplicateParametersBit, bool, 1, _) \
V(AllowLazyCompilationBit, bool, 1, _) \
V(OptimizationDisabledBit, bool, 1, _) \
V(UsesArgumentsBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
V(ForceInlineBit, bool, 1, _) \
V(IsAsmFunctionBit, bool, 1, _) \
V(MustUseIgnitionTurboBit, bool, 1, _) \
V(IsDeclarationBit, bool, 1, _) \
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(HasConcurrentOptimizationJobBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 4, _) \
/* Bits 28-31 are unused. */
DEFINE_BIT_FIELDS(COMPILER_HINTS_BIT_FIELDS)
#undef COMPILER_HINTS_BIT_FIELDS
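Each entry in the macro above expands into a BitField class over compiler_hints, so the restored hints are read and written through the usual update/decode helpers; a sketch, assuming the standard BitField interface (the real accessors go through the BIT_FIELD_ACCESSORS macros shown earlier):

// Set and read the tier-up hint inside a packed compiler_hints word using
// the generated MarkedForTierUpBit helper class.
int hints = 0;
hints = SharedFunctionInfo::MarkedForTierUpBit::update(hints, true);
bool marked_for_tier_up =
    SharedFunctionInfo::MarkedForTierUpBit::decode(hints);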


@ -150,7 +150,7 @@ void RuntimeProfiler::Optimize(JSFunction* function,
OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
function->MarkForOptimization(ConcurrencyMode::kConcurrent);
function->AttemptConcurrentOptimization();
}
void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
@ -218,7 +218,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
} else if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->HasOptimizedCode())) {
function->IsOptimized())) {
// Attempt OSR if we are still running unoptimized code even though the
// the function has long been marked or even already been optimized.
int ticks = shared->profiler_ticks();
@ -343,7 +343,7 @@ bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->HasOptimizedCode())) {
function->IsOptimized())) {
// Attempt OSR if we are still running interpreted code even though the
// the function has long been marked or even already been optimized.
int64_t allowance =


@ -51,7 +51,7 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
return isolate->StackOverflow();
}
if (!Compiler::CompileOptimized(function, ConcurrencyMode::kConcurrent)) {
if (!Compiler::CompileOptimized(function, Compiler::CONCURRENT)) {
return isolate->heap()->exception();
}
DCHECK(function->is_compiled());
@ -67,7 +67,7 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
return isolate->StackOverflow();
}
if (!Compiler::CompileOptimized(function, ConcurrencyMode::kNotConcurrent)) {
if (!Compiler::CompileOptimized(function, Compiler::NOT_CONCURRENT)) {
return isolate->heap()->exception();
}
DCHECK(function->is_compiled());
@ -79,8 +79,7 @@ RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
DCHECK(function->shared()->is_compiled());
DCHECK(function->is_compiled());
function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "Runtime_EvictOptimizedCodeSlot");
return function->code();
@ -365,8 +364,8 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
function->PrintName();
PrintF(" for non-concurrent optimization]\n");
}
function->SetOptimizationMarker(
OptimizationMarker::kCompileOptimized);
function->ReplaceCode(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
}
} else {
// Crankshafted OSR code can be installed into the function.
@ -402,11 +401,7 @@ RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
return isolate->StackOverflow();
}
// Only try to install optimized functions if the interrupt was InstallCode.
if (isolate->stack_guard()->CheckAndClearInstallCode()) {
isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
}
isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
return (function->IsOptimized()) ? function->code()
: function->shared()->code();
}


@ -122,6 +122,7 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
return isolate->heap()->undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
function->shared()->set_marked_for_tier_up(false);
// If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
@ -233,41 +234,22 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
}
// If the function is already optimized, just return.
if (function->IsOptimized()) {
return isolate->heap()->undefined_value();
if (function->IsOptimized()) return isolate->heap()->undefined_value();
function->MarkForOptimization();
if (FLAG_trace_opt) {
PrintF("[manually marking ");
function->ShortPrint();
PrintF(" for optimization]\n");
}
// If the function has optimized code, ensure that we check for it and return.
if (function->HasOptimizedCode()) {
if (!function->IsInterpreted()) {
// For non I+TF path, install a shim which checks the optimization marker.
function->ReplaceCode(
isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
}
DCHECK(function->ChecksOptimizationMarker());
return isolate->heap()->undefined_value();
}
ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
isolate->concurrent_recompilation_enabled()) {
concurrency_mode = ConcurrencyMode::kConcurrent;
function->AttemptConcurrentOptimization();
}
}
if (FLAG_trace_opt) {
PrintF("[manually marking ");
function->ShortPrint();
PrintF(" for %s optimization]\n",
concurrency_mode == ConcurrencyMode::kConcurrent ? "concurrent"
: "non-concurrent");
}
// TODO(mvstanton): pass pretenure flag to EnsureLiterals.
JSFunction::EnsureLiterals(function);
function->MarkForOptimization(concurrency_mode);
return isolate->heap()->undefined_value();
}


@ -26,7 +26,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -67,7 +67,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kAbort), R(21), U8(1),
/* 40 S> */ B(LdaUndefined),
@ -357,7 +357,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -398,7 +398,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kAbort), R(21), U8(1),
/* 40 S> */ B(LdaUndefined),
@ -705,7 +705,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -746,7 +746,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kAbort), R(21), U8(1),
/* 40 S> */ B(LdaUndefined),


@ -653,7 +653,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -838,7 +838,7 @@ bytecodes: [
B(RestoreGeneratorState), R(10),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
B(LdaSmi), I8(-2),
@ -887,7 +887,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 35 E> */ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(17),
B(CallRuntime), U16(Runtime::kAbort), R(17), U8(1),
/* 30 S> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
@ -1272,7 +1272,7 @@ bytecodes: [
B(RestoreGeneratorState), R(11),
B(Star), R(12),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(-2),
@ -1310,7 +1310,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 40 E> */ B(TestEqualStrictNoFeedback), R(12),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(22),
B(CallRuntime), U16(Runtime::kAbort), R(22), U8(1),
/* 35 S> */ B(LdaNamedProperty), R(5), U8(3), U8(9),


@ -23,7 +23,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), I8(-2),
@ -82,7 +82,7 @@ bytecodes: [
B(RestoreGeneratorState), R(1),
B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), I8(-2),
@ -165,7 +165,7 @@ bytecodes: [
B(RestoreGeneratorState), R(10),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
B(LdaSmi), I8(-2),
@ -210,7 +210,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 30 E> */ B(TestEqualStrictNoFeedback), R(11),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(16),
B(CallRuntime), U16(Runtime::kAbort), R(16), U8(1),
/* 25 S> */ B(LdaNamedProperty), R(4), U8(7), U8(10),
@ -383,7 +383,7 @@ bytecodes: [
B(RestoreGeneratorState), R(9),
B(Star), R(10),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
B(LdaSmi), I8(-2),
@ -432,7 +432,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(10),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
B(StackCheck),


@ -23,7 +23,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -90,7 +90,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -159,7 +159,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -249,7 +249,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -337,7 +337,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -427,7 +427,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -515,7 +515,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -587,7 +587,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -671,7 +671,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -738,7 +738,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
@ -806,7 +806,7 @@ bytecodes: [
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),


@ -283,7 +283,7 @@ bytecodes: [
B(RestoreGeneratorState), R(7),
B(Star), R(8),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(9),
B(CallRuntime), U16(Runtime::kAbort), R(9), U8(1),
B(LdaSmi), I8(-2),
@ -382,7 +382,7 @@ bytecodes: [
B(RestoreGeneratorState), R(6),
B(Star), R(7),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(8),
B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
B(LdaSmi), I8(-2),
@ -420,7 +420,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 54 E> */ B(TestEqualStrictNoFeedback), R(7),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(8),
B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
B(StackCheck),
@ -446,7 +446,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(7),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(8),
B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
B(LdaSmi), I8(1),
@ -645,7 +645,7 @@ bytecodes: [
B(RestoreGeneratorState), R(7),
B(Star), R(8),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(9),
B(CallRuntime), U16(Runtime::kAbort), R(9), U8(1),
B(LdaSmi), I8(-2),
@ -672,7 +672,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
/* 59 E> */ B(TestEqualStrictNoFeedback), R(8),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(StackCheck),
@ -698,7 +698,7 @@ bytecodes: [
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(8),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(81),
B(LdaSmi), I8(79),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
B(LdaSmi), I8(1),