Revert "[builtins] Remove CheckOptimizationMarker builtin"

This reverts commit daa224d4d0.

Reason for revert: https://bugs.chromium.org/p/chromium/issues/detail?id=819873

Original change's description:
> [builtins] Remove CheckOptimizationMarker builtin
> 
> This was a shim for the non-I+TF codepath, which is now the only
> codepath (that still uses this tier-up mechanism anyway). There were a
> couple of places we were accidentally using it due to CompileLazy or
> deopts, so this also fixes those.
> 
> Change-Id: I00a7fdf9fb5cf74844138dac62d01ceaaf192e17
> Reviewed-on: https://chromium-review.googlesource.com/951490
> Commit-Queue: Michael Achenbach <machenbach@chromium.org>
> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#51786}

TBR=rmcilroy@chromium.org,machenbach@chromium.org,leszeks@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Change-Id: I80765eb18aaf5086e6db5d5df96f608a317c999f
Reviewed-on: https://chromium-review.googlesource.com/957022
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51829}
Leszek Swirski authored on 2018-03-09 09:06:50 +00:00; committed by Commit Bot
parent 481aa56d87
commit 37c362638b
16 changed files with 275 additions and 18 deletions
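Each of the per-architecture hunks below re-adds the same builtin: load the closure's feedback vector, let MaybeTailCallOptimizedCodeSlot deal with any optimization marker or optimized code slot, and otherwise tail-call the code held by the SharedFunctionInfo. For orientation only, here is a minimal standalone C++ model of that dispatch; the types, fields, and names are hypothetical stand-ins, not V8 code:

#include <cstdio>

// Hypothetical stand-ins for the feedback-vector state (not V8's types).
enum class Marker { kNone, kCompileOptimized, kInOptimizationQueue };

struct FeedbackVector {
  Marker marker = Marker::kNone;
  void (*optimized_code)() = nullptr;  // filled in when optimization finishes
};

struct JSFunctionModel {
  FeedbackVector* feedback_vector;
  void (*sfi_code)();  // code held by the SharedFunctionInfo
};

// Models the shape of Generate_CheckOptimizationMarker: prefer the optimized
// code slot, note any pending marker, otherwise fall through to the SFI code.
void CheckOptimizationMarker(JSFunctionModel* fn) {
  FeedbackVector* vector = fn->feedback_vector;
  if (vector->optimized_code != nullptr) {
    vector->optimized_code();  // "tail call" the optimized code
    return;
  }
  if (vector->marker != Marker::kNone) {
    std::puts("marker pending: optimization requested but not finished");
  }
  fn->sfi_code();  // otherwise, tail call the SFI code
}

int main() {
  FeedbackVector vector;
  JSFunctionModel fn{&vector, [] { std::puts("baseline code runs"); }};
  CheckOptimizationMarker(&fn);  // falls through to the SFI code
  vector.optimized_code = [] { std::puts("optimized code runs"); };
  CheckOptimizationMarker(&fn);  // now dispatches to the optimized code
}

The real builtins emit this as architecture-specific assembly and use tail calls throughout, so the argument count, target function, and new target registers stay preserved for the callee.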


@@ -1261,6 +1261,37 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : argument count (preserved for callee)
+  //  -- r3 : new target (preserved for callee)
+  //  -- r1 : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = r1;
+
+  // Get the feedback vector.
+  Register feedback_vector = r2;
+  __ ldr(feedback_vector,
+         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector);
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
+
+  // Otherwise, tail call the SFI code.
+  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r2);
+}
+
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
   // Set the code slot inside the JSFunction to the trampoline to the
   // interpreter entry.


@@ -1381,6 +1381,37 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : argument count (preserved for callee)
+  //  -- x3 : new target (preserved for callee)
+  //  -- x1 : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = x1;
+
+  // Get the feedback vector.
+  Register feedback_vector = x2;
+  __ Ldr(feedback_vector,
+         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector);
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
+
+  // Otherwise, tail call the SFI code.
+  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x2);
+}
+
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
   // Set the code slot inside the JSFunction to the trampoline to the
   // interpreter entry.


@@ -123,6 +123,7 @@ namespace internal {
   /* Code life-cycle */           \
   ASM(CompileLazy)                \
   ASM(CompileLazyDeoptimizedCode) \
+  ASM(CheckOptimizationMarker)    \
   ASM(DeserializeLazy)            \
   ASM(InstantiateAsmJs)           \
   ASM(NotifyDeoptimized)          \


@@ -253,6 +253,7 @@ bool Builtins::IsLazy(int index) {
     case kArraySomeLoopLazyDeoptContinuation:  // https://crbug.com/v8/6786.
     case kAsyncGeneratorAwaitCaught:           // https://crbug.com/v8/6786.
     case kAsyncGeneratorAwaitUncaught:         // https://crbug.com/v8/6786.
+    case kCheckOptimizationMarker:
     case kCompileLazy:
     case kDebugBreakTrampoline:
     case kDeserializeLazy:


@@ -1308,6 +1308,37 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = edi;
+
+  // Get the feedback vector.
+  Register feedback_vector = ebx;
+  __ mov(feedback_vector,
+         FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+    __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
+
+  // Otherwise, tail call the SFI code.
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
   // Set the code slot inside the JSFunction to the trampoline to the
   // interpreter entry.


@@ -1266,6 +1266,38 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = a1;
+
+  // Get the feedback vector.
+  Register feedback_vector = a2;
+  __ lw(feedback_vector,
+        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
+              Operand(at));
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
+
+  // Otherwise, tail call the SFI code.
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+  __ Jump(a2);
+}
+
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
   // Set the code slot inside the JSFunction to the trampoline to the
   // interpreter entry.


@@ -1264,6 +1264,38 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = a1;
+
+  // Get the feedback vector.
+  Register feedback_vector = a2;
+  __ Ld(feedback_vector,
+        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
+              Operand(at));
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
+
+  // Otherwise, tail call the SFI code.
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
+}
+
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
   // Set the code slot inside the JSFunction to the trampoline to the
   // interpreter entry.


@@ -1297,6 +1297,38 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee)
+  //  -- r6 : new target (preserved for callee)
+  //  -- r4 : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = r4;
+
+  // Get the feedback vector.
+  Register feedback_vector = r5;
+  __ LoadP(feedback_vector,
+           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadP(feedback_vector,
+           FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector);
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+
+  // Otherwise, tail call the SFI code.
+  static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+  __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+  __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(r5);
+}
+
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
   // Set the code slot inside the JSFunction to the trampoline to the
   // interpreter entry.


@@ -1295,6 +1295,38 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argument count (preserved for callee)
+  //  -- r5 : new target (preserved for callee)
+  //  -- r3 : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = r3;
+
+  // Get the feedback vector.
+  Register feedback_vector = r4;
+  __ LoadP(feedback_vector,
+           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadP(feedback_vector,
+           FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector);
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
+  // Otherwise, tail call the SFI code.
+  static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+  __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+  __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(r4);
+}
+
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
   // Set the code slot inside the JSFunction to the trampoline to the
   // interpreter entry.


@@ -1280,6 +1280,37 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argument count (preserved for callee)
+  //  -- rdx : new target (preserved for callee)
+  //  -- rdi : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = rdi;
+
+  // Get the feedback vector.
+  Register feedback_vector = rbx;
+  __ movp(feedback_vector,
+          FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+    __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
+
+  // Otherwise, tail call the SFI code.
+  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ jmp(rcx);
+}
+
 // TODO(jupvfranco): investigate whether there is any case where the CompileLazy
 // builtin does not set the code field in the JS function. If there isn't then
 // we do not need this builtin and can jump directly to CompileLazy.


@@ -703,10 +703,11 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
 
       // Set the optimization marker and return a code object which checks it.
       function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
-      DCHECK(function->IsInterpreted() ||
-             (!function->is_compiled() && function->shared()->IsInterpreted()));
-      DCHECK(function->shared()->HasBytecodeArray());
-      return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
+      if (function->IsInterpreted()) {
+        return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
+      } else {
+        return BUILTIN_CODE(isolate, CheckOptimizationMarker);
+      }
     }
   } else {
     if (GetOptimizedCodeNow(job.get(), isolate))
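This hunk again makes the queued-for-concurrent-compilation path return a marker-checking code object instead of assuming the interpreter trampoline. A hedged sketch of the restored decision (illustrative enum, not V8's API):

// Illustrative only: while a concurrent job is in flight, the function must
// run code that re-checks the optimization marker on every entry.
enum class Builtin { kInterpreterEntryTrampoline, kCheckOptimizationMarker };

Builtin CodeWhileInOptimizationQueue(bool is_interpreted) {
  // Both builtins check the marker; only the entry point differs.
  return is_interpreted ? Builtin::kInterpreterEntryTrampoline
                        : Builtin::kCheckOptimizationMarker;
}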


@@ -2551,12 +2551,8 @@ void JSFunction::ClearOptimizationMarker() {
   feedback_vector()->ClearOptimizationMarker();
 }
 
-// Optimized code marked for deoptimization will tier back down to running
-// interpreted on its next activation, and already doesn't count as IsOptimized.
 bool JSFunction::IsInterpreted() {
-  return code()->is_interpreter_trampoline_builtin() ||
-         (code()->kind() == Code::OPTIMIZED_FUNCTION &&
-          code()->marked_for_deoptimization());
+  return code()->is_interpreter_trampoline_builtin();
 }
 
 bool JSFunction::ChecksOptimizationMarker() {


@@ -12166,7 +12166,6 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
     mode = ConcurrencyMode::kNotConcurrent;
   }
 
-  DCHECK(IsInterpreted());
   DCHECK(!IsOptimized());
   DCHECK(!HasOptimizedCode());
   DCHECK(shared()->allows_lazy_compilation() ||
@@ -12188,6 +12187,12 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
     }
   }
 
+  if (!IsInterpreted()) {
+    // For non I+TF path, install a shim which checks the optimization marker.
+    // No write barrier required, since the builtin is part of the root set.
+    set_code_no_write_barrier(
+        isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
+  }
   SetOptimizationMarker(mode == ConcurrencyMode::kConcurrent
                             ? OptimizationMarker::kCompileOptimizedConcurrent
                             : OptimizationMarker::kCompileOptimized);
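Together these two hunks restore the invariant the revert needs: after MarkForOptimization, whatever code the function runs next must check the optimization marker; for interpreted functions that is the interpreter entry trampoline, and for everything else it is the freshly re-added shim. A compact model of that invariant (hypothetical types, not V8's):

#include <cassert>

// Hypothetical model (not V8's types) of the restored MarkForOptimization
// invariant: every marked function's entry point checks the marker.
struct FunctionModel {
  bool is_interpreted;          // entry is the interpreter entry trampoline
  bool has_check_shim = false;  // entry is the CheckOptimizationMarker shim
  bool marked = false;

  bool ChecksOptimizationMarker() const {
    return is_interpreted || has_check_shim;
  }
};

void MarkForOptimization(FunctionModel& fn) {
  if (!fn.is_interpreted) {
    // Restored non-I+TF path: install the shim so the next call sees the
    // marker (the real code uses set_code_no_write_barrier).
    fn.has_check_shim = true;
  }
  fn.marked = true;
  assert(fn.ChecksOptimizationMarker());
}

int main() {
  FunctionModel fn{/*is_interpreted=*/false};
  MarkForOptimization(fn);  // installs the shim, then sets the marker
}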


@@ -332,7 +332,8 @@ inline bool Code::checks_optimization_marker() const {
   Builtins* builtins = GetIsolate()->builtins();
   bool checks_marker =
       (this == builtins->builtin(Builtins::kCompileLazy) ||
-       this == builtins->builtin(Builtins::kInterpreterEntryTrampoline));
+       this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
+       this == builtins->builtin(Builtins::kCheckOptimizationMarker));
   DCHECK_IMPLIES(checks_marker, !Builtins::IsLazy(builtin_index()));
   return checks_marker ||
          (kind() == OPTIMIZED_FUNCTION && marked_for_deoptimization());


@@ -234,6 +234,7 @@ ACCESSORS(SharedFunctionInfo, outer_scope_info, HeapObject,
 
 bool SharedFunctionInfo::is_compiled() const {
   Builtins* builtins = GetIsolate()->builtins();
+  DCHECK(code() != builtins->builtin(Builtins::kCheckOptimizationMarker));
   return code() != builtins->builtin(Builtins::kCompileLazy);
 }


@@ -211,6 +211,11 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
 
   // If the function has optimized code, ensure that we check for it and return.
   if (function->HasOptimizedCode()) {
+    if (!function->IsInterpreted()) {
+      // For non I+TF path, install a shim which checks the optimization marker.
+      function->set_code(
+          isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
+    }
     DCHECK(function->ChecksOptimizationMarker());
     return isolate->heap()->undefined_value();
   }
@@ -231,14 +236,8 @@
                : "non-concurrent");
   }
 
-  // This function may not have been lazily compiled yet, even though its shared
-  // function has.
-  if (!function->is_compiled()) {
-    DCHECK(function->shared()->IsInterpreted());
-    function->set_code(function->shared()->code());
-  }
 
   JSFunction::EnsureFeedbackVector(function);
   function->MarkForOptimization(concurrency_mode);
 
   return isolate->heap()->undefined_value();