diff --git a/src/common/globals.h b/src/common/globals.h index 1d833173b5..c507283d68 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -1833,6 +1833,15 @@ static constexpr uint32_t kNoneOrInProgressMask = 0b110; TIERING_STATE_LIST(V) #undef V +constexpr bool IsRequestMaglev(TieringState state) { + return IsRequestMaglev_Concurrent(state) || + IsRequestMaglev_Synchronous(state); +} +constexpr bool IsRequestTurbofan(TieringState state) { + return IsRequestTurbofan_Concurrent(state) || + IsRequestTurbofan_Synchronous(state); +} + constexpr const char* ToString(TieringState marker) { switch (marker) { #define V(Name, Value) \ diff --git a/src/execution/tiering-manager.cc b/src/execution/tiering-manager.cc index 7da8359550..a18463e7ec 100644 --- a/src/execution/tiering-manager.cc +++ b/src/execution/tiering-manager.cc @@ -11,12 +11,14 @@ #include "src/codegen/compilation-cache.h" #include "src/codegen/compiler.h" #include "src/codegen/pending-optimization-table.h" +#include "src/common/globals.h" #include "src/diagnostics/code-tracer.h" #include "src/execution/execution.h" #include "src/execution/frames-inl.h" #include "src/handles/global-handles.h" #include "src/init/bootstrapper.h" #include "src/interpreter/interpreter.h" +#include "src/objects/code-kind.h" #include "src/objects/code.h" #include "src/tracing/trace-event.h" @@ -261,7 +263,7 @@ void TieringManager::RequestOsrAtNextOpportunity(JSFunction function) { } void TieringManager::MaybeOptimizeFrame(JSFunction function, - CodeKind code_kind) { + CodeKind calling_code_kind) { const TieringState tiering_state = function.feedback_vector().tiering_state(); const TieringState osr_tiering_state = function.feedback_vector().osr_tiering_state(); @@ -288,24 +290,15 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function, // Continue below and do a normal optimized compile as well. 
} - const bool is_marked_for_any_optimization = - (static_cast<uint32_t>(tiering_state) & kNoneOrInProgressMask) != 0; // Baseline OSR uses a separate mechanism and must not be considered here, // therefore we limit to kOptimizedJSFunctionCodeKindsMask. // TODO(v8:7700): Change the condition below for Maglev OSR once it is // implemented. - if (is_marked_for_any_optimization || - function.HasAvailableHigherTierCodeThanWithFilter( - code_kind, kOptimizedJSFunctionCodeKindsMask)) { + if (IsRequestTurbofan(tiering_state) || + function.HasAvailableCodeKind(CodeKind::TURBOFAN)) { // OSR kicks in only once we've previously decided to tier up, but we are - // still in the lower-tier frame (this implies a long-running loop). - // - // TODO(v8:7700): In the presence of Maglev, OSR is triggered much earlier - // than with the old pipeline since we tier up to Maglev earlier which - // affects both conditions above. This *seems* fine (when stuck in a loop - // we want to tier up, regardless of the active tier), but we may want to - // think about this again at some point. - if (SmallEnoughForOSR(isolate_, function, code_kind)) { + // still in a lower-tier frame (this implies a long-running loop). + if (SmallEnoughForOSR(isolate_, function, calling_code_kind)) { TryIncrementOsrUrgency(isolate_, function); } @@ -314,20 +307,33 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function, return; } - DCHECK(!is_marked_for_any_optimization && - !function.HasAvailableHigherTierCodeThanWithFilter( - code_kind, kOptimizedJSFunctionCodeKindsMask)); - OptimizationDecision d = ShouldOptimize(function, code_kind); + DCHECK(!IsRequestTurbofan(tiering_state)); + DCHECK(!function.HasAvailableCodeKind(CodeKind::TURBOFAN)); + OptimizationDecision d = ShouldOptimize(function, calling_code_kind); + // We might be stuck in a baseline frame that wants to tier up to Maglev, but + // is in a loop, and can't OSR, because Maglev doesn't have OSR. 
Allow it to + // skip over Maglev by re-checking ShouldOptimize as if we were in Maglev. + // TODO(v8:7700): Remove this when Maglev can OSR. + static_assert(!CodeKindCanOSR(CodeKind::MAGLEV)); + if (d.should_optimize() && d.code_kind == CodeKind::MAGLEV) { + bool is_marked_for_maglev_optimization = + IsRequestMaglev(tiering_state) || + function.HasAvailableCodeKind(CodeKind::MAGLEV); + if (is_marked_for_maglev_optimization) { + d = ShouldOptimize(function, CodeKind::MAGLEV); + } + } + if (d.should_optimize()) Optimize(function, d); } -OptimizationDecision TieringManager::ShouldOptimize(JSFunction function, - CodeKind code_kind) { - if (TiersUpToMaglev(code_kind) && +OptimizationDecision TieringManager::ShouldOptimize( + JSFunction function, CodeKind calling_code_kind) { + if (TiersUpToMaglev(calling_code_kind) && function.shared().PassesFilter(v8_flags.maglev_filter) && !function.shared(isolate_).maglev_compilation_failed()) { return OptimizationDecision::Maglev(); - } else if (code_kind == CodeKind::TURBOFAN) { + } else if (calling_code_kind == CodeKind::TURBOFAN) { // Already in the top tier. return OptimizationDecision::DoNotOptimize(); } diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 7ffcf72fe8..a45dba3eeb 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -3558,6 +3558,7 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label, // See also: InterpreterAssembler::OnStackReplacement. baseline::BaselineAssembler basm(masm); + __ AssertFeedbackVector(scratch0); // Case 1). Label deopt; @@ -3569,7 +3570,6 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label, // Case 2). 
{ - __ AssertFeedbackVector(scratch0); __ movb(scratch0, FieldOperand(scratch0, FeedbackVector::kOsrStateOffset)); __ DecodeField<FeedbackVector::OsrUrgencyBits>(scratch0); basm.JumpIfByte(baseline::Condition::kUnsignedLessThanEqual, scratch0, @@ -3601,18 +3601,19 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label, } } }); + DCHECK(!snapshot.live_registers.has(maybe_target_code)); SaveRegisterStateForCall save_register_state(masm, snapshot); __ Move(kContextRegister, masm->native_context().object()); __ Push(Smi::FromInt(osr_offset.ToInt())); __ CallRuntime(Runtime::kCompileOptimizedOSRFromMaglev, 1); save_register_state.DefineSafepoint(); - __ Move(scratch0, rax); + __ Move(maybe_target_code, kReturnRegister0); } // A `0` return value means there is no OSR code available yet. Fall // through for now, OSR code will be picked up once it exists and is // cached on the feedback vector. - __ testq(scratch0, scratch0); + __ Cmp(maybe_target_code, 0); __ j(equal, return_label, Label::kNear); } diff --git a/test/mjsunit/maglev/osr-to-tf.js b/test/mjsunit/maglev/osr-to-tf.js index d810226c0e..9b8b998ee9 100644 --- a/test/mjsunit/maglev/osr-to-tf.js +++ b/test/mjsunit/maglev/osr-to-tf.js @@ -5,13 +5,25 @@ // Flags: --allow-natives-syntax --maglev --no-stress-opt // Flags: --no-baseline-batch-compilation --use-osr --turbofan -let keep_going = 100000; // A counter to avoid test hangs on failure. +let keep_going = 10000000; // A counter to avoid test hangs on failure. function f() { let reached_tf = false; + let prev_status = 0; while (!reached_tf && --keep_going) { // This loop should trigger OSR. reached_tf = %CurrentFrameIsTurbofan(); + let status = %GetOptimizationStatus(f); + if (status !== prev_status) { + let p = [] + for (let k in V8OptimizationStatus) { + if (V8OptimizationStatus[k] & status) { + p.push(k); + } + } + print(p.join(",")); + prev_status = status; + } } }