[maglev] Re-enable maglev code on the FBV

Split the FeedbackVector's has-optimized-code bit into two bits, one for
Maglev code and one for Turbofan code. Ignition and Sparkplug check both
bits, while Maglev checks only the Turbofan one.

Bug: v8:7700
Change-Id: I95f6e4326180cac02f127a97438f960950f09d82
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3856569
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82748}
Leszek Swirski 2022-08-26 14:29:03 +02:00 committed by V8 LUCI CQ
parent 45019f34f3
commit 453abb7c9b
38 changed files with 206 additions and 96 deletions
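At a glance, the change threads each caller's CodeKind into LoadTieringStateAndJumpIfNeedsProcessing and picks one of two flag masks from it. The following is a minimal self-contained sketch of that selection, outside of V8: the bit positions are illustrative (the real layout is FeedbackVectorFlags in feedback-vector.tq) and the two free functions are not V8 API, just a restatement of the mask test each MacroAssembler now emits.

// Sketch only: mirrors the mask selection added in this CL.
#include <cstdint>

enum class CodeKind { INTERPRETED_FUNCTION, BASELINE, MAGLEV, TURBOFAN };

constexpr uint32_t kMaybeHasMaglevCodeBit = 1u << 0;           // illustrative position
constexpr uint32_t kMaybeHasTurbofanCodeBit = 1u << 1;         // illustrative position
constexpr uint32_t kTieringStateIsAnyRequestMask = 0x7u << 2;  // illustrative value

constexpr uint32_t kHasTurbofanCodeOrTieringStateIsAnyRequestMask =
    kMaybeHasTurbofanCodeBit | kTieringStateIsAnyRequestMask;
constexpr uint32_t kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask =
    kMaybeHasMaglevCodeBit | kMaybeHasTurbofanCodeBit |
    kTieringStateIsAnyRequestMask;

// Ignition/Sparkplug entry points pass their own kind and test both code
// bits; Maglev passes CodeKind::MAGLEV and tests only the Turbofan bit, so
// its own installed code is not treated as "needs processing".
constexpr uint32_t RelevantTieringMask(CodeKind current_code_kind) {
  return current_code_kind == CodeKind::MAGLEV
             ? kHasTurbofanCodeOrTieringStateIsAnyRequestMask
             : kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask;
}

// Equivalent of the flag test emitted by LoadTieringStateAndJumpIfNeedsProcessing.
constexpr bool NeedsTieringProcessing(uint32_t flags,
                                      CodeKind current_code_kind) {
  return (flags & RelevantTieringMask(current_code_kind)) != 0;
}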

View File

@@ -958,7 +958,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
     // and outside it can be reused.
     optimization_state = temps.Acquire();
     __ LoadTieringStateAndJumpIfNeedsProcessing(
-        optimization_state, feedback_vector, &has_optimized_code_or_state);
+        optimization_state, feedback_vector, CodeKind::BASELINE,
+        &has_optimized_code_or_state);
   }
   {
@@ -1125,7 +1126,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Register optimization_state = r4;
   Label has_optimized_code_or_state;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
@@ -1301,7 +1303,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check the tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Load the baseline code into the closure.
   __ mov(r2, kInterpreterBytecodeArrayRegister);

View File

@@ -1115,7 +1115,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   Label has_optimized_code_or_state;
   Register optimization_state = temps.AcquireW();
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
@@ -1283,7 +1284,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Label has_optimized_code_or_state;
   Register optimization_state = w7;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
@@ -1473,7 +1475,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check the tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Load the baseline code into the closure.
   __ Move(x2, kInterpreterBytecodeArrayRegister);

View File

@@ -40,7 +40,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
   GotoIfNot(
       IsSetWord32(
           optimization_state,
-          FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask),
+          FeedbackVector::kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask),
       &fallthrough);
   GotoIfNot(IsSetWord32(optimization_state,

View File

@@ -926,6 +926,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Label has_optimized_code_or_state;
   Register optimization_state = ecx;
   __ LoadTieringStateAndJumpIfNeedsProcessing(optimization_state, xmm1,
+                                              CodeKind::INTERPRETED_FUNCTION,
                                               &has_optimized_code_or_state);
   // Reload the feedback vector.
@@ -1136,6 +1137,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check the tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(optimization_state, xmm1,
+                                              CodeKind::BASELINE,
                                               &has_optimized_code_or_state);
   // Load the baseline code into the closure.
@@ -1559,7 +1561,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   Label has_optimized_code_or_state;
   Register optimization_state = ecx;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, saved_feedback_vector, &has_optimized_code_or_state);
+      optimization_state, saved_feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Reload the feedback vector.
   __ movd(feedback_vector, saved_feedback_vector);

View File

@@ -932,7 +932,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
     // optimization_state will be used only in |has_optimized_code_or_state|
     // and outside it can be reused.
     __ LoadTieringStateAndJumpIfNeedsProcessing(
-        optimization_state, feedback_vector, &has_optimized_code_or_state);
+        optimization_state, feedback_vector, CodeKind::BASELINE,
+        &has_optimized_code_or_state);
   }
   {
     UseScratchRegisterScope temps(masm);
@@ -1097,7 +1098,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Label has_optimized_code_or_state;
   Register optimization_state = a4;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
@@ -1275,7 +1277,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check for an tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Load the baseline code into the closure.
   __ Move(a2, kInterpreterBytecodeArrayRegister);

View File

@@ -999,14 +999,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
 // is optimized code or a tiering state that needs to be processed.
 static void LoadTieringStateAndJumpIfNeedsProcessing(
     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(masm);
   Register scratch = t6;
   __ lhu(optimization_state,
          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   __ And(
       scratch, optimization_state,
-      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+      Operand(
+          current_code_kind == CodeKind::MAGLEV
+              ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+              : FeedbackVector::
+                    kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
 }

View File

@@ -931,7 +931,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
     // optimization_state will be used only in |has_optimized_code_or_state|
     // and outside it can be reused.
     __ LoadTieringStateAndJumpIfNeedsProcessing(
-        optimization_state, feedback_vector, &has_optimized_code_or_state);
+        optimization_state, feedback_vector, CodeKind::BASELINE,
+        &has_optimized_code_or_state);
   }
   {
     UseScratchRegisterScope temps(masm);
@@ -1093,7 +1094,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Label has_optimized_code_or_state;
   Register optimization_state = a4;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
@@ -1269,7 +1271,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check for an tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Load the baseline code into the closure.
   __ Move(a2, kInterpreterBytecodeArrayRegister);

View File

@@ -1209,7 +1209,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   Register optimization_state = r10;
   {
     __ LoadTieringStateAndJumpIfNeedsProcessing(
-        optimization_state, feedback_vector, &has_optimized_code_or_state);
+        optimization_state, feedback_vector, CodeKind::BASELINE,
+        &has_optimized_code_or_state);
   }
   { ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r11, r0); }
@@ -1381,7 +1382,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Register optimization_state = r7;
   Label has_optimized_code_or_state;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
@@ -1572,7 +1574,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check for an tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Load the baseline code into the closure.
   __ mr(r5, kInterpreterBytecodeArrayRegister);

View File

@@ -972,7 +972,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   Label has_optimized_code_or_state;
   Register optimization_state = temps.Acquire();
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
     ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
@@ -1139,7 +1140,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Label has_optimized_code_or_state;
   Register optimization_state = a4;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
     ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
@@ -1321,7 +1323,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check for an tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Load the baseline code into the closure.
   __ Move(a2, kInterpreterBytecodeArrayRegister);

View File

@@ -1250,7 +1250,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   Register optimization_state = r9;
   {
     __ LoadTieringStateAndJumpIfNeedsProcessing(
-        optimization_state, feedback_vector, &has_optimized_code_or_state);
+        optimization_state, feedback_vector, CodeKind::BASELINE,
+        &has_optimized_code_or_state);
   }
   {
@@ -1417,7 +1418,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Register optimization_state = r6;
   Label has_optimized_code_or_state;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+      &has_optimized_code_or_state);
   {
     UseScratchRegisterScope temps(masm);
@@ -1602,7 +1604,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check for an tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Load the baseline code into the closure.
   __ mov(r4, kInterpreterBytecodeArrayRegister);

View File

@@ -1039,7 +1039,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Label has_optimized_code_or_state;
   Register optimization_state = rcx;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+      &has_optimized_code_or_state);
   ResetFeedbackVectorOsrUrgency(masm, feedback_vector, kScratchRegister);
@@ -1217,7 +1218,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check the tiering state.
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   // Load the baseline code into the closure.
   __ Move(rcx, kInterpreterBytecodeArrayRegister);
@@ -1548,7 +1550,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   // Check the tiering state.
   Label has_optimized_code_or_state;
   __ LoadTieringStateAndJumpIfNeedsProcessing(
-      optimization_state, feedback_vector, &has_optimized_code_or_state);
+      optimization_state, feedback_vector, CodeKind::BASELINE,
+      &has_optimized_code_or_state);
   ResetFeedbackVectorOsrUrgency(masm, feedback_vector, kScratchRegister);

View File

@@ -2034,13 +2034,18 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(optimization_state, feedback_vector));
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   ldrh(optimization_state,
        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   tst(optimization_state,
-      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+      Operand(
+          current_code_kind == CodeKind::MAGLEV
+              ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+              : FeedbackVector::
+                    kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   b(ne, has_optimized_code_or_state);
 }

View File

@@ -769,7 +769,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, Register feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                     Register feedback_vector);

View File

@@ -1427,14 +1427,17 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(optimization_state, feedback_vector));
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   Ldrh(optimization_state,
        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   TestAndBranchIfAnySet(
       optimization_state,
-      FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask,
+      current_code_kind == CodeKind::MAGLEV
+          ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+          : FeedbackVector::kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask,
       has_optimized_code_or_state);
 }

View File

@@ -1832,7 +1832,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, Register feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                     Register feedback_vector);

View File

@@ -978,7 +978,9 @@ class OptimizedCodeCache : public AllStatic {
     if (is_function_context_specializing) {
       // Function context specialization folds-in the function context, so no
       // sharing can occur. Make sure the optimized code cache is cleared.
-      if (feedback_vector.has_optimized_code()) {
+      // Only do so if the specialized code's kind matches the cached code kind.
+      if (feedback_vector.has_optimized_code() &&
+          feedback_vector.optimized_code().kind() == code.kind()) {
         feedback_vector.ClearOptimizedCode();
       }
       return;
@@ -3997,20 +3999,15 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
   // when all the bytecodes are implemented.
   USE(status);
-  // TODO(v8:7700): Re-enable caching in a separate feedback vector slot. We
-  // probably shouldn't reuse the same slot as TF since that makes tiering
-  // logic from ML to TF more involved (it'd have to check the cached code
-  // kind).
-  // const bool kIsContextSpecializing = false;
-  // OptimizedCodeCache::Insert(isolate, *job->function(),
-  //                            BytecodeOffset::None(),
-  //                            job->function()->code(),
-  //                            kIsContextSpecializing);
   static constexpr BytecodeOffset osr_offset = BytecodeOffset::None();
   ResetTieringState(*job->function(), osr_offset);
   if (status == CompilationJob::SUCCEEDED) {
+    const bool kIsContextSpecializing = false;
+    OptimizedCodeCache::Insert(isolate, *job->function(),
+                               BytecodeOffset::None(), job->function()->code(),
+                               kIsContextSpecializing);
     // Note the finalized Code object has already been installed on the
     // function by MaglevCompilationJob::FinalizeJobImpl.

View File

@@ -826,8 +826,9 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // Registers optimization_state and feedback_vector must be aliased.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, XMMRegister saved_feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   Register feedback_vector = optimization_state;
   // Store feedback_vector. We may need it if we need to load the optimize code
@@ -838,9 +839,13 @@ void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
   // Check if there is optimized code or a tiering state that needes to be
   // processed.
-  test_w(optimization_state,
-         Immediate(
-             FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+  test_w(
+      optimization_state,
+      Immediate(
+          current_code_kind == CodeKind::MAGLEV
+              ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+              : FeedbackVector::
+                    kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   j(not_zero, has_optimized_code_or_state);
 }

View File

@@ -563,7 +563,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, XMMRegister saved_feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
       Register optimization_state, XMMRegister saved_feedback_vector);

View File

@@ -4254,14 +4254,19 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
   Register scratch = t2;
   DCHECK(!AreAliased(t2, optimization_state, feedback_vector));
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   Ld_hu(optimization_state,
         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   And(scratch, optimization_state,
-      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+      Operand(
+          current_code_kind == CodeKind::MAGLEV
+              ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+              : FeedbackVector::
+                    kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
 }

View File

@@ -1053,7 +1053,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, Register feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                     Register feedback_vector);

View File

@@ -6300,13 +6300,18 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   Register scratch = t2;
   Lhu(optimization_state,
       FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   And(scratch, optimization_state,
-      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+      Operand(
+          current_code_kind == CodeKind::MAGLEV
+              ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+              : FeedbackVector::
+                    kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
 }

View File

@@ -1242,7 +1242,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, Register feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                     Register feedback_vector);

View File

@@ -2104,15 +2104,23 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(optimization_state, feedback_vector));
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   LoadU16(optimization_state,
           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   CHECK(is_uint16(
-      FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+      current_code_kind == CodeKind::MAGLEV
+          ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+          : FeedbackVector::
+                kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   mov(r0,
-      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+      Operand(
+          current_code_kind == CodeKind::MAGLEV
+              ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+              : FeedbackVector::
+                    kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   AndU32(r0, optimization_state, r0, SetRC);
   bne(has_optimized_code_or_state, cr0);
 }

View File

@@ -1304,7 +1304,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, Register feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                     Register feedback_vector);

View File

@@ -190,15 +190,20 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(optimization_state, feedback_vector));
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
   Lhu(optimization_state,
       FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   And(scratch, optimization_state,
-      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+      Operand(
+          current_code_kind == CodeKind::MAGLEV
+              ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+              : FeedbackVector::
+                    kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
 }

View File

@@ -1343,7 +1343,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, Register feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                     Register feedback_vector);

View File

@@ -2099,16 +2099,23 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(optimization_state, feedback_vector));
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   LoadU16(optimization_state,
           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   CHECK(is_uint16(
-      FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
-  tmll(
-      optimization_state,
-      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+      current_code_kind == CodeKind::MAGLEV
+          ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+          : FeedbackVector::
+                kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
+  tmll(optimization_state,
+       Operand(
+           current_code_kind == CodeKind::MAGLEV
+               ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+               : FeedbackVector::
+                     kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   b(Condition(7), has_optimized_code_or_state);
 }

View File

@@ -1754,7 +1754,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, Register feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                     Register feedback_vector);

View File

@@ -894,13 +894,17 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
     Register optimization_state, Register feedback_vector,
-    Label* has_optimized_code_or_state) {
+    CodeKind current_code_kind, Label* has_optimized_code_or_state) {
   ASM_CODE_COMMENT(this);
+  DCHECK(CodeKindCanTierUp(current_code_kind));
   movzxwl(optimization_state,
           FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
   testw(optimization_state,
         Immediate(
-            FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+            current_code_kind == CodeKind::MAGLEV
+                ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+                : FeedbackVector::
+                      kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
   j(not_zero, has_optimized_code_or_state);
 }

View File

@@ -838,7 +838,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
       JumpMode jump_mode = JumpMode::kJump);
   void LoadTieringStateAndJumpIfNeedsProcessing(
       Register optimization_state, Register feedback_vector,
-      Label* has_optimized_code_or_state);
+      CodeKind current_code_kind, Label* has_optimized_code_or_state);
   void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
       Register optimization_state, Register feedback_vector, Register closure,
       JumpMode jump_mode = JumpMode::kJump);

View File

@@ -1222,7 +1222,8 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) {
     os << "\n - no optimized code";
   }
   os << "\n - tiering state: " << tiering_state();
-  os << "\n - maybe has optimized code: " << maybe_has_optimized_code();
+  os << "\n - maybe has maglev code: " << maybe_has_maglev_code();
+  os << "\n - maybe has turbofan code: " << maybe_has_turbofan_code();
   os << "\n - invocation count: " << invocation_count();
   os << "\n - profiler ticks: " << profiler_ticks();
   os << "\n - closure feedback cell array: ";

View File

@@ -1433,7 +1433,8 @@ void V8FileLogger::FeedbackVectorEvent(FeedbackVector vector,
       << vector.length();
   msg << kNext << reinterpret_cast<void*>(code.InstructionStart(cage_base));
   msg << kNext << vector.tiering_state();
-  msg << kNext << vector.maybe_has_optimized_code();
+  msg << kNext << vector.maybe_has_maglev_code();
+  msg << kNext << vector.maybe_has_turbofan_code();
   msg << kNext << vector.invocation_count();
   msg << kNext << vector.profiler_ticks() << kNext;

View File

@@ -460,7 +460,8 @@ class MaglevCodeGeneratingNodeProcessor {
     Label has_optimized_code_or_state, next;
     __ LoadTieringStateAndJumpIfNeedsProcessing(
-        optimization_state, feedback_vector, &has_optimized_code_or_state);
+        optimization_state, feedback_vector, CodeKind::MAGLEV,
+        &has_optimized_code_or_state);
     __ jmp(&next);
     __ bind(&has_optimized_code_or_state);

View File

@@ -160,7 +160,12 @@ CodeT FeedbackVector::optimized_code() const {
   // It is possible that the maybe_optimized_code slot is cleared but the flags
   // haven't been updated yet. We update them when we execute the function next
   // time / when we create new closure.
-  DCHECK_IMPLIES(!code.is_null(), maybe_has_optimized_code());
+  DCHECK_IMPLIES(!code.is_null(),
+                 maybe_has_maglev_code() || maybe_has_turbofan_code());
+  DCHECK_IMPLIES(!code.is_null() && code.is_maglevved(),
+                 maybe_has_maglev_code());
+  DCHECK_IMPLIES(!code.is_null() && code.is_turbofanned(),
+                 maybe_has_turbofan_code());
   return code;
 }
@@ -169,16 +174,25 @@ TieringState FeedbackVector::tiering_state() const {
 }
 bool FeedbackVector::has_optimized_code() const {
-  DCHECK_IMPLIES(!optimized_code().is_null(), maybe_has_optimized_code());
+  DCHECK_IMPLIES(!optimized_code().is_null(),
+                 maybe_has_maglev_code() || maybe_has_turbofan_code());
   return !optimized_code().is_null();
 }
-bool FeedbackVector::maybe_has_optimized_code() const {
-  return MaybeHasOptimizedCodeBit::decode(flags());
+bool FeedbackVector::maybe_has_maglev_code() const {
+  return MaybeHasMaglevCodeBit::decode(flags());
 }
-void FeedbackVector::set_maybe_has_optimized_code(bool value) {
-  set_flags(MaybeHasOptimizedCodeBit::update(flags(), value));
+void FeedbackVector::set_maybe_has_maglev_code(bool value) {
+  set_flags(MaybeHasMaglevCodeBit::update(flags(), value));
+}
+
+bool FeedbackVector::maybe_has_turbofan_code() const {
+  return MaybeHasTurbofanCodeBit::decode(flags());
+}
+
+void FeedbackVector::set_maybe_has_turbofan_code(bool value) {
+  set_flags(MaybeHasTurbofanCodeBit::update(flags(), value));
 }
 base::Optional<CodeT> FeedbackVector::GetOptimizedOsrCode(Isolate* isolate,
base::Optional<CodeT> FeedbackVector::GetOptimizedOsrCode(Isolate* isolate, base::Optional<CodeT> FeedbackVector::GetOptimizedOsrCode(Isolate* isolate,

View File

@@ -261,7 +261,8 @@ Handle<FeedbackVector> FeedbackVector::New(
   DCHECK_EQ(vector->shared_function_info(), *shared);
   DCHECK_EQ(vector->tiering_state(), TieringState::kNone);
-  DCHECK(!vector->maybe_has_optimized_code());
+  DCHECK(!vector->maybe_has_maglev_code());
+  DCHECK(!vector->maybe_has_turbofan_code());
   DCHECK_EQ(vector->invocation_count(), 0);
   DCHECK_EQ(vector->profiler_ticks(), 0);
   DCHECK(vector->maybe_optimized_code()->IsCleared());
@@ -388,10 +389,10 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
 void FeedbackVector::SetOptimizedCode(CodeT code) {
   DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
   // We should set optimized code only when there is no valid optimized code.
-  // TODO(v8:7700): Update this check once optimized code can be promoted to a
-  // higher tier (in particular, maglev to turbofan).
   DCHECK(!has_optimized_code() ||
          optimized_code().marked_for_deoptimization() ||
+         (CodeKindCanTierUp(optimized_code().kind()) &&
+          optimized_code().kind() < code.kind()) ||
          FLAG_stress_concurrent_inlining_attach_code);
   // TODO(mythria): We could see a CompileOptimized state here either from
   // tests that use %OptimizeFunctionOnNextCall, --always-turbofan or because we
@@ -403,16 +404,24 @@ void FeedbackVector::SetOptimizedCode(CodeT code) {
   // TODO(leszeks): Reconsider whether this could clear the tiering state vs.
   // the callers doing so.
   state = TieringStateBits::update(state, TieringState::kNone);
-  state = MaybeHasOptimizedCodeBit::update(state, true);
+  if (code.is_maglevved()) {
+    DCHECK(!MaybeHasTurbofanCodeBit::decode(state));
+    state = MaybeHasMaglevCodeBit::update(state, true);
+  } else {
+    DCHECK(code.is_turbofanned());
+    state = MaybeHasTurbofanCodeBit::update(state, true);
+    state = MaybeHasMaglevCodeBit::update(state, false);
+  }
   set_flags(state);
 }
 void FeedbackVector::ClearOptimizedCode() {
   DCHECK(has_optimized_code());
-  DCHECK(maybe_has_optimized_code());
+  DCHECK(maybe_has_maglev_code() || maybe_has_turbofan_code());
   set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()),
                            kReleaseStore);
-  set_maybe_has_optimized_code(false);
+  set_maybe_has_maglev_code(false);
+  set_maybe_has_turbofan_code(false);
 }
 void FeedbackVector::SetOptimizedOsrCode(FeedbackSlot slot, CodeT code) {
@@ -434,7 +443,8 @@ void FeedbackVector::set_tiering_state(TieringState state) {
 void FeedbackVector::reset_flags() {
   set_flags(TieringStateBits::encode(TieringState::kNone) |
-            MaybeHasOptimizedCodeBit::encode(false) |
+            MaybeHasMaglevCodeBit::encode(false) |
+            MaybeHasTurbofanCodeBit::encode(false) |
             OsrTieringStateBit::encode(TieringState::kNone) |
             MaybeHasOptimizedOsrCodeBit::encode(false));
 }
@@ -456,7 +466,8 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
     SharedFunctionInfo shared, const char* reason) {
   MaybeObject slot = maybe_optimized_code(kAcquireLoad);
   if (slot->IsCleared()) {
-    set_maybe_has_optimized_code(false);
+    set_maybe_has_maglev_code(false);
+    set_maybe_has_turbofan_code(false);
     return;
   }

View File

@@ -206,10 +206,14 @@ class FeedbackVector
             HeapObject>::maybe_optimized_code;
   DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)
+  static constexpr uint32_t kHasAnyOptimizedCodeMask =
+      MaybeHasMaglevCodeBit::kMask | MaybeHasTurbofanCodeBit::kMask;
   static constexpr uint32_t kTieringStateIsAnyRequestMask =
       kNoneOrInProgressMask << TieringStateBits::kShift;
-  static constexpr uint32_t kHasOptimizedCodeOrTieringStateIsAnyRequestMask =
-      MaybeHasOptimizedCodeBit::kMask | kTieringStateIsAnyRequestMask;
+  static constexpr uint32_t kHasTurbofanCodeOrTieringStateIsAnyRequestMask =
+      MaybeHasTurbofanCodeBit::kMask | kTieringStateIsAnyRequestMask;
+  static constexpr uint32_t kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask =
+      kHasAnyOptimizedCodeMask | kTieringStateIsAnyRequestMask;
   inline bool is_empty() const;
@@ -252,8 +256,10 @@ class FeedbackVector
   // Similar to above, but represented internally as a bit that can be
   // efficiently checked by generated code. May lag behind the actual state of
   // the world, thus 'maybe'.
-  inline bool maybe_has_optimized_code() const;
-  inline void set_maybe_has_optimized_code(bool value);
+  inline bool maybe_has_maglev_code() const;
+  inline void set_maybe_has_maglev_code(bool value);
+  inline bool maybe_has_turbofan_code() const;
+  inline void set_maybe_has_turbofan_code(bool value);
   void SetOptimizedCode(CodeT code);
   void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
                                                  const char* reason);

View File

@@ -9,10 +9,11 @@ bitfield struct FeedbackVectorFlags extends uint16 {
   // Whether the maybe_optimized_code field contains a code object. 'maybe',
   // because they flag may lag behind the actual state of the world (it will be
   // updated in time).
-  maybe_has_optimized_code: bool: 1 bit;
+  maybe_has_maglev_code: bool: 1 bit;
+  maybe_has_turbofan_code: bool: 1 bit;
   // Just one bit, since only {kNone,kInProgress} are relevant for OSR.
   osr_tiering_state: TieringState: 1 bit;
-  all_your_bits_are_belong_to_jgruber: uint32: 11 bit;
+  all_your_bits_are_belong_to_jgruber: uint32: 10 bit;
 }
 bitfield struct OsrState extends uint8 {

View File

@@ -4149,7 +4149,7 @@ TEST(WeakReference) {
   fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(ToCodeT(*code)),
                                v8::kReleaseStore);
   fv->set_flags(
-      i::FeedbackVector::MaybeHasOptimizedCodeBit::encode(true) |
+      i::FeedbackVector::MaybeHasTurbofanCodeBit::encode(true) |
       i::FeedbackVector::TieringStateBits::encode(i::TieringState::kNone));
   v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();