[maglev] Re-enable maglev code on the FBV
Change the has-optimized FeedbackVector bit to two bits, one for Maglev
and one for Turbofan. Ignition and Sparkplug can check both bits, while
Maglev will only check the Turbofan one.

Bug: v8:7700
Change-Id: I95f6e4326180cac02f127a97438f960950f09d82
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3856569
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82748}
This commit is contained in:
parent 45019f34f3
commit 453abb7c9b
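The per-architecture changes below all make the same mask selection. As a minimal standalone sketch (not the V8 implementation): the constant names mirror the new FeedbackVector masks introduced in this change, but the bit positions and the NeedsTieringProcessing() helper are illustrative assumptions.

// Minimal sketch of the two-bit check; names follow the diff, layout assumed.
#include <cstdint>

enum class CodeKind { INTERPRETED_FUNCTION, BASELINE, MAGLEV, TURBOFAN };

namespace FeedbackVectorFlags {
// Assumed bit layout, for illustration only.
constexpr uint32_t kMaybeHasMaglevCodeBit = 1u << 0;
constexpr uint32_t kMaybeHasTurbofanCodeBit = 1u << 1;
constexpr uint32_t kTieringStateIsAnyRequestMask = 1u << 2;

constexpr uint32_t kHasTurbofanCodeOrTieringStateIsAnyRequestMask =
    kMaybeHasTurbofanCodeBit | kTieringStateIsAnyRequestMask;
constexpr uint32_t kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask =
    kMaybeHasMaglevCodeBit | kMaybeHasTurbofanCodeBit |
    kTieringStateIsAnyRequestMask;
}  // namespace FeedbackVectorFlags

// Ignition and Sparkplug prologues pass their own CodeKind and therefore test
// both code bits; Maglev passes CodeKind::MAGLEV and only tests the Turbofan
// bit, so its own cached code does not trigger the "has optimized code" path.
inline bool NeedsTieringProcessing(uint32_t flags, CodeKind current_code_kind) {
  const uint32_t mask =
      current_code_kind == CodeKind::MAGLEV
          ? FeedbackVectorFlags::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
          : FeedbackVectorFlags::
                kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask;
  return (flags & mask) != 0;
}

Splitting the check this way is what lets Maglev code stay cached in the feedback vector without every Maglev prologue immediately bailing out on its own cached code.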
@@ -958,7 +958,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 // and outside it can be reused.
 optimization_state = temps.Acquire();
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);
 }

 {

@@ -1125,7 +1126,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Register optimization_state = r4;
 Label has_optimized_code_or_state;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+ &has_optimized_code_or_state);

 {
 UseScratchRegisterScope temps(masm);

@@ -1301,7 +1303,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check the tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Load the baseline code into the closure.
 __ mov(r2, kInterpreterBytecodeArrayRegister);

@@ -1115,7 +1115,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 Label has_optimized_code_or_state;
 Register optimization_state = temps.AcquireW();
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 {
 UseScratchRegisterScope temps(masm);

@@ -1283,7 +1284,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Label has_optimized_code_or_state;
 Register optimization_state = w7;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+ &has_optimized_code_or_state);

 {
 UseScratchRegisterScope temps(masm);

@@ -1473,7 +1475,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check the tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Load the baseline code into the closure.
 __ Move(x2, kInterpreterBytecodeArrayRegister);

@@ -40,7 +40,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
 GotoIfNot(
 IsSetWord32(
 optimization_state,
- FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask),
+ FeedbackVector::kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask),
 &fallthrough);

 GotoIfNot(IsSetWord32(optimization_state,

@@ -926,6 +926,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Label has_optimized_code_or_state;
 Register optimization_state = ecx;
 __ LoadTieringStateAndJumpIfNeedsProcessing(optimization_state, xmm1,
+ CodeKind::INTERPRETED_FUNCTION,
 &has_optimized_code_or_state);

 // Reload the feedback vector.

@@ -1136,6 +1137,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check the tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(optimization_state, xmm1,
+ CodeKind::BASELINE,
 &has_optimized_code_or_state);

 // Load the baseline code into the closure.

@@ -1559,7 +1561,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 Label has_optimized_code_or_state;
 Register optimization_state = ecx;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, saved_feedback_vector, &has_optimized_code_or_state);
+ optimization_state, saved_feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Reload the feedback vector.
 __ movd(feedback_vector, saved_feedback_vector);

@@ -932,7 +932,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 // optimization_state will be used only in |has_optimized_code_or_state|
 // and outside it can be reused.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);
 }
 {
 UseScratchRegisterScope temps(masm);

@@ -1097,7 +1098,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Label has_optimized_code_or_state;
 Register optimization_state = a4;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+ &has_optimized_code_or_state);

 {
 UseScratchRegisterScope temps(masm);

@@ -1275,7 +1277,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check for an tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Load the baseline code into the closure.
 __ Move(a2, kInterpreterBytecodeArrayRegister);

@@ -999,14 +999,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
 // is optimized code or a tiering state that needs to be processed.
 static void LoadTieringStateAndJumpIfNeedsProcessing(
 MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(masm);
 Register scratch = t6;
 __ lhu(optimization_state,
 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 __ And(
 scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ Operand(
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
 }

@@ -931,7 +931,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 // optimization_state will be used only in |has_optimized_code_or_state|
 // and outside it can be reused.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);
 }
 {
 UseScratchRegisterScope temps(masm);

@@ -1093,7 +1094,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Label has_optimized_code_or_state;
 Register optimization_state = a4;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+ &has_optimized_code_or_state);

 {
 UseScratchRegisterScope temps(masm);

@@ -1269,7 +1271,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check for an tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Load the baseline code into the closure.
 __ Move(a2, kInterpreterBytecodeArrayRegister);

@@ -1209,7 +1209,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 Register optimization_state = r10;
 {
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);
 }

 { ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r11, r0); }

@@ -1381,7 +1382,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Register optimization_state = r7;
 Label has_optimized_code_or_state;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+ &has_optimized_code_or_state);

 {
 UseScratchRegisterScope temps(masm);

@@ -1572,7 +1574,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check for an tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Load the baseline code into the closure.
 __ mr(r5, kInterpreterBytecodeArrayRegister);

@@ -972,7 +972,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 Label has_optimized_code_or_state;
 Register optimization_state = temps.Acquire();
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);
 {
 UseScratchRegisterScope temps(masm);
 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());

@@ -1139,7 +1140,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Label has_optimized_code_or_state;
 Register optimization_state = a4;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+ &has_optimized_code_or_state);
 {
 UseScratchRegisterScope temps(masm);
 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());

@@ -1321,7 +1323,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check for an tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Load the baseline code into the closure.
 __ Move(a2, kInterpreterBytecodeArrayRegister);

@@ -1250,7 +1250,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 Register optimization_state = r9;
 {
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);
 }

 {

@@ -1417,7 +1418,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Register optimization_state = r6;
 Label has_optimized_code_or_state;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+ &has_optimized_code_or_state);

 {
 UseScratchRegisterScope temps(masm);

@@ -1602,7 +1604,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check for an tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Load the baseline code into the closure.
 __ mov(r4, kInterpreterBytecodeArrayRegister);

@@ -1039,7 +1039,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 Label has_optimized_code_or_state;
 Register optimization_state = rcx;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
+ &has_optimized_code_or_state);

 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, kScratchRegister);

@@ -1217,7 +1218,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

 // Check the tiering state.
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 // Load the baseline code into the closure.
 __ Move(rcx, kInterpreterBytecodeArrayRegister);

@@ -1548,7 +1550,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
 // Check the tiering state.
 Label has_optimized_code_or_state;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::BASELINE,
+ &has_optimized_code_or_state);

 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, kScratchRegister);

@@ -2034,13 +2034,18 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
 DCHECK(!AreAliased(optimization_state, feedback_vector));
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 ldrh(optimization_state,
 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 tst(optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ Operand(
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 b(ne, has_optimized_code_or_state);
 }

@@ -769,7 +769,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
 Register feedback_vector);

@@ -1427,14 +1427,17 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
 DCHECK(!AreAliased(optimization_state, feedback_vector));
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 Ldrh(optimization_state,
 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 TestAndBranchIfAnySet(
 optimization_state,
- FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask,
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask,
 has_optimized_code_or_state);
 }

@@ -1832,7 +1832,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
 Register feedback_vector);

@@ -978,7 +978,9 @@ class OptimizedCodeCache : public AllStatic {
 if (is_function_context_specializing) {
 // Function context specialization folds-in the function context, so no
 // sharing can occur. Make sure the optimized code cache is cleared.
- if (feedback_vector.has_optimized_code()) {
+ // Only do so if the specialized code's kind matches the cached code kind.
+ if (feedback_vector.has_optimized_code() &&
+ feedback_vector.optimized_code().kind() == code.kind()) {
 feedback_vector.ClearOptimizedCode();
 }
 return;

@@ -3997,20 +3999,15 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
 // when all the bytecodes are implemented.
 USE(status);

- // TODO(v8:7700): Re-enable caching in a separate feedback vector slot. We
- // probably shouldn't reuse the same slot as TF since that makes tiering
- // logic from ML to TF more involved (it'd have to check the cached code
- // kind).
- // const bool kIsContextSpecializing = false;
- // OptimizedCodeCache::Insert(isolate, *job->function(),
- // BytecodeOffset::None(),
- // job->function()->code(),
- // kIsContextSpecializing);
-
 static constexpr BytecodeOffset osr_offset = BytecodeOffset::None();
 ResetTieringState(*job->function(), osr_offset);

 if (status == CompilationJob::SUCCEEDED) {
+ const bool kIsContextSpecializing = false;
+ OptimizedCodeCache::Insert(isolate, *job->function(),
+ BytecodeOffset::None(), job->function()->code(),
+ kIsContextSpecializing);
+
 // Note the finalized Code object has already been installed on the
 // function by MaglevCompilationJob::FinalizeJobImpl.

@@ -826,8 +826,9 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // Registers optimization_state and feedback_vector must be aliased.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, XMMRegister saved_feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 Register feedback_vector = optimization_state;

 // Store feedback_vector. We may need it if we need to load the optimize code

@@ -838,9 +839,13 @@ void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(

 // Check if there is optimized code or a tiering state that needes to be
 // processed.
- test_w(optimization_state,
- Immediate(
- FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ test_w(
+ optimization_state,
+ Immediate(
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 j(not_zero, has_optimized_code_or_state);
 }

@@ -563,7 +563,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, XMMRegister saved_feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
 Register optimization_state, XMMRegister saved_feedback_vector);

@@ -4254,14 +4254,19 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
 Register scratch = t2;
 DCHECK(!AreAliased(t2, optimization_state, feedback_vector));
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 Ld_hu(optimization_state,
 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 And(scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ Operand(
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
 }

@@ -1053,7 +1053,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
 Register feedback_vector);

@@ -6300,13 +6300,18 @@ void MacroAssembler::GenerateTailCallToReturnedCode(

 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 Register scratch = t2;
 Lhu(optimization_state,
 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 And(scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ Operand(
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
 }

@@ -1242,7 +1242,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
 Register feedback_vector);

@@ -2104,15 +2104,23 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
 DCHECK(!AreAliased(optimization_state, feedback_vector));
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 LoadU16(optimization_state,
 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 CHECK(is_uint16(
- FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 mov(r0,
- Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ Operand(
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 AndU32(r0, optimization_state, r0, SetRC);
 bne(has_optimized_code_or_state, cr0);
 }

@@ -1304,7 +1304,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
 Register feedback_vector);

@@ -190,15 +190,20 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
 DCHECK(!AreAliased(optimization_state, feedback_vector));
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 UseScratchRegisterScope temps(this);
 Register scratch = temps.Acquire();
 Lhu(optimization_state,
 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 And(scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ Operand(
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
 }

@@ -1343,7 +1343,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
 Register feedback_vector);

@@ -2099,16 +2099,23 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
 DCHECK(!AreAliased(optimization_state, feedback_vector));
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 LoadU16(optimization_state,
 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 CHECK(is_uint16(
- FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
- tmll(
- optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
+ tmll(optimization_state,
+ Operand(
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 b(Condition(7), has_optimized_code_or_state);
 }

@@ -1754,7 +1754,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
 Register feedback_vector);

@@ -894,13 +894,17 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
 // is optimized code or a tiering state that needs to be processed.
 void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state) {
+ CodeKind current_code_kind, Label* has_optimized_code_or_state) {
 ASM_CODE_COMMENT(this);
+ DCHECK(CodeKindCanTierUp(current_code_kind));
 movzxwl(optimization_state,
 FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 testw(optimization_state,
 Immediate(
- FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ current_code_kind == CodeKind::MAGLEV
+ ? FeedbackVector::kHasTurbofanCodeOrTieringStateIsAnyRequestMask
+ : FeedbackVector::
+ kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask));
 j(not_zero, has_optimized_code_or_state);
 }

@@ -838,7 +838,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 JumpMode jump_mode = JumpMode::kJump);
 void LoadTieringStateAndJumpIfNeedsProcessing(
 Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_state);
+ CodeKind current_code_kind, Label* has_optimized_code_or_state);
 void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
 Register optimization_state, Register feedback_vector, Register closure,
 JumpMode jump_mode = JumpMode::kJump);

@@ -1222,7 +1222,8 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) {
 os << "\n - no optimized code";
 }
 os << "\n - tiering state: " << tiering_state();
- os << "\n - maybe has optimized code: " << maybe_has_optimized_code();
+ os << "\n - maybe has maglev code: " << maybe_has_maglev_code();
+ os << "\n - maybe has turbofan code: " << maybe_has_turbofan_code();
 os << "\n - invocation count: " << invocation_count();
 os << "\n - profiler ticks: " << profiler_ticks();
 os << "\n - closure feedback cell array: ";

@@ -1433,7 +1433,8 @@ void V8FileLogger::FeedbackVectorEvent(FeedbackVector vector,
 << vector.length();
 msg << kNext << reinterpret_cast<void*>(code.InstructionStart(cage_base));
 msg << kNext << vector.tiering_state();
- msg << kNext << vector.maybe_has_optimized_code();
+ msg << kNext << vector.maybe_has_maglev_code();
+ msg << kNext << vector.maybe_has_turbofan_code();
 msg << kNext << vector.invocation_count();
 msg << kNext << vector.profiler_ticks() << kNext;

@@ -460,7 +460,8 @@ class MaglevCodeGeneratingNodeProcessor {

 Label has_optimized_code_or_state, next;
 __ LoadTieringStateAndJumpIfNeedsProcessing(
- optimization_state, feedback_vector, &has_optimized_code_or_state);
+ optimization_state, feedback_vector, CodeKind::MAGLEV,
+ &has_optimized_code_or_state);
 __ jmp(&next);

 __ bind(&has_optimized_code_or_state);

@@ -160,7 +160,12 @@ CodeT FeedbackVector::optimized_code() const {
 // It is possible that the maybe_optimized_code slot is cleared but the flags
 // haven't been updated yet. We update them when we execute the function next
 // time / when we create new closure.
- DCHECK_IMPLIES(!code.is_null(), maybe_has_optimized_code());
+ DCHECK_IMPLIES(!code.is_null(),
+ maybe_has_maglev_code() || maybe_has_turbofan_code());
+ DCHECK_IMPLIES(!code.is_null() && code.is_maglevved(),
+ maybe_has_maglev_code());
+ DCHECK_IMPLIES(!code.is_null() && code.is_turbofanned(),
+ maybe_has_turbofan_code());
 return code;
 }

@@ -169,16 +174,25 @@ TieringState FeedbackVector::tiering_state() const {
 }

 bool FeedbackVector::has_optimized_code() const {
- DCHECK_IMPLIES(!optimized_code().is_null(), maybe_has_optimized_code());
+ DCHECK_IMPLIES(!optimized_code().is_null(),
+ maybe_has_maglev_code() || maybe_has_turbofan_code());
 return !optimized_code().is_null();
 }

- bool FeedbackVector::maybe_has_optimized_code() const {
- return MaybeHasOptimizedCodeBit::decode(flags());
+ bool FeedbackVector::maybe_has_maglev_code() const {
+ return MaybeHasMaglevCodeBit::decode(flags());
 }

- void FeedbackVector::set_maybe_has_optimized_code(bool value) {
- set_flags(MaybeHasOptimizedCodeBit::update(flags(), value));
+ void FeedbackVector::set_maybe_has_maglev_code(bool value) {
+ set_flags(MaybeHasMaglevCodeBit::update(flags(), value));
 }

+ bool FeedbackVector::maybe_has_turbofan_code() const {
+ return MaybeHasTurbofanCodeBit::decode(flags());
+ }
+
+ void FeedbackVector::set_maybe_has_turbofan_code(bool value) {
+ set_flags(MaybeHasTurbofanCodeBit::update(flags(), value));
+ }
+
 base::Optional<CodeT> FeedbackVector::GetOptimizedOsrCode(Isolate* isolate,

@@ -261,7 +261,8 @@ Handle<FeedbackVector> FeedbackVector::New(

 DCHECK_EQ(vector->shared_function_info(), *shared);
 DCHECK_EQ(vector->tiering_state(), TieringState::kNone);
- DCHECK(!vector->maybe_has_optimized_code());
+ DCHECK(!vector->maybe_has_maglev_code());
+ DCHECK(!vector->maybe_has_turbofan_code());
 DCHECK_EQ(vector->invocation_count(), 0);
 DCHECK_EQ(vector->profiler_ticks(), 0);
 DCHECK(vector->maybe_optimized_code()->IsCleared());

@@ -388,10 +389,10 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
 void FeedbackVector::SetOptimizedCode(CodeT code) {
 DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
 // We should set optimized code only when there is no valid optimized code.
- // TODO(v8:7700): Update this check once optimized code can be promoted to a
- // higher tier (in particular, maglev to turbofan).
 DCHECK(!has_optimized_code() ||
 optimized_code().marked_for_deoptimization() ||
+ (CodeKindCanTierUp(optimized_code().kind()) &&
+ optimized_code().kind() < code.kind()) ||
 FLAG_stress_concurrent_inlining_attach_code);
 // TODO(mythria): We could see a CompileOptimized state here either from
 // tests that use %OptimizeFunctionOnNextCall, --always-turbofan or because we

|
||||
// TODO(leszeks): Reconsider whether this could clear the tiering state vs.
|
||||
// the callers doing so.
|
||||
state = TieringStateBits::update(state, TieringState::kNone);
|
||||
state = MaybeHasOptimizedCodeBit::update(state, true);
|
||||
if (code.is_maglevved()) {
|
||||
DCHECK(!MaybeHasTurbofanCodeBit::decode(state));
|
||||
state = MaybeHasMaglevCodeBit::update(state, true);
|
||||
} else {
|
||||
DCHECK(code.is_turbofanned());
|
||||
state = MaybeHasTurbofanCodeBit::update(state, true);
|
||||
state = MaybeHasMaglevCodeBit::update(state, false);
|
||||
}
|
||||
set_flags(state);
|
||||
}
|
||||
|
||||
void FeedbackVector::ClearOptimizedCode() {
|
||||
DCHECK(has_optimized_code());
|
||||
DCHECK(maybe_has_optimized_code());
|
||||
DCHECK(maybe_has_maglev_code() || maybe_has_turbofan_code());
|
||||
set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()),
|
||||
kReleaseStore);
|
||||
set_maybe_has_optimized_code(false);
|
||||
set_maybe_has_maglev_code(false);
|
||||
set_maybe_has_turbofan_code(false);
|
||||
}
|
||||
|
||||
void FeedbackVector::SetOptimizedOsrCode(FeedbackSlot slot, CodeT code) {
|
||||
@@ -434,7 +443,8 @@ void FeedbackVector::set_tiering_state(TieringState state) {

 void FeedbackVector::reset_flags() {
 set_flags(TieringStateBits::encode(TieringState::kNone) |
- MaybeHasOptimizedCodeBit::encode(false) |
+ MaybeHasMaglevCodeBit::encode(false) |
+ MaybeHasTurbofanCodeBit::encode(false) |
 OsrTieringStateBit::encode(TieringState::kNone) |
 MaybeHasOptimizedOsrCodeBit::encode(false));
 }

@@ -456,7 +466,8 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
 SharedFunctionInfo shared, const char* reason) {
 MaybeObject slot = maybe_optimized_code(kAcquireLoad);
 if (slot->IsCleared()) {
- set_maybe_has_optimized_code(false);
+ set_maybe_has_maglev_code(false);
+ set_maybe_has_turbofan_code(false);
 return;
 }

@@ -206,10 +206,14 @@ class FeedbackVector
 HeapObject>::maybe_optimized_code;
 DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)

+ static constexpr uint32_t kHasAnyOptimizedCodeMask =
+ MaybeHasMaglevCodeBit::kMask | MaybeHasTurbofanCodeBit::kMask;
 static constexpr uint32_t kTieringStateIsAnyRequestMask =
 kNoneOrInProgressMask << TieringStateBits::kShift;
- static constexpr uint32_t kHasOptimizedCodeOrTieringStateIsAnyRequestMask =
- MaybeHasOptimizedCodeBit::kMask | kTieringStateIsAnyRequestMask;
+ static constexpr uint32_t kHasTurbofanCodeOrTieringStateIsAnyRequestMask =
+ MaybeHasTurbofanCodeBit::kMask | kTieringStateIsAnyRequestMask;
+ static constexpr uint32_t kHasAnyOptimizedCodeOrTieringStateIsAnyRequestMask =
+ kHasAnyOptimizedCodeMask | kTieringStateIsAnyRequestMask;

 inline bool is_empty() const;

@@ -252,8 +256,10 @@ class FeedbackVector
 // Similar to above, but represented internally as a bit that can be
 // efficiently checked by generated code. May lag behind the actual state of
 // the world, thus 'maybe'.
- inline bool maybe_has_optimized_code() const;
- inline void set_maybe_has_optimized_code(bool value);
+ inline bool maybe_has_maglev_code() const;
+ inline void set_maybe_has_maglev_code(bool value);
+ inline bool maybe_has_turbofan_code() const;
+ inline void set_maybe_has_turbofan_code(bool value);
 void SetOptimizedCode(CodeT code);
 void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
 const char* reason);

@@ -9,10 +9,11 @@ bitfield struct FeedbackVectorFlags extends uint16 {
 // Whether the maybe_optimized_code field contains a code object. 'maybe',
 // because they flag may lag behind the actual state of the world (it will be
 // updated in time).
- maybe_has_optimized_code: bool: 1 bit;
+ maybe_has_maglev_code: bool: 1 bit;
+ maybe_has_turbofan_code: bool: 1 bit;
 // Just one bit, since only {kNone,kInProgress} are relevant for OSR.
 osr_tiering_state: TieringState: 1 bit;
- all_your_bits_are_belong_to_jgruber: uint32: 11 bit;
+ all_your_bits_are_belong_to_jgruber: uint32: 10 bit;
 }

 bitfield struct OsrState extends uint8 {

@@ -4149,7 +4149,7 @@ TEST(WeakReference) {
 fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(ToCodeT(*code)),
 v8::kReleaseStore);
 fv->set_flags(
- i::FeedbackVector::MaybeHasOptimizedCodeBit::encode(true) |
+ i::FeedbackVector::MaybeHasTurbofanCodeBit::encode(true) |
 i::FeedbackVector::TieringStateBits::encode(i::TieringState::kNone));

 v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();