[sparkplug] Add support to flush only baseline code

Add support to flush only baseline code. FLAG_flush_baseline_code
controls if baseline code is flushed or not and FLAG_flush_bytecode
controls if bytecode is flushed or not. With this CL it is possible
to control whether we flush only bytecode, only baseline code, or both.
This also lets us have different heuristics for bytecode and baseline
code flushing.

Bug: v8:11947
Change-Id: Ibdfb9d8be7e7d54196db7890541fa0b5d84f037e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3060481
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#76075}
This commit is contained in:
Mythri A 2021-08-03 12:27:54 +01:00 committed by V8 LUCI CQ
parent e33384147c
commit ef7d657960
18 changed files with 115 additions and 35 deletions

View File

@ -1247,12 +1247,10 @@ DEFINE_BOOL(never_compact, false,
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
DEFINE_BOOL(flush_baseline_code, false,
"flush of baseline code when it has not been executed recently")
DEFINE_IMPLICATION(flush_baseline_code, flush_bytecode)
DEFINE_BOOL(flush_bytecode, true,
"flush of bytecode when it has not been executed recently")
DEFINE_BOOL(stress_flush_code, false, "stress code flushing")
DEFINE_BOOL(trace_flush_bytecode, false, "trace bytecode flushing")
DEFINE_IMPLICATION(stress_flush_code, flush_bytecode)
DEFINE_BOOL(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")

View File

@ -536,7 +536,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
weak_objects_->js_weak_refs.FlushToGlobal(task_id);
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
weak_objects_->code_flushing_candidates.FlushToGlobal(task_id);
weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);

View File

@ -96,13 +96,11 @@ base::EnumSet<CodeFlushMode> Heap::GetCodeFlushMode(Isolate* isolate) {
}
if (FLAG_flush_baseline_code) {
// TODO(mythria): Add support to be able to flush baseline code without
// flushing bytecode.
DCHECK(FLAG_flush_bytecode);
code_flush_mode.Add(CodeFlushMode::kFlushBaselineCode);
}
if (FLAG_stress_flush_code) {
DCHECK(FLAG_flush_baseline_code || FLAG_flush_bytecode);
code_flush_mode.Add(CodeFlushMode::kStressFlushCode);
}

View File

@ -2222,7 +2222,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
DCHECK(weak_objects_.js_weak_refs.IsEmpty());
DCHECK(weak_objects_.weak_cells.IsEmpty());
DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.code_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
}
@ -2359,11 +2359,11 @@ void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
}
void MarkCompactCollector::ProcessOldCodeCandidates() {
DCHECK(FLAG_flush_bytecode ||
weak_objects_.bytecode_flushing_candidates.IsEmpty());
DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
weak_objects_.code_flushing_candidates.IsEmpty());
SharedFunctionInfo flushing_candidate;
while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask,
&flushing_candidate)) {
while (weak_objects_.code_flushing_candidates.Pop(kMainThreadTask,
&flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineData()) {
@ -2748,7 +2748,7 @@ void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.weak_objects_in_code.Clear();
weak_objects_.js_weak_refs.Clear();
weak_objects_.weak_cells.Clear();
weak_objects_.bytecode_flushing_candidates.Clear();
weak_objects_.code_flushing_candidates.Clear();
weak_objects_.baseline_flushing_candidates.Clear();
weak_objects_.flushed_js_functions.Clear();
}

View File

@ -159,7 +159,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
// TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
// also include cases where there is old bytecode even when there is no
// baseline code and remove this check here.
if (!IsFlushingDisabled(code_flush_mode_) &&
if (IsByteCodeFlushingEnabled(code_flush_mode_) &&
js_function.NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, js_function);
}
@ -176,13 +176,25 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
this->VisitMapPointer(shared_info);
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size, this);
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
if (shared_info.ShouldFlushBytecode(code_flush_mode_)) {
weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
} else {
if (!shared_info.ShouldFlushCode(code_flush_mode_)) {
// If the SharedFunctionInfo doesn't have old bytecode visit the function
// data strongly.
VisitPointer(shared_info,
shared_info.RawField(SharedFunctionInfo::kFunctionDataOffset));
} else if (!IsByteCodeFlushingEnabled(code_flush_mode_)) {
// If bytecode flushing is disabled but baseline code flushing is enabled
// then we have to visit the bytecode but not the baseline code.
DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
BaselineData baseline_data =
BaselineData::cast(shared_info.function_data(kAcquireLoad));
// Visit the bytecode hanging off baseline data.
VisitPointer(baseline_data,
baseline_data.RawField(BaselineData::kDataOffset));
weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
} else {
// In other cases, record as a flushing candidate since we have old
// bytecode.
weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
}
return size;
}

View File

@ -133,9 +133,9 @@ void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {
DCHECK(!ContainsYoungObjects(weak_cells));
}
void WeakObjects::UpdateBytecodeFlushingCandidates(
WeakObjectWorklist<SharedFunctionInfo>& bytecode_flushing_candidates) {
DCHECK(!ContainsYoungObjects(bytecode_flushing_candidates));
void WeakObjects::UpdateCodeFlushingCandidates(
WeakObjectWorklist<SharedFunctionInfo>& code_flushing_candidates) {
DCHECK(!ContainsYoungObjects(code_flushing_candidates));
}
void WeakObjects::UpdateFlushedJSFunctions(

View File

@ -57,8 +57,7 @@ class TransitionArray;
F(HeapObjectAndCode, weak_objects_in_code, WeakObjectsInCode) \
F(JSWeakRef, js_weak_refs, JSWeakRefs) \
F(WeakCell, weak_cells, WeakCells) \
F(SharedFunctionInfo, bytecode_flushing_candidates, \
BytecodeFlushingCandidates) \
F(SharedFunctionInfo, code_flushing_candidates, CodeFlushingCandidates) \
F(JSFunction, baseline_flushing_candidates, BaselineFlushingCandidates) \
F(JSFunction, flushed_js_functions, FlushedJSFunctions)

View File

@ -312,7 +312,7 @@ bool JSFunction::ShouldFlushBaselineCode(
if (code.kind() != CodeKind::BASELINE) return false;
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
return shared.ShouldFlushBytecode(code_flush_mode);
return shared.ShouldFlushCode(code_flush_mode);
}
bool JSFunction::NeedsResetDueToFlushedBytecode() {
@ -339,14 +339,20 @@ void JSFunction::ResetIfCodeFlushed(
base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
HeapObject target)>>
gc_notify_updated_slot) {
if (!FLAG_flush_bytecode) return;
if (!FLAG_flush_bytecode && !FLAG_flush_baseline_code) return;
if (NeedsResetDueToFlushedBytecode()) {
DCHECK_IMPLIES(NeedsResetDueToFlushedBytecode(), FLAG_flush_bytecode);
if (FLAG_flush_bytecode && NeedsResetDueToFlushedBytecode()) {
// Bytecode was flushed and function is now uncompiled, reset JSFunction
// by setting code to CompileLazy and clearing the feedback vector.
set_code(*BUILTIN_CODE(GetIsolate(), CompileLazy));
raw_feedback_cell().reset_feedback_vector(gc_notify_updated_slot);
} else if (NeedsResetDueToFlushedBaselineCode()) {
return;
}
DCHECK_IMPLIES(NeedsResetDueToFlushedBaselineCode(),
FLAG_flush_baseline_code);
if (FLAG_flush_baseline_code && NeedsResetDueToFlushedBaselineCode()) {
DCHECK(FLAG_flush_baseline_code);
// Flush baseline code from the closure if required
set_code(*BUILTIN_CODE(GetIsolate(), InterpreterEntryTrampoline));

View File

@ -583,7 +583,7 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
set_function_data(bytecode, kReleaseStore);
}
bool SharedFunctionInfo::ShouldFlushBytecode(
bool SharedFunctionInfo::ShouldFlushCode(
base::EnumSet<CodeFlushMode> code_flush_mode) {
if (IsFlushingDisabled(code_flush_mode)) return false;
@ -597,9 +597,15 @@ bool SharedFunctionInfo::ShouldFlushBytecode(
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
if (data.IsBaselineData()) {
// If baseline code flushing isn't enabled and we have baseline data on SFI
// we cannot flush baseline / bytecode.
if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
data =
ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
} else if (!IsByteCodeFlushingEnabled(code_flush_mode)) {
// If bytecode flushing isn't enabled and there is no baseline code there is
// nothing to flush.
return false;
}
if (!data.IsBytecodeArray()) return false;

View File

@ -529,7 +529,7 @@ class SharedFunctionInfo
// Returns true if the function has old bytecode that could be flushed. This
// function shouldn't access any flags as it is used by concurrent marker.
// Hence it takes the mode as an argument.
inline bool ShouldFlushBytecode(base::EnumSet<CodeFlushMode> code_flush_mode);
inline bool ShouldFlushCode(base::EnumSet<CodeFlushMode> code_flush_mode);
enum Inlineability {
kIsInlineable,

View File

@ -424,6 +424,7 @@ TEST(SFIDeduplicationClasses) {
TEST(SFIDeduplicationAfterBytecodeFlushing) {
FLAG_stress_flush_code = true;
FLAG_flush_bytecode = true;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@ -509,6 +510,7 @@ TEST(SFIDeduplicationAfterBytecodeFlushing) {
TEST(SFIDeduplicationAfterBytecodeFlushingClasses) {
FLAG_stress_flush_code = true;
FLAG_flush_bytecode = true;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();

View File

@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt --noalways-opt --stress-flush-code
// Flags: --expose-gc
// Flags: --expose-gc --flush-bytecode
Debug = debug.Debug

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-gc --stress-flush-code
// Flags: --expose-gc --stress-flush-code --flush-bytecode
var Debug = debug.Debug
var bp;

View File

@ -5,7 +5,7 @@
// Flags: --expose-gc --stress-flush-code --allow-natives-syntax
// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
// Flags: --no-always-sparkplug --lazy-feedback-allocation
// Flags: --flush-baseline-code
// Flags: --flush-baseline-code --flush-bytecode
function HasBaselineCode(f) {
let opt_status = %GetOptimizationStatus(f);

View File

@ -0,0 +1,57 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-gc --stress-flush-code --allow-natives-syntax
// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
// Flags: --no-always-sparkplug --lazy-feedback-allocation
// Flags: --flush-baseline-code --no-flush-bytecode
function HasBaselineCode(f) {
let opt_status = %GetOptimizationStatus(f);
return (opt_status & V8OptimizationStatus.kBaseline) !== 0;
}
function HasByteCode(f) {
let opt_status = %GetOptimizationStatus(f);
return (opt_status & V8OptimizationStatus.kInterpreted) !== 0;
}
var x = {b:20, c:30};
function f() {
return x.b + 10;
}
// Test bytecode gets flushed
f();
assertTrue(HasByteCode(f));
gc();
assertTrue(HasByteCode(f));
// Test baseline code gets flushed but not bytecode.
for (i = 1; i < 50; i++) {
f();
}
assertTrue(HasBaselineCode(f));
gc();
assertFalse(HasBaselineCode(f));
assertTrue(HasByteCode(f));
// Check baseline code and bytecode aren't flushed if baseline code is on
// stack.
function f2(should_recurse) {
if (should_recurse) {
assertTrue(HasBaselineCode(f2));
f2(false);
gc();
assertTrue(HasBaselineCode(f2));
}
return x.b + 10;
}
for (i = 1; i < 50; i++) {
f2(false);
}
assertTrue(HasBaselineCode(f2));
// Recurse with baseline code on stack
f2(true);

View File

@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --opt --allow-natives-syntax --expose-gc --flush-bytecode
// Flags: --stress-flush-code
// Flags: --stress-flush-code --flush-bytecode
function foo(a) {}

View File

@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --interrupt-budget=200 --stack-size=200 --budget-for-feedback-vector-allocation=100 --expose-gc --stress-flush-code
// Flags: --interrupt-budget=200 --stack-size=200
// Flags: --budget-for-feedback-vector-allocation=100 --expose-gc
// Flags: --stress-flush-code --flush-bytecode
var i = 0;
function main() {

View File

@ -57,7 +57,7 @@ GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
'--concurrent-recompilation-queue-length=64',
'--concurrent-recompilation-delay=500',
'--concurrent-recompilation',
'--stress-flush-code',
'--stress-flush-code', '--flush-bytecode',
'--wasm-code-gc', '--stress-wasm-code-gc']
RANDOM_GC_STRESS_FLAGS = ['--random-gc-interval=5000',