[sparkplug] Introduce flush_baseline_code flag

Introduce a flush_baseline_code flag to control whether baseline code
is flushed. Currently flush_baseline_code implies flush_bytecode, so
enabling it flushes both bytecode and baseline code. When the flag is
disabled (the default), only bytecode is flushed and baseline code is
kept.

A follow-up CL will add support for controlling baseline and bytecode
flushing independently, i.e. flushing only bytecode, only baseline
code, or both.

Bug: v8:11947
Change-Id: I5a90ed38469de64ed1d736d1eaaeabc2985f0783
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3059684
Commit-Queue: Mythri Alle <mythria@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#76003}
Mythri A 2021-07-29 15:34:10 +01:00 committed by V8 LUCI CQ
parent 8db991a042
commit 64556d13a4
15 changed files with 86 additions and 41 deletions
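
For context, a minimal sketch of how the flag settings combine into the new
code flush mode set. It mirrors the Heap::GetCodeFlushMode change below and is
an illustration only, assuming an Isolate* isolate and the V8 FLAG_* globals
are in scope:

  base::EnumSet<CodeFlushMode> mode;
  if (!isolate->disable_bytecode_flushing()) {
    if (FLAG_flush_bytecode) mode.Add(CodeFlushMode::kFlushBytecode);
    // For now --flush-baseline-code implies --flush-bytecode.
    if (FLAG_flush_baseline_code) mode.Add(CodeFlushMode::kFlushBaselineCode);
    if (FLAG_stress_flush_bytecode) mode.Add(CodeFlushMode::kStressFlushCode);
  }
  // mode.empty() means code flushing is disabled.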

View File

@@ -14,6 +14,7 @@
#include "include/v8-internal.h"
#include "src/base/atomic-utils.h"
#include "src/base/build_config.h"
#include "src/base/enum-set.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -880,11 +881,27 @@ enum class CompactionSpaceKind {
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum class CodeFlushMode {
kDoNotFlushCode,
kFlushCode,
kFlushBytecode,
kFlushBaselineCode,
kStressFlushCode,
};
bool inline IsBaselineCodeFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
return mode.contains(CodeFlushMode::kFlushBaselineCode);
}
bool inline IsByteCodeFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
return mode.contains(CodeFlushMode::kFlushBytecode);
}
bool inline IsStressFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
return mode.contains(CodeFlushMode::kStressFlushCode);
}
bool inline IsFlushingDisabled(base::EnumSet<CodeFlushMode> mode) {
return mode.empty();
}
// Indicates whether a script should be parsed and compiled in REPL mode.
enum class REPLMode {
kYes,
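
A hedged usage sketch of the new mode set and the predicates introduced above
(illustration only, not code from this CL; DCHECK and base::EnumSet as used
elsewhere in V8):

  base::EnumSet<CodeFlushMode> mode;
  mode.Add(CodeFlushMode::kFlushBytecode);
  mode.Add(CodeFlushMode::kFlushBaselineCode);
  DCHECK(IsByteCodeFlushingEnabled(mode));
  DCHECK(IsBaselineCodeFlushingEnabled(mode));
  DCHECK(!IsStressFlushingEnabled(mode));
  DCHECK(!IsFlushingDisabled(mode));  // an empty set means flushing is disabled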

View File

@@ -1244,6 +1244,9 @@ DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
DEFINE_BOOL(flush_baseline_code, false,
"flush of baseline code when it has not been executed recently")
DEFINE_IMPLICATION(flush_baseline_code, flush_bytecode)
DEFINE_BOOL(flush_bytecode, true,
"flush of bytecode when it has not been executed recently")
DEFINE_BOOL(stress_flush_bytecode, false, "stress bytecode flushing")

View File

@@ -86,11 +86,11 @@ class ConcurrentMarkingVisitor final
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
CodeFlushMode bytecode_flush_mode,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, bytecode_flush_mode,
mark_compact_epoch, code_flush_mode,
embedder_tracing_enabled, is_forced_gc),
marking_state_(memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
@@ -368,10 +368,10 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
class ConcurrentMarking::JobTask : public v8::JobTask {
public:
JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
CodeFlushMode bytecode_flush_mode, bool is_forced_gc)
base::EnumSet<CodeFlushMode> code_flush_mode, bool is_forced_gc)
: concurrent_marking_(concurrent_marking),
mark_compact_epoch_(mark_compact_epoch),
bytecode_flush_mode_(bytecode_flush_mode),
code_flush_mode_(code_flush_mode),
is_forced_gc_(is_forced_gc) {}
~JobTask() override = default;
@@ -382,14 +382,14 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
void Run(JobDelegate* delegate) override {
if (delegate->IsJoiningThread()) {
// TRACE_GC is not needed here because the caller opens the right scope.
concurrent_marking_->Run(delegate, bytecode_flush_mode_,
mark_compact_epoch_, is_forced_gc_);
concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
is_forced_gc_);
} else {
TRACE_GC_EPOCH(concurrent_marking_->heap_->tracer(),
GCTracer::Scope::MC_BACKGROUND_MARKING,
ThreadKind::kBackground);
concurrent_marking_->Run(delegate, bytecode_flush_mode_,
mark_compact_epoch_, is_forced_gc_);
concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
is_forced_gc_);
}
}
@@ -400,7 +400,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
private:
ConcurrentMarking* concurrent_marking_;
const unsigned mark_compact_epoch_;
CodeFlushMode bytecode_flush_mode_;
base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_forced_gc_;
};
@@ -421,7 +421,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
}
void ConcurrentMarking::Run(JobDelegate* delegate,
CodeFlushMode bytecode_flush_mode,
base::EnumSet<CodeFlushMode> code_flush_mode,
unsigned mark_compact_epoch, bool is_forced_gc) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
@@ -430,7 +430,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, weak_objects_, heap_,
mark_compact_epoch, bytecode_flush_mode,
mark_compact_epoch, code_flush_mode,
heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =

View File

@@ -105,7 +105,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
char cache_line_padding[64];
};
class JobTask;
void Run(JobDelegate* delegate, CodeFlushMode bytecode_flush_mode,
void Run(JobDelegate* delegate, base::EnumSet<CodeFlushMode> code_flush_mode,
unsigned mark_compact_epoch, bool is_forced_gc);
size_t GetMaxConcurrency(size_t worker_count);

View File

@@ -84,16 +84,28 @@ Address AllocationResult::ToAddress() {
}
// static
CodeFlushMode Heap::GetCodeFlushMode(Isolate* isolate) {
base::EnumSet<CodeFlushMode> Heap::GetCodeFlushMode(Isolate* isolate) {
if (isolate->disable_bytecode_flushing()) {
return CodeFlushMode::kDoNotFlushCode;
return base::EnumSet<CodeFlushMode>();
}
base::EnumSet<CodeFlushMode> code_flush_mode;
if (FLAG_flush_bytecode) {
code_flush_mode.Add(CodeFlushMode::kFlushBytecode);
}
if (FLAG_flush_baseline_code) {
// TODO(mythria): Add support to be able to flush baseline code without
// flushing bytecode.
DCHECK(FLAG_flush_bytecode);
code_flush_mode.Add(CodeFlushMode::kFlushBaselineCode);
}
if (FLAG_stress_flush_bytecode) {
return CodeFlushMode::kStressFlushCode;
} else if (FLAG_flush_bytecode) {
return CodeFlushMode::kFlushCode;
code_flush_mode.Add(CodeFlushMode::kStressFlushCode);
}
return CodeFlushMode::kDoNotFlushCode;
return code_flush_mode;
}
Isolate* Heap::isolate() {
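
For illustration, the mode set this returns under a few flag configurations
(assuming the default --flush-bytecode stays enabled; this summary is not part
of the CL):

  // Sketch, assuming an Isolate* isolate in scope:
  base::EnumSet<CodeFlushMode> mode = Heap::GetCodeFlushMode(isolate);
  // Default flags:             mode == {kFlushBytecode}
  // --flush-baseline-code:     mode == {kFlushBytecode, kFlushBaselineCode}
  //                            (the flag implies --flush-bytecode)
  // --stress-flush-bytecode:   mode == {kFlushBytecode, kStressFlushCode}
  // isolate->disable_bytecode_flushing(): mode.empty(), i.e. IsFlushingDisabled(mode)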

View File

@@ -465,7 +465,7 @@ class Heap {
// Helper function to get the bytecode flushing mode based on the flags. This
// is required because it is not safe to acess flags in concurrent marker.
static inline CodeFlushMode GetCodeFlushMode(Isolate* isolate);
static inline base::EnumSet<CodeFlushMode> GetCodeFlushMode(Isolate* isolate);
static uintptr_t ZapValue() {
return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;

View File

@@ -2366,7 +2366,7 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
&flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
if (flushing_candidate.HasBaselineData()) {
if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineData()) {
BaselineData baseline_data = flushing_candidate.baseline_data();
if (non_atomic_marking_state()->IsBlackOrGrey(
baseline_data.baseline_code())) {
@@ -2387,6 +2387,10 @@
}
if (!is_bytecode_live) {
// If baseline code flushing is disabled we should only flush bytecode
// from functions that don't have baseline data.
DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineData());
// If the BytecodeArray is dead, flush it, which will replace the field
// with an uncompiled data object.
FlushBytecodeFromSFI(flushing_candidate);
@@ -2414,7 +2418,7 @@ void MarkCompactCollector::ClearFlushedJsFunctions() {
}
void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
DCHECK(FLAG_flush_bytecode ||
DCHECK(FLAG_flush_baseline_code ||
weak_objects_.baseline_flushing_candidates.IsEmpty());
JSFunction flushed_js_function;
while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask,

View File

@@ -376,11 +376,11 @@ class MainMarkingVisitor final
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
CodeFlushMode bytecode_flush_mode,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
kMainThreadTask, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, bytecode_flush_mode, embedder_tracing_enabled,
mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
is_forced_gc),
marking_state_(marking_state),
revisiting_object_(false) {}
@@ -570,7 +570,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
unsigned epoch() const { return epoch_; }
CodeFlushMode code_flush_mode() const { return code_flush_mode_; }
base::EnumSet<CodeFlushMode> code_flush_mode() const {
return code_flush_mode_;
}
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
@@ -798,7 +800,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// that can happen while a GC is happening and we need the
// code_flush_mode_ to remain the same through out a GC, we record this at
// the start of each GC.
CodeFlushMode code_flush_mode_;
base::EnumSet<CodeFlushMode> code_flush_mode_;
friend class FullEvacuator;
friend class RecordMigratedSlotVisitor;

View File

@@ -151,14 +151,15 @@ template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
Map map, JSFunction js_function) {
int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function);
if (js_function.ShouldFlushBaselineCode(bytecode_flush_mode_)) {
if (js_function.ShouldFlushBaselineCode(code_flush_mode_)) {
DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function);
} else {
VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset));
// TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
// also include cases where there is old bytecode even when there is no
// baseline code and remove this check here.
if (bytecode_flush_mode_ != CodeFlushMode::kDoNotFlushCode &&
if (!IsFlushingDisabled(code_flush_mode_) &&
js_function.NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, js_function);
}
@@ -177,7 +178,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
if (shared_info.ShouldFlushBytecode(bytecode_flush_mode_)) {
if (shared_info.ShouldFlushBytecode(code_flush_mode_)) {
weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
} else {
VisitPointer(shared_info,

View File

@@ -105,14 +105,14 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
CodeFlushMode bytecode_flush_mode,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool is_embedder_tracing_enabled, bool is_forced_gc)
: local_marking_worklists_(local_marking_worklists),
weak_objects_(weak_objects),
heap_(heap),
task_id_(task_id),
mark_compact_epoch_(mark_compact_epoch),
bytecode_flush_mode_(bytecode_flush_mode),
code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
is_forced_gc_(is_forced_gc),
is_shared_heap_(heap->IsShared()) {}
@@ -206,7 +206,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
Heap* const heap_;
const int task_id_;
const unsigned mark_compact_epoch_;
const CodeFlushMode bytecode_flush_mode_;
const base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_embedder_tracing_enabled_;
const bool is_forced_gc_;
const bool is_shared_heap_;

View File

@@ -293,8 +293,9 @@ bool JSFunction::is_compiled() const {
shared().is_compiled();
}
bool JSFunction::ShouldFlushBaselineCode(CodeFlushMode mode) {
if (mode == CodeFlushMode::kDoNotFlushCode) return false;
bool JSFunction::ShouldFlushBaselineCode(
base::EnumSet<CodeFlushMode> code_flush_mode) {
if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
// Do a raw read for shared and code fields here since this function may be
// called on a concurrent thread. JSFunction itself should be fully
// initialized here but the SharedFunctionInfo, Code objects may not be
@@ -311,7 +312,7 @@ bool JSFunction::ShouldFlushBaselineCode(CodeFlushMode mode) {
if (code.kind() != CodeKind::BASELINE) return false;
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
return shared.ShouldFlushBytecode(mode);
return shared.ShouldFlushBytecode(code_flush_mode);
}
bool JSFunction::NeedsResetDueToFlushedBytecode() {
@@ -346,6 +347,7 @@ void JSFunction::ResetIfCodeFlushed(
set_code(*BUILTIN_CODE(GetIsolate(), CompileLazy));
raw_feedback_cell().reset_feedback_vector(gc_notify_updated_slot);
} else if (NeedsResetDueToFlushedBaselineCode()) {
DCHECK(FLAG_flush_baseline_code);
// Flush baseline code from the closure if required
set_code(*BUILTIN_CODE(GetIsolate(), InterpreterEntryTrampoline));
}

View File

@@ -223,7 +223,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Returns if baseline code is a candidate for flushing. This method is called
// from concurrent marking so we should be careful when accessing data fields.
inline bool ShouldFlushBaselineCode(CodeFlushMode mode);
inline bool ShouldFlushBaselineCode(
base::EnumSet<CodeFlushMode> code_flush_mode);
DECL_GETTER(has_prototype_slot, bool)

View File

@@ -583,8 +583,9 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
set_function_data(bytecode, kReleaseStore);
}
bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) {
if (mode == CodeFlushMode::kDoNotFlushCode) return false;
bool SharedFunctionInfo::ShouldFlushBytecode(
base::EnumSet<CodeFlushMode> code_flush_mode) {
if (IsFlushingDisabled(code_flush_mode)) return false;
// TODO(rmcilroy): Enable bytecode flushing for resumable functions.
if (IsResumableFunction(kind()) || !allows_lazy_compilation()) {
@@ -596,12 +597,13 @@ bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) {
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
if (data.IsBaselineData()) {
if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
data =
ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
}
if (!data.IsBytecodeArray()) return false;
if (mode == CodeFlushMode::kStressFlushCode) return true;
if (IsStressFlushingEnabled(code_flush_mode)) return true;
BytecodeArray bytecode = BytecodeArray::cast(data);

View File

@@ -529,7 +529,7 @@ class SharedFunctionInfo
// Returns true if the function has old bytecode that could be flushed. This
// function shouldn't access any flags as it is used by concurrent marker.
// Hence it takes the mode as an argument.
inline bool ShouldFlushBytecode(CodeFlushMode mode);
inline bool ShouldFlushBytecode(base::EnumSet<CodeFlushMode> code_flush_mode);
enum Inlineability {
kIsInlineable,

View File

@@ -5,6 +5,7 @@
// Flags: --expose-gc --stress-flush-bytecode --allow-natives-syntax
// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
// Flags: --no-always-sparkplug --lazy-feedback-allocation
// Flags: --flush-baseline-code
function HasBaselineCode(f) {
let opt_status = %GetOptimizationStatus(f);