[sparkplug] Support bytecode / baseline code flushing with sparkplug

Currently with sparkplug we don't flush bytecode / baseline code of
functions that were tiered up to sparkplug. This CL adds support for
flushing the baseline code / bytecode of functions that have baseline
code. This CL:
1. Updates the BodyDescriptor of JSFunction to treat the Code field of
   JSFunction as a custom weak pointer, where the code is treated as
   weak if the bytecode corresponding to this function is old.
2. Updates GC to handle the functions that had a weak code object
   during the atomic phase of GC.
3. Updates the check for old bytecode to also consider when there is
   baseline code on the function.
This CL doesn't change any heuristics for flushing. The baseline code
will be flushed at the same time as the bytecode.

Change-Id: I6b51e06ebadb917b9f4b0f43f2afebd7f64cd26a
Bug: v8:11947
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2992715
Commit-Queue: Mythri Alle <mythria@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75674}
parent 37019412af
commit ea55438a53
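Before the per-file hunks, here is a compact illustration of the decision this CL introduces: the JSFunction code slot is treated as a flushable (weak) reference only when the function holds baseline code and the corresponding bytecode has aged. The snippet below is a standalone sketch, not V8 code; CodeFlushMode mirrors the enum added in this CL, while FunctionInfo and TreatCodeSlotAsWeak are illustrative stand-ins for JSFunction and ShouldFlushBaselineCode.

// Standalone sketch of the flushing decision (simplified; not the real V8 types).
#include <iostream>

enum class CodeFlushMode { kDoNotFlushCode, kFlushCode, kStressFlushCode };

struct FunctionInfo {
  bool has_baseline_code;  // JSFunction::code() is CodeKind::BASELINE
  bool bytecode_is_old;    // the SharedFunctionInfo's bytecode has aged
};

// Treat the code slot as weak (i.e. as a flushing candidate) only when the
// function has baseline code and its bytecode would itself be flushed.
bool TreatCodeSlotAsWeak(const FunctionInfo& f, CodeFlushMode mode) {
  if (mode == CodeFlushMode::kDoNotFlushCode) return false;
  if (!f.has_baseline_code) return false;
  if (mode == CodeFlushMode::kStressFlushCode) return true;
  return f.bytecode_is_old;
}

int main() {
  FunctionInfo hot{/*has_baseline_code=*/true, /*bytecode_is_old=*/false};
  FunctionInfo cold{/*has_baseline_code=*/true, /*bytecode_is_old=*/true};
  std::cout << TreatCodeSlotAsWeak(hot, CodeFlushMode::kFlushCode) << " "    // 0
            << TreatCodeSlotAsWeak(cold, CodeFlushMode::kFlushCode) << "\n";  // 1
}

The real checks are JSFunction::ShouldFlushBaselineCode() and SharedFunctionInfo::ShouldFlushBytecode() in the hunks below; the sketch only mirrors their shape (stress mode flushes regardless of age, otherwise only old bytecode qualifies).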
@@ -1862,7 +1862,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,

   // Reset the JSFunction if we are recompiling due to the bytecode having been
   // flushed.
-  function->ResetIfBytecodeFlushed();
+  function->ResetIfCodeFlushed();

   Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);
@@ -869,10 +869,10 @@ enum class CompactionSpaceKind {

 enum Executability { NOT_EXECUTABLE, EXECUTABLE };

-enum class BytecodeFlushMode {
-  kDoNotFlushBytecode,
-  kFlushBytecode,
-  kStressFlushBytecode,
+enum class CodeFlushMode {
+  kDoNotFlushCode,
+  kFlushCode,
+  kStressFlushCode,
 };

 // Indicates whether a script should be parsed and compiled in REPL mode.
@@ -430,7 +430,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
   RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.DeoptimizeCode");
-  function.ResetIfBytecodeFlushed();
+  function.ResetIfCodeFlushed();
   if (code.is_null()) code = function.code();

   if (CodeKindCanDeoptimize(code.kind())) {
@@ -86,7 +86,7 @@ class ConcurrentMarkingVisitor final
                            MarkingWorklists::Local* local_marking_worklists,
                            WeakObjects* weak_objects, Heap* heap,
                            unsigned mark_compact_epoch,
-                           BytecodeFlushMode bytecode_flush_mode,
+                           CodeFlushMode bytecode_flush_mode,
                            bool embedder_tracing_enabled, bool is_forced_gc,
                            MemoryChunkDataMap* memory_chunk_data)
       : MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
@@ -359,7 +359,7 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
 class ConcurrentMarking::JobTask : public v8::JobTask {
  public:
   JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
-          BytecodeFlushMode bytecode_flush_mode, bool is_forced_gc)
+          CodeFlushMode bytecode_flush_mode, bool is_forced_gc)
       : concurrent_marking_(concurrent_marking),
         mark_compact_epoch_(mark_compact_epoch),
         bytecode_flush_mode_(bytecode_flush_mode),
@@ -391,7 +391,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
  private:
   ConcurrentMarking* concurrent_marking_;
   const unsigned mark_compact_epoch_;
-  BytecodeFlushMode bytecode_flush_mode_;
+  CodeFlushMode bytecode_flush_mode_;
   const bool is_forced_gc_;
 };

@@ -412,7 +412,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
 }

 void ConcurrentMarking::Run(JobDelegate* delegate,
-                            BytecodeFlushMode bytecode_flush_mode,
+                            CodeFlushMode bytecode_flush_mode,
                             unsigned mark_compact_epoch, bool is_forced_gc) {
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
@@ -528,6 +528,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
     weak_objects_->weak_cells.FlushToGlobal(task_id);
     weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
     weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
+    weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
     weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     total_marked_bytes_ += marked_bytes;
@@ -105,7 +105,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
     char cache_line_padding[64];
   };
   class JobTask;
-  void Run(JobDelegate* delegate, BytecodeFlushMode bytecode_flush_mode,
+  void Run(JobDelegate* delegate, CodeFlushMode bytecode_flush_mode,
            unsigned mark_compact_epoch, bool is_forced_gc);
   size_t GetMaxConcurrency(size_t worker_count);
@@ -84,16 +84,16 @@ Address AllocationResult::ToAddress() {
 }

 // static
-BytecodeFlushMode Heap::GetBytecodeFlushMode(Isolate* isolate) {
+CodeFlushMode Heap::GetCodeFlushMode(Isolate* isolate) {
   if (isolate->disable_bytecode_flushing()) {
-    return BytecodeFlushMode::kDoNotFlushBytecode;
+    return CodeFlushMode::kDoNotFlushCode;
   }
   if (FLAG_stress_flush_bytecode) {
-    return BytecodeFlushMode::kStressFlushBytecode;
+    return CodeFlushMode::kStressFlushCode;
   } else if (FLAG_flush_bytecode) {
-    return BytecodeFlushMode::kFlushBytecode;
+    return CodeFlushMode::kFlushCode;
   }
-  return BytecodeFlushMode::kDoNotFlushBytecode;
+  return CodeFlushMode::kDoNotFlushCode;
 }

 Isolate* Heap::isolate() {
@@ -460,7 +460,7 @@ class Heap {

   // Helper function to get the bytecode flushing mode based on the flags. This
   // is required because it is not safe to acess flags in concurrent marker.
-  static inline BytecodeFlushMode GetBytecodeFlushMode(Isolate* isolate);
+  static inline CodeFlushMode GetCodeFlushMode(Isolate* isolate);

   static uintptr_t ZapValue() {
     return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
@@ -519,7 +519,7 @@ void MarkCompactCollector::StartMarking() {
       contexts.push_back(context->ptr());
     }
   }
-  bytecode_flush_mode_ = Heap::GetBytecodeFlushMode(isolate());
+  bytecode_flush_mode_ = Heap::GetCodeFlushMode(isolate());
   marking_worklists()->CreateContextWorklists(contexts);
   local_marking_worklists_ =
       std::make_unique<MarkingWorklists::Local>(marking_worklists());
@@ -2086,7 +2086,7 @@ void MarkCompactCollector::MarkLiveObjects() {
   }

   // We depend on IterateWeakRootsForPhantomHandles being called before
-  // ClearOldBytecodeCandidates in order to identify flushed bytecode in the
+  // ProcessOldCodeCandidates in order to identify flushed bytecode in the
   // CPU profiler.
   {
     heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
@@ -2122,7 +2122,11 @@ void MarkCompactCollector::ClearNonLiveReferences() {

   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
-    ClearOldBytecodeCandidates();
+    // ProcessFlusheBaselineCandidates should be called after clearing bytecode
+    // so that we flush any bytecode if needed so we could correctly set the
+    // code object on the JSFunction.
+    ProcessOldCodeCandidates();
+    ProcessFlushedBaselineCandidates();
   }

   {
@@ -2161,6 +2165,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
   DCHECK(weak_objects_.js_weak_refs.IsEmpty());
   DCHECK(weak_objects_.weak_cells.IsEmpty());
   DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
+  DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
   DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
 }
@@ -2278,21 +2283,59 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
   DCHECK(!shared_info.is_compiled());
 }

-void MarkCompactCollector::ClearOldBytecodeCandidates() {
+void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
+  if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return;
+
+  // Mark baseline data as live.
+  non_atomic_marking_state()->WhiteToBlack(baseline_data);
+
+  // Record object slots.
+  DCHECK(
+      non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code()));
+  ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset);
+  RecordSlot(baseline_data, code, HeapObject::cast(*code));
+
+  DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data()));
+  ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset);
+  RecordSlot(baseline_data, data, HeapObject::cast(*data));
+}
+
+void MarkCompactCollector::ProcessOldCodeCandidates() {
   DCHECK(FLAG_flush_bytecode ||
          weak_objects_.bytecode_flushing_candidates.IsEmpty());
   SharedFunctionInfo flushing_candidate;
   while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask,
                                                         &flushing_candidate)) {
-    // If the BytecodeArray is dead, flush it, which will replace the field with
-    // an uncompiled data object.
-    if (!non_atomic_marking_state()->IsBlackOrGrey(
-            flushing_candidate.GetBytecodeArray(isolate()))) {
+    bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
+        flushing_candidate.GetBytecodeArray(isolate()));
+    if (flushing_candidate.HasBaselineData()) {
+      BaselineData baseline_data = flushing_candidate.baseline_data();
+      if (non_atomic_marking_state()->IsBlackOrGrey(
+              baseline_data.baseline_code())) {
+        // Currently baseline code holds bytecode array strongly and it is
+        // always ensured that bytecode is live if baseline code is live. Hence
+        // baseline code can safely load bytecode array without any additional
+        // checks. In future if this changes we need to update these checks to
+        // flush code if the bytecode is not live and also update baseline code
+        // to bailout if there is no bytecode.
+        DCHECK(is_bytecode_live);
+        MarkBaselineDataAsLive(baseline_data);
+      } else if (is_bytecode_live) {
+        // If baseline code is flushed but we have a valid bytecode array reset
+        // the function_data field to BytecodeArray.
+        flushing_candidate.set_function_data(baseline_data.data(),
+                                             kReleaseStore);
+      }
+    }
+
+    if (!is_bytecode_live) {
+      // If the BytecodeArray is dead, flush it, which will replace the field
+      // with an uncompiled data object.
       FlushBytecodeFromSFI(flushing_candidate);
     }

     // Now record the slot, which has either been updated to an uncompiled data,
-    // or is the BytecodeArray which is still alive.
+    // Baseline code or BytecodeArray which is still alive.
     ObjectSlot slot =
         flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
     RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
@@ -2308,7 +2351,26 @@ void MarkCompactCollector::ClearFlushedJsFunctions() {
                                      Object target) {
       RecordSlot(object, slot, HeapObject::cast(target));
     };
-    flushed_js_function.ResetIfBytecodeFlushed(gc_notify_updated_slot);
+    flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
   }
 }

+void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
+  DCHECK(FLAG_flush_bytecode ||
+         weak_objects_.baseline_flushing_candidates.IsEmpty());
+  JSFunction flushed_js_function;
+  while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask,
+                                                        &flushed_js_function)) {
+    auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
+                                     Object target) {
+      RecordSlot(object, slot, HeapObject::cast(target));
+    };
+    flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
+
+    // Record the code slot that has been updated either to CompileLazy,
+    // InterpreterEntryTrampoline or baseline code.
+    ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset);
+    RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot));
+  }
+}
+
@@ -2625,6 +2687,7 @@ void MarkCompactCollector::AbortWeakObjects() {
   weak_objects_.js_weak_refs.Clear();
   weak_objects_.weak_cells.Clear();
   weak_objects_.bytecode_flushing_candidates.Clear();
+  weak_objects_.baseline_flushing_candidates.Clear();
   weak_objects_.flushed_js_functions.Clear();
 }
@@ -376,7 +376,7 @@ class MainMarkingVisitor final
                      MarkingWorklists::Local* local_marking_worklists,
                      WeakObjects* weak_objects, Heap* heap,
                      unsigned mark_compact_epoch,
-                     BytecodeFlushMode bytecode_flush_mode,
+                     CodeFlushMode bytecode_flush_mode,
                      bool embedder_tracing_enabled, bool is_forced_gc)
       : MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
             kMainThreadTask, local_marking_worklists, weak_objects, heap,
@@ -570,7 +570,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {

   unsigned epoch() const { return epoch_; }

-  BytecodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
+  CodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }

   explicit MarkCompactCollector(Heap* heap);
   ~MarkCompactCollector() override;
@@ -668,9 +668,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Flushes a weakly held bytecode array from a shared function info.
   void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);

-  // Clears bytecode arrays that have not been executed for multiple
-  // collections.
-  void ClearOldBytecodeCandidates();
+  // Marks the BaselineData as live and records the slots of baseline data
+  // fields. This assumes that the objects in the data fields are alive.
+  void MarkBaselineDataAsLive(BaselineData baseline_data);
+
+  // Clears bytecode arrays / baseline code that have not been executed for
+  // multiple collections.
+  void ProcessOldCodeCandidates();
+  void ProcessFlushedBaselineCandidates();

   // Resets any JSFunctions which have had their bytecode flushed.
   void ClearFlushedJsFunctions();
@@ -793,7 +798,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // that can happen while a GC is happening and we need the
   // bytecode_flush_mode_ to remain the same through out a GC, we record this at
   // the start of each GC.
-  BytecodeFlushMode bytecode_flush_mode_;
+  CodeFlushMode bytecode_flush_mode_;

   friend class FullEvacuator;
   friend class RecordMigratedSlotVisitor;
@@ -132,12 +132,19 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitBytecodeArray(

 template <typename ConcreteVisitor, typename MarkingState>
 int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
-    Map map, JSFunction object) {
-  int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
-  // Check if the JSFunction needs reset due to bytecode being flushed.
-  if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
-      object.NeedsResetDueToFlushedBytecode()) {
-    weak_objects_->flushed_js_functions.Push(task_id_, object);
+    Map map, JSFunction js_function) {
+  int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function);
+  if (js_function.ShouldFlushBaselineCode(bytecode_flush_mode_)) {
+    weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function);
+  } else {
+    VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset));
+    // TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
+    // also include cases where there is old bytecode even when there is no
+    // baseline code and remove this check here.
+    if (bytecode_flush_mode_ != CodeFlushMode::kDoNotFlushCode &&
+        js_function.NeedsResetDueToFlushedBytecode()) {
+      weak_objects_->flushed_js_functions.Push(task_id_, js_function);
+    }
   }
   return size;
 }
@@ -105,7 +105,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
                      MarkingWorklists::Local* local_marking_worklists,
                      WeakObjects* weak_objects, Heap* heap,
                      unsigned mark_compact_epoch,
-                     BytecodeFlushMode bytecode_flush_mode,
+                     CodeFlushMode bytecode_flush_mode,
                      bool is_embedder_tracing_enabled, bool is_forced_gc)
       : local_marking_worklists_(local_marking_worklists),
         weak_objects_(weak_objects),
@@ -199,7 +199,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
   Heap* const heap_;
   const int task_id_;
   const unsigned mark_compact_epoch_;
-  const BytecodeFlushMode bytecode_flush_mode_;
+  const CodeFlushMode bytecode_flush_mode_;
   const bool is_embedder_tracing_enabled_;
   const bool is_forced_gc_;
   const bool is_shared_heap_;
@@ -153,6 +153,21 @@ void WeakObjects::UpdateFlushedJSFunctions(
       });
 }

+void WeakObjects::UpdateBaselineFlushingCandidates(
+    WeakObjectWorklist<JSFunction>& baseline_flush_candidates) {
+  baseline_flush_candidates.Update(
+      [](JSFunction slot_in, JSFunction* slot_out) -> bool {
+        JSFunction forwarded = ForwardingAddress(slot_in);
+
+        if (!forwarded.is_null()) {
+          *slot_out = forwarded;
+          return true;
+        }
+
+        return false;
+      });
+}
+
 #ifdef DEBUG
 template <typename Type>
 bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {
@@ -59,6 +59,7 @@ class TransitionArray;
   F(WeakCell, weak_cells, WeakCells)                                         \
   F(SharedFunctionInfo, bytecode_flushing_candidates,                        \
     BytecodeFlushingCandidates)                                              \
+  F(JSFunction, baseline_flushing_candidates, BaselineFlushingCandidates)    \
   F(JSFunction, flushed_js_functions, FlushedJSFunctions)

 class WeakObjects {
@@ -284,14 +284,36 @@ bool JSFunction::is_compiled() const {
          shared().is_compiled();
 }

-bool JSFunction::NeedsResetDueToFlushedBytecode() {
+bool JSFunction::ShouldFlushBaselineCode(CodeFlushMode mode) {
+  if (mode == CodeFlushMode::kDoNotFlushCode) return false;
   // Do a raw read for shared and code fields here since this function may be
-  // called on a concurrent thread and the JSFunction might not be fully
-  // initialized yet.
+  // called on a concurrent thread. JSFunction itself should be fully
+  // initialized here but the SharedFunctionInfo, Code objects may not be
+  // initialized. We read using acquire loads to defend against that.
   Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
   if (!maybe_shared.IsSharedFunctionInfo()) return false;

-  Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset);
+  // See crbug.com/v8/11972 for more details on acquire / release semantics for
+  // code field. We don't use release stores when copying code pointers from
+  // SFI / FV to JSFunction but it is safe in practice.
+  Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
   if (!maybe_code.IsCodeT()) return false;
   Code code = FromCodeT(CodeT::cast(maybe_code));
+  if (code.kind() != CodeKind::BASELINE) return false;
+
+  SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
+  return shared.ShouldFlushBytecode(mode);
+}
+
+bool JSFunction::NeedsResetDueToFlushedBytecode() {
+  // Do a raw read for shared and code fields here since this function may be
+  // called on a concurrent thread. JSFunction itself should be fully
+  // initialized here but the SharedFunctionInfo, Code objects may not be
+  // initialized. We read using acquire loads to defend against that.
+  Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
+  if (!maybe_shared.IsSharedFunctionInfo()) return false;
+
+  Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
+  if (!maybe_code.IsCodeT()) return false;
+  Code code = FromCodeT(CodeT::cast(maybe_code), kRelaxedLoad);

@@ -299,15 +321,24 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
   return !shared.is_compiled() && code.builtin_id() != Builtin::kCompileLazy;
 }

-void JSFunction::ResetIfBytecodeFlushed(
+bool JSFunction::NeedsResetDueToFlushedBaselineCode() {
+  return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData();
+}
+
+void JSFunction::ResetIfCodeFlushed(
     base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
                                       HeapObject target)>>
         gc_notify_updated_slot) {
-  if (FLAG_flush_bytecode && NeedsResetDueToFlushedBytecode()) {
+  if (!FLAG_flush_bytecode) return;
+
+  if (NeedsResetDueToFlushedBytecode()) {
     // Bytecode was flushed and function is now uncompiled, reset JSFunction
     // by setting code to CompileLazy and clearing the feedback vector.
     set_code(*BUILTIN_CODE(GetIsolate(), CompileLazy));
     raw_feedback_cell().reset_feedback_vector(gc_notify_updated_slot);
+  } else if (NeedsResetDueToFlushedBaselineCode()) {
+    // Flush baseline code from the closure if required
+    set_code(*BUILTIN_CODE(GetIsolate(), InterpreterEntryTrampoline));
   }
 }
@@ -1078,7 +1078,7 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
 }

 void JSFunction::ClearTypeFeedbackInfo() {
-  ResetIfBytecodeFlushed();
+  ResetIfCodeFlushed();
   if (has_feedback_vector()) {
     FeedbackVector vector = feedback_vector();
     Isolate* isolate = GetIsolate();
@@ -210,11 +210,19 @@ class JSFunction : public JSFunctionOrBoundFunction {

   // Resets function to clear compiled data after bytecode has been flushed.
   inline bool NeedsResetDueToFlushedBytecode();
-  inline void ResetIfBytecodeFlushed(
+  inline void ResetIfCodeFlushed(
       base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
                                         HeapObject target)>>
           gc_notify_updated_slot = base::nullopt);

+  // Returns if the closure's code field has to be updated because it has
+  // stale baseline code.
+  inline bool NeedsResetDueToFlushedBaselineCode();
+
+  // Returns if baseline code is a candidate for flushing. This method is called
+  // from concurrent marking so we should be careful when accessing data fields.
+  inline bool ShouldFlushBaselineCode(CodeFlushMode mode);
+
   DECL_GETTER(has_prototype_slot, bool)

   // The initial map for an object created by this constructor.
@@ -311,6 +319,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
   static constexpr int kPrototypeOrInitialMapOffset =
       FieldOffsets::kPrototypeOrInitialMapOffset;

+  class BodyDescriptor;
+
  private:
   DECL_ACCESSORS(raw_code, CodeT)
   DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)
@@ -296,6 +296,39 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
   }
 };

+class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+  static const int kStartOffset = JSObject::BodyDescriptor::kStartOffset;
+
+  static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+    if (offset < kStartOffset) return false;
+    return IsValidJSObjectSlotImpl(map, obj, offset);
+  }
+
+  template <typename ObjectVisitor>
+  static inline void IterateBody(Map map, HeapObject obj, int object_size,
+                                 ObjectVisitor* v) {
+    // Iterate JSFunction header fields first.
+    int header_size = JSFunction::GetHeaderSize(map.has_prototype_slot());
+    DCHECK_GE(object_size, header_size);
+    IteratePointers(obj, kStartOffset, kCodeOffset, v);
+    // Code field is treated as a custom weak pointer. This field is visited as
+    // a weak pointer if the Code is baseline code and the bytecode array
+    // corresponding to this function is old. In the rest of the cases this
+    // field is treated as strong pointer.
+    IterateCustomWeakPointer(obj, kCodeOffset, v);
+    // Iterate rest of the header fields
+    DCHECK_GE(header_size, kCodeOffset);
+    IteratePointers(obj, kCodeOffset + kTaggedSize, header_size, v);
+    // Iterate rest of the fields starting after the header.
+    IterateJSObjectBodyImpl(map, obj, header_size, object_size, v);
+  }
+
+  static inline int SizeOf(Map map, HeapObject object) {
+    return map.instance_size();
+  }
+};
+
 class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
  public:
   static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -575,8 +575,8 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
   set_function_data(bytecode, kReleaseStore);
 }

-bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
-  if (mode == BytecodeFlushMode::kDoNotFlushBytecode) return false;
+bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) {
+  if (mode == CodeFlushMode::kDoNotFlushCode) return false;

   // TODO(rmcilroy): Enable bytecode flushing for resumable functions.
   if (IsResumableFunction(kind()) || !allows_lazy_compilation()) {
@@ -587,9 +587,13 @@ bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
   // check if it is old. Note, this is done this way since this function can be
   // called by the concurrent marker.
   Object data = function_data(kAcquireLoad);
+  if (data.IsBaselineData()) {
+    data =
+        ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
+  }
   if (!data.IsBytecodeArray()) return false;

-  if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
+  if (mode == CodeFlushMode::kStressFlushCode) return true;

   BytecodeArray bytecode = BytecodeArray::cast(data);

@@ -534,7 +534,7 @@ class SharedFunctionInfo
   // Returns true if the function has old bytecode that could be flushed. This
   // function shouldn't access any flags as it is used by concurrent marker.
   // Hence it takes the mode as an argument.
-  inline bool ShouldFlushBytecode(BytecodeFlushMode mode);
+  inline bool ShouldFlushBytecode(CodeFlushMode mode);

   enum Inlineability {
     kIsInlineable,
@@ -57,6 +57,10 @@ bitfield struct SharedFunctionInfoFlags2 extends uint8 {
 @customCppClass
 @customMap // Just to place the map at the beginning of the roots array.
 class SharedFunctionInfo extends HeapObject {
+  // function_data field is treated as a custom weak pointer. We visit this
+  // field as a weak pointer if there is aged bytecode. If there is no bytecode
+  // or if the bytecode is young then we treat it as a strong pointer. This is
+  // done to support flushing of bytecode.
   weak function_data: Object;
   name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
   outer_scope_info_or_feedback_metadata: HeapObject;
@@ -238,7 +238,7 @@ void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
       WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
                                                   function_index)
           .ToHandleChecked();
-  exported_function->set_code(*wrapper_code);
+  exported_function->set_code(*wrapper_code, kReleaseStore);
   WasmExportedFunctionData function_data =
       exported_function->shared().wasm_exported_function_data();
   function_data.set_wrapper_code(*wrapper_code);
@@ -175,7 +175,7 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
     // Unconditionally reset the JSFunction to its SFI's code, since we can't
     // serialize optimized code anyway.
     Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
-    closure->ResetIfBytecodeFlushed();
+    closure->ResetIfCodeFlushed();
     if (closure->is_compiled()) {
       if (closure->shared().HasBaselineData()) {
         closure->shared().flush_baseline_data();
test/mjsunit/baseline/flush-baseline-code.js (new file, 82 lines)
@@ -0,0 +1,82 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stress-flush-bytecode --allow-natives-syntax
+// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
+// Flags: --no-always-sparkplug
+
+function HasBaselineCode(f) {
+  let opt_status = %GetOptimizationStatus(f);
+  return (opt_status & V8OptimizationStatus.kBaseline) !== 0;
+}
+
+function HasByteCode(f) {
+  let opt_status = %GetOptimizationStatus(f);
+  return (opt_status & V8OptimizationStatus.kInterpreted) !== 0;
+}
+
+var x = {b:20, c:30};
+function f() {
+  return x.b + 10;
+}
+
+// Test bytecode gets flushed
+f();
+assertTrue(HasByteCode(f));
+gc();
+assertFalse(HasByteCode(f));
+
+// Test baseline code and bytecode gets flushed
+for (i = 1; i < 50; i++) {
+  f();
+}
+assertTrue(HasBaselineCode(f));
+gc();
+assertFalse(HasBaselineCode(f));
+assertFalse(HasByteCode(f));
+
+// Check bytecode isn't flushed if it's held strongly from somewhere but
+// baseline code is flushed.
+function f1(should_recurse) {
+  if (should_recurse) {
+    assertTrue(HasByteCode(f1));
+    for (i = 1; i < 50; i++) {
+      f1(false);
+    }
+    assertTrue(HasBaselineCode(f1));
+    gc();
+    assertFalse(HasBaselineCode(f1));
+    assertTrue(HasByteCode(f1));
+  }
+  return x.b + 10;
+}
+
+f1(false);
+// Recurse first time so we have bytecode array on the stack that keeps
+// bytecode alive.
+f1(true);
+
+// Flush bytecode
+gc();
+assertFalse(HasBaselineCode(f1));
+assertFalse(HasByteCode(f1));
+
+// Check baseline code and bytecode aren't flushed if baseline code is on
+// stack.
+function f2(should_recurse) {
+  if (should_recurse) {
+    assertTrue(HasBaselineCode(f2));
+    f2(false);
+    gc();
+    assertTrue(HasBaselineCode(f2));
+  }
+  return x.b + 10;
+}
+
+for (i = 1; i < 50; i++) {
+  f2(false);
+}
+assertTrue(HasBaselineCode(f2));
+// Recurse with baseline code on stack
+f2(true);