From a079f057981bfc3e801226fbb20073478ab078fb Mon Sep 17 00:00:00 2001
From: Mythri Alle
Date: Mon, 12 Jul 2021 11:57:00 +0000
Subject: [PATCH] Revert "[sparkplug] Support bytecode / baseline code flushing with sparkplug"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit ea55438a53ebba990e9dc206b148cff98838bf75.

Reason for revert: Likely culprit for these failures:
https://ci.chromium.org/ui/p/v8/builders/ci/V8%20NumFuzz/15494/overview

Original change's description:
> [sparkplug] Support bytecode / baseline code flushing with sparkplug
>
> Currently with sparkplug we don't flush bytecode / baseline code of
> functions that were tiered up to sparkplug. This CL adds the support to
> flush baseline code / bytecode of functions that have baseline code too.
> This CL:
> 1. Updates the BodyDescriptor of JSFunction to treat the Code field of
> JSFunction as a custom weak pointer where the code is treated as weak if
> the bytecode corresponding to this function is old.
> 2. Updates GC to handle the functions that had a weak code object during
> the atomic phase of GC.
> 3. Updates the check for old bytecode to also consider when there is
> baseline code on the function.
>
> This CL doesn't change any heuristics for flushing. The baseline code
> will be flushed at the same time as bytecode.
>
> Change-Id: I6b51e06ebadb917b9f4b0f43f2afebd7f64cd26a
> Bug: v8:11947
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2992715
> Commit-Queue: Mythri Alle
> Reviewed-by: Andreas Haas
> Reviewed-by: Toon Verwaest
> Reviewed-by: Dominik Inführ
> Cr-Commit-Position: refs/heads/master@{#75674}

Bug: v8:11947
Change-Id: I50535b9a6c6fc39eceb4f6c0e0c84c55bb92f30a
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3017811
Reviewed-by: Mythri Alle
Commit-Queue: Mythri Alle
Cr-Commit-Position: refs/heads/master@{#75679}
---
 src/codegen/compiler.cc                      |  2 +-
 src/common/globals.h                         |  8 +-
 src/deoptimizer/deoptimizer.cc               |  2 +-
 src/heap/concurrent-marking.cc               |  9 +--
 src/heap/concurrent-marking.h                |  2 +-
 src/heap/heap-inl.h                          | 10 +--
 src/heap/heap.h                              |  2 +-
 src/heap/mark-compact.cc                     | 83 +++-----------------
 src/heap/mark-compact.h                      | 17 ++--
 src/heap/marking-visitor-inl.h               | 19 ++---
 src/heap/marking-visitor.h                   |  4 +-
 src/heap/weak-object-worklists.cc            | 15 ----
 src/heap/weak-object-worklists.h             |  1 -
 src/objects/js-function-inl.h                | 41 ++--
 src/objects/js-function.cc                   |  2 +-
 src/objects/js-function.h                    | 12 +--
 src/objects/objects-body-descriptors-inl.h   | 33 --------
 src/objects/shared-function-info-inl.h       | 10 +--
 src/objects/shared-function-info.h           |  2 +-
 src/objects/shared-function-info.tq          |  4 -
 src/runtime/runtime-wasm.cc                  |  2 +-
 src/snapshot/context-serializer.cc           |  2 +-
 test/mjsunit/baseline/flush-baseline-code.js | 82 -------------------
 23 files changed, 54 insertions(+), 310 deletions(-)
 delete mode 100644 test/mjsunit/baseline/flush-baseline-code.js

diff --git a/src/codegen/compiler.cc b/src/codegen/compiler.cc
index b4535dbaaa..99f6d725f9 100644
--- a/src/codegen/compiler.cc
+++ b/src/codegen/compiler.cc
@@ -1862,7 +1862,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
   // Reset the JSFunction if we are recompiling due to the bytecode having been
   // flushed.
- function->ResetIfCodeFlushed(); + function->ResetIfBytecodeFlushed(); Handle shared_info = handle(function->shared(), isolate); diff --git a/src/common/globals.h b/src/common/globals.h index b252fd2cf6..9a06221e82 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -869,10 +869,10 @@ enum class CompactionSpaceKind { enum Executability { NOT_EXECUTABLE, EXECUTABLE }; -enum class CodeFlushMode { - kDoNotFlushCode, - kFlushCode, - kStressFlushCode, +enum class BytecodeFlushMode { + kDoNotFlushBytecode, + kFlushBytecode, + kStressFlushBytecode, }; // Indicates whether a script should be parsed and compiled in REPL mode. diff --git a/src/deoptimizer/deoptimizer.cc b/src/deoptimizer/deoptimizer.cc index dc3896b313..72ca85f41e 100644 --- a/src/deoptimizer/deoptimizer.cc +++ b/src/deoptimizer/deoptimizer.cc @@ -430,7 +430,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) { RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode); TimerEventScope timer(isolate); TRACE_EVENT0("v8", "V8.DeoptimizeCode"); - function.ResetIfCodeFlushed(); + function.ResetIfBytecodeFlushed(); if (code.is_null()) code = function.code(); if (CodeKindCanDeoptimize(code.kind())) { diff --git a/src/heap/concurrent-marking.cc b/src/heap/concurrent-marking.cc index 1870796058..3decc57882 100644 --- a/src/heap/concurrent-marking.cc +++ b/src/heap/concurrent-marking.cc @@ -86,7 +86,7 @@ class ConcurrentMarkingVisitor final MarkingWorklists::Local* local_marking_worklists, WeakObjects* weak_objects, Heap* heap, unsigned mark_compact_epoch, - CodeFlushMode bytecode_flush_mode, + BytecodeFlushMode bytecode_flush_mode, bool embedder_tracing_enabled, bool is_forced_gc, MemoryChunkDataMap* memory_chunk_data) : MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap, @@ -359,7 +359,7 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) { class ConcurrentMarking::JobTask : public v8::JobTask { public: JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch, - CodeFlushMode bytecode_flush_mode, bool is_forced_gc) + BytecodeFlushMode bytecode_flush_mode, bool is_forced_gc) : concurrent_marking_(concurrent_marking), mark_compact_epoch_(mark_compact_epoch), bytecode_flush_mode_(bytecode_flush_mode), @@ -391,7 +391,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask { private: ConcurrentMarking* concurrent_marking_; const unsigned mark_compact_epoch_; - CodeFlushMode bytecode_flush_mode_; + BytecodeFlushMode bytecode_flush_mode_; const bool is_forced_gc_; }; @@ -412,7 +412,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, } void ConcurrentMarking::Run(JobDelegate* delegate, - CodeFlushMode bytecode_flush_mode, + BytecodeFlushMode bytecode_flush_mode, unsigned mark_compact_epoch, bool is_forced_gc) { size_t kBytesUntilInterruptCheck = 64 * KB; int kObjectsUntilInterrupCheck = 1000; @@ -528,7 +528,6 @@ void ConcurrentMarking::Run(JobDelegate* delegate, weak_objects_->weak_cells.FlushToGlobal(task_id); weak_objects_->weak_objects_in_code.FlushToGlobal(task_id); weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id); - weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id); weak_objects_->flushed_js_functions.FlushToGlobal(task_id); base::AsAtomicWord::Relaxed_Store(&task_state->marked_bytes, 0); total_marked_bytes_ += marked_bytes; diff --git a/src/heap/concurrent-marking.h b/src/heap/concurrent-marking.h index ef1a41559d..c685f5cca6 100644 --- a/src/heap/concurrent-marking.h +++ b/src/heap/concurrent-marking.h @@ 
-105,7 +105,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking { char cache_line_padding[64]; }; class JobTask; - void Run(JobDelegate* delegate, CodeFlushMode bytecode_flush_mode, + void Run(JobDelegate* delegate, BytecodeFlushMode bytecode_flush_mode, unsigned mark_compact_epoch, bool is_forced_gc); size_t GetMaxConcurrency(size_t worker_count); diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h index 82ab7175b3..bfe682204b 100644 --- a/src/heap/heap-inl.h +++ b/src/heap/heap-inl.h @@ -84,16 +84,16 @@ Address AllocationResult::ToAddress() { } // static -CodeFlushMode Heap::GetCodeFlushMode(Isolate* isolate) { +BytecodeFlushMode Heap::GetBytecodeFlushMode(Isolate* isolate) { if (isolate->disable_bytecode_flushing()) { - return CodeFlushMode::kDoNotFlushCode; + return BytecodeFlushMode::kDoNotFlushBytecode; } if (FLAG_stress_flush_bytecode) { - return CodeFlushMode::kStressFlushCode; + return BytecodeFlushMode::kStressFlushBytecode; } else if (FLAG_flush_bytecode) { - return CodeFlushMode::kFlushCode; + return BytecodeFlushMode::kFlushBytecode; } - return CodeFlushMode::kDoNotFlushCode; + return BytecodeFlushMode::kDoNotFlushBytecode; } Isolate* Heap::isolate() { diff --git a/src/heap/heap.h b/src/heap/heap.h index cf8f142d99..e287276ba9 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -460,7 +460,7 @@ class Heap { // Helper function to get the bytecode flushing mode based on the flags. This // is required because it is not safe to acess flags in concurrent marker. - static inline CodeFlushMode GetCodeFlushMode(Isolate* isolate); + static inline BytecodeFlushMode GetBytecodeFlushMode(Isolate* isolate); static uintptr_t ZapValue() { return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue; diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 5b3d9c347d..d6c644696d 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -519,7 +519,7 @@ void MarkCompactCollector::StartMarking() { contexts.push_back(context->ptr()); } } - bytecode_flush_mode_ = Heap::GetCodeFlushMode(isolate()); + bytecode_flush_mode_ = Heap::GetBytecodeFlushMode(isolate()); marking_worklists()->CreateContextWorklists(contexts); local_marking_worklists_ = std::make_unique(marking_worklists()); @@ -2086,7 +2086,7 @@ void MarkCompactCollector::MarkLiveObjects() { } // We depend on IterateWeakRootsForPhantomHandles being called before - // ProcessOldCodeCandidates in order to identify flushed bytecode in the + // ClearOldBytecodeCandidates in order to identify flushed bytecode in the // CPU profiler. { heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles( @@ -2122,11 +2122,7 @@ void MarkCompactCollector::ClearNonLiveReferences() { { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE); - // ProcessFlusheBaselineCandidates should be called after clearing bytecode - // so that we flush any bytecode if needed so we could correctly set the - // code object on the JSFunction. 
- ProcessOldCodeCandidates(); - ProcessFlushedBaselineCandidates(); + ClearOldBytecodeCandidates(); } { @@ -2165,7 +2161,6 @@ void MarkCompactCollector::ClearNonLiveReferences() { DCHECK(weak_objects_.js_weak_refs.IsEmpty()); DCHECK(weak_objects_.weak_cells.IsEmpty()); DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty()); - DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty()); DCHECK(weak_objects_.flushed_js_functions.IsEmpty()); } @@ -2283,59 +2278,21 @@ void MarkCompactCollector::FlushBytecodeFromSFI( DCHECK(!shared_info.is_compiled()); } -void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) { - if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return; - - // Mark baseline data as live. - non_atomic_marking_state()->WhiteToBlack(baseline_data); - - // Record object slots. - DCHECK( - non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code())); - ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset); - RecordSlot(baseline_data, code, HeapObject::cast(*code)); - - DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data())); - ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset); - RecordSlot(baseline_data, data, HeapObject::cast(*data)); -} - -void MarkCompactCollector::ProcessOldCodeCandidates() { +void MarkCompactCollector::ClearOldBytecodeCandidates() { DCHECK(FLAG_flush_bytecode || weak_objects_.bytecode_flushing_candidates.IsEmpty()); SharedFunctionInfo flushing_candidate; while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask, &flushing_candidate)) { - bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey( - flushing_candidate.GetBytecodeArray(isolate())); - if (flushing_candidate.HasBaselineData()) { - BaselineData baseline_data = flushing_candidate.baseline_data(); - if (non_atomic_marking_state()->IsBlackOrGrey( - baseline_data.baseline_code())) { - // Currently baseline code holds bytecode array strongly and it is - // always ensured that bytecode is live if baseline code is live. Hence - // baseline code can safely load bytecode array without any additional - // checks. In future if this changes we need to update these checks to - // flush code if the bytecode is not live and also update baseline code - // to bailout if there is no bytecode. - DCHECK(is_bytecode_live); - MarkBaselineDataAsLive(baseline_data); - } else if (is_bytecode_live) { - // If baseline code is flushed but we have a valid bytecode array reset - // the function_data field to BytecodeArray. - flushing_candidate.set_function_data(baseline_data.data(), - kReleaseStore); - } - } - - if (!is_bytecode_live) { - // If the BytecodeArray is dead, flush it, which will replace the field - // with an uncompiled data object. + // If the BytecodeArray is dead, flush it, which will replace the field with + // an uncompiled data object. + if (!non_atomic_marking_state()->IsBlackOrGrey( + flushing_candidate.GetBytecodeArray(isolate()))) { FlushBytecodeFromSFI(flushing_candidate); } // Now record the slot, which has either been updated to an uncompiled data, - // Baseline code or BytecodeArray which is still alive. + // or is the BytecodeArray which is still alive. 
ObjectSlot slot = flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset); RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot)); @@ -2351,26 +2308,7 @@ void MarkCompactCollector::ClearFlushedJsFunctions() { Object target) { RecordSlot(object, slot, HeapObject::cast(target)); }; - flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot); - } -} - -void MarkCompactCollector::ProcessFlushedBaselineCandidates() { - DCHECK(FLAG_flush_bytecode || - weak_objects_.baseline_flushing_candidates.IsEmpty()); - JSFunction flushed_js_function; - while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask, - &flushed_js_function)) { - auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot, - Object target) { - RecordSlot(object, slot, HeapObject::cast(target)); - }; - flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot); - - // Record the code slot that has been updated either to CompileLazy, - // InterpreterEntryTrampoline or baseline code. - ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset); - RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot)); + flushed_js_function.ResetIfBytecodeFlushed(gc_notify_updated_slot); } } @@ -2687,7 +2625,6 @@ void MarkCompactCollector::AbortWeakObjects() { weak_objects_.js_weak_refs.Clear(); weak_objects_.weak_cells.Clear(); weak_objects_.bytecode_flushing_candidates.Clear(); - weak_objects_.baseline_flushing_candidates.Clear(); weak_objects_.flushed_js_functions.Clear(); } diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index b4cebf9163..b077522213 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -376,7 +376,7 @@ class MainMarkingVisitor final MarkingWorklists::Local* local_marking_worklists, WeakObjects* weak_objects, Heap* heap, unsigned mark_compact_epoch, - CodeFlushMode bytecode_flush_mode, + BytecodeFlushMode bytecode_flush_mode, bool embedder_tracing_enabled, bool is_forced_gc) : MarkingVisitorBase, MarkingState>( kMainThreadTask, local_marking_worklists, weak_objects, heap, @@ -570,7 +570,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase { unsigned epoch() const { return epoch_; } - CodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; } + BytecodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; } explicit MarkCompactCollector(Heap* heap); ~MarkCompactCollector() override; @@ -668,14 +668,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase { // Flushes a weakly held bytecode array from a shared function info. void FlushBytecodeFromSFI(SharedFunctionInfo shared_info); - // Marks the BaselineData as live and records the slots of baseline data - // fields. This assumes that the objects in the data fields are alive. - void MarkBaselineDataAsLive(BaselineData baseline_data); - - // Clears bytecode arrays / baseline code that have not been executed for - // multiple collections. - void ProcessOldCodeCandidates(); - void ProcessFlushedBaselineCandidates(); + // Clears bytecode arrays that have not been executed for multiple + // collections. + void ClearOldBytecodeCandidates(); // Resets any JSFunctions which have had their bytecode flushed. void ClearFlushedJsFunctions(); @@ -798,7 +793,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase { // that can happen while a GC is happening and we need the // bytecode_flush_mode_ to remain the same through out a GC, we record this at // the start of each GC. 
- CodeFlushMode bytecode_flush_mode_; + BytecodeFlushMode bytecode_flush_mode_; friend class FullEvacuator; friend class RecordMigratedSlotVisitor; diff --git a/src/heap/marking-visitor-inl.h b/src/heap/marking-visitor-inl.h index 39b5a67eea..3468d732bf 100644 --- a/src/heap/marking-visitor-inl.h +++ b/src/heap/marking-visitor-inl.h @@ -132,19 +132,12 @@ int MarkingVisitorBase::VisitBytecodeArray( template int MarkingVisitorBase::VisitJSFunction( - Map map, JSFunction js_function) { - int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function); - if (js_function.ShouldFlushBaselineCode(bytecode_flush_mode_)) { - weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function); - } else { - VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset)); - // TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to - // also include cases where there is old bytecode even when there is no - // baseline code and remove this check here. - if (bytecode_flush_mode_ != CodeFlushMode::kDoNotFlushCode && - js_function.NeedsResetDueToFlushedBytecode()) { - weak_objects_->flushed_js_functions.Push(task_id_, js_function); - } + Map map, JSFunction object) { + int size = concrete_visitor()->VisitJSObjectSubclass(map, object); + // Check if the JSFunction needs reset due to bytecode being flushed. + if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode && + object.NeedsResetDueToFlushedBytecode()) { + weak_objects_->flushed_js_functions.Push(task_id_, object); } return size; } diff --git a/src/heap/marking-visitor.h b/src/heap/marking-visitor.h index 89da17b53d..f8795aadfd 100644 --- a/src/heap/marking-visitor.h +++ b/src/heap/marking-visitor.h @@ -105,7 +105,7 @@ class MarkingVisitorBase : public HeapVisitor { MarkingWorklists::Local* local_marking_worklists, WeakObjects* weak_objects, Heap* heap, unsigned mark_compact_epoch, - CodeFlushMode bytecode_flush_mode, + BytecodeFlushMode bytecode_flush_mode, bool is_embedder_tracing_enabled, bool is_forced_gc) : local_marking_worklists_(local_marking_worklists), weak_objects_(weak_objects), @@ -199,7 +199,7 @@ class MarkingVisitorBase : public HeapVisitor { Heap* const heap_; const int task_id_; const unsigned mark_compact_epoch_; - const CodeFlushMode bytecode_flush_mode_; + const BytecodeFlushMode bytecode_flush_mode_; const bool is_embedder_tracing_enabled_; const bool is_forced_gc_; const bool is_shared_heap_; diff --git a/src/heap/weak-object-worklists.cc b/src/heap/weak-object-worklists.cc index 39e6b3a3a9..84df473076 100644 --- a/src/heap/weak-object-worklists.cc +++ b/src/heap/weak-object-worklists.cc @@ -153,21 +153,6 @@ void WeakObjects::UpdateFlushedJSFunctions( }); } -void WeakObjects::UpdateBaselineFlushingCandidates( - WeakObjectWorklist& baseline_flush_candidates) { - baseline_flush_candidates.Update( - [](JSFunction slot_in, JSFunction* slot_out) -> bool { - JSFunction forwarded = ForwardingAddress(slot_in); - - if (!forwarded.is_null()) { - *slot_out = forwarded; - return true; - } - - return false; - }); -} - #ifdef DEBUG template bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist& worklist) { diff --git a/src/heap/weak-object-worklists.h b/src/heap/weak-object-worklists.h index 50ed99541f..67df372b57 100644 --- a/src/heap/weak-object-worklists.h +++ b/src/heap/weak-object-worklists.h @@ -59,7 +59,6 @@ class TransitionArray; F(WeakCell, weak_cells, WeakCells) \ F(SharedFunctionInfo, bytecode_flushing_candidates, \ BytecodeFlushingCandidates) \ - F(JSFunction, 
baseline_flushing_candidates, BaselineFlushingCandidates) \ F(JSFunction, flushed_js_functions, FlushedJSFunctions) class WeakObjects { diff --git a/src/objects/js-function-inl.h b/src/objects/js-function-inl.h index 895371449b..bae63f6ef9 100644 --- a/src/objects/js-function-inl.h +++ b/src/objects/js-function-inl.h @@ -284,36 +284,14 @@ bool JSFunction::is_compiled() const { shared().is_compiled(); } -bool JSFunction::ShouldFlushBaselineCode(CodeFlushMode mode) { - if (mode == CodeFlushMode::kDoNotFlushCode) return false; - // Do a raw read for shared and code fields here since this function may be - // called on a concurrent thread. JSFunction itself should be fully - // initialized here but the SharedFunctionInfo, Code objects may not be - // initialized. We read using acquire loads to defend against that. - Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset); - if (!maybe_shared.IsSharedFunctionInfo()) return false; - - // See crbug.com/v8/11972 for more details on acquire / release semantics for - // code field. We don't use release stores when copying code pointers from - // SFI / FV to JSFunction but it is safe in practice. - Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset); - if (!maybe_code.IsCodeT()) return false; - Code code = FromCodeT(CodeT::cast(maybe_code)); - if (code.kind() != CodeKind::BASELINE) return false; - - SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared); - return shared.ShouldFlushBytecode(mode); -} - bool JSFunction::NeedsResetDueToFlushedBytecode() { // Do a raw read for shared and code fields here since this function may be - // called on a concurrent thread. JSFunction itself should be fully - // initialized here but the SharedFunctionInfo, Code objects may not be - // initialized. We read using acquire loads to defend against that. + // called on a concurrent thread and the JSFunction might not be fully + // initialized yet. Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset); if (!maybe_shared.IsSharedFunctionInfo()) return false; - Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset); + Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset); if (!maybe_code.IsCodeT()) return false; Code code = FromCodeT(CodeT::cast(maybe_code), kRelaxedLoad); @@ -321,24 +299,15 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() { return !shared.is_compiled() && code.builtin_id() != Builtin::kCompileLazy; } -bool JSFunction::NeedsResetDueToFlushedBaselineCode() { - return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData(); -} - -void JSFunction::ResetIfCodeFlushed( +void JSFunction::ResetIfBytecodeFlushed( base::Optional> gc_notify_updated_slot) { - if (!FLAG_flush_bytecode) return; - - if (NeedsResetDueToFlushedBytecode()) { + if (FLAG_flush_bytecode && NeedsResetDueToFlushedBytecode()) { // Bytecode was flushed and function is now uncompiled, reset JSFunction // by setting code to CompileLazy and clearing the feedback vector. 
set_code(*BUILTIN_CODE(GetIsolate(), CompileLazy)); raw_feedback_cell().reset_feedback_vector(gc_notify_updated_slot); - } else if (NeedsResetDueToFlushedBaselineCode()) { - // Flush baseline code from the closure if required - set_code(*BUILTIN_CODE(GetIsolate(), InterpreterEntryTrampoline)); } } diff --git a/src/objects/js-function.cc b/src/objects/js-function.cc index b2d086814f..05ac6a6c93 100644 --- a/src/objects/js-function.cc +++ b/src/objects/js-function.cc @@ -1078,7 +1078,7 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type, } void JSFunction::ClearTypeFeedbackInfo() { - ResetIfCodeFlushed(); + ResetIfBytecodeFlushed(); if (has_feedback_vector()) { FeedbackVector vector = feedback_vector(); Isolate* isolate = GetIsolate(); diff --git a/src/objects/js-function.h b/src/objects/js-function.h index 8114de95e5..5277e4e796 100644 --- a/src/objects/js-function.h +++ b/src/objects/js-function.h @@ -210,19 +210,11 @@ class JSFunction : public JSFunctionOrBoundFunction { // Resets function to clear compiled data after bytecode has been flushed. inline bool NeedsResetDueToFlushedBytecode(); - inline void ResetIfCodeFlushed( + inline void ResetIfBytecodeFlushed( base::Optional> gc_notify_updated_slot = base::nullopt); - // Returns if the closure's code field has to be updated because it has - // stale baseline code. - inline bool NeedsResetDueToFlushedBaselineCode(); - - // Returns if baseline code is a candidate for flushing. This method is called - // from concurrent marking so we should be careful when accessing data fields. - inline bool ShouldFlushBaselineCode(CodeFlushMode mode); - DECL_GETTER(has_prototype_slot, bool) // The initial map for an object created by this constructor. @@ -319,8 +311,6 @@ class JSFunction : public JSFunctionOrBoundFunction { static constexpr int kPrototypeOrInitialMapOffset = FieldOffsets::kPrototypeOrInitialMapOffset; - class BodyDescriptor; - private: DECL_ACCESSORS(raw_code, CodeT) DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT) diff --git a/src/objects/objects-body-descriptors-inl.h b/src/objects/objects-body-descriptors-inl.h index 0f06c58915..bb3c6ca3ce 100644 --- a/src/objects/objects-body-descriptors-inl.h +++ b/src/objects/objects-body-descriptors-inl.h @@ -296,39 +296,6 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase { } }; -class JSFunction::BodyDescriptor final : public BodyDescriptorBase { - public: - static const int kStartOffset = JSObject::BodyDescriptor::kStartOffset; - - static bool IsValidSlot(Map map, HeapObject obj, int offset) { - if (offset < kStartOffset) return false; - return IsValidJSObjectSlotImpl(map, obj, offset); - } - - template - static inline void IterateBody(Map map, HeapObject obj, int object_size, - ObjectVisitor* v) { - // Iterate JSFunction header fields first. - int header_size = JSFunction::GetHeaderSize(map.has_prototype_slot()); - DCHECK_GE(object_size, header_size); - IteratePointers(obj, kStartOffset, kCodeOffset, v); - // Code field is treated as a custom weak pointer. This field is visited as - // a weak pointer if the Code is baseline code and the bytecode array - // corresponding to this function is old. In the rest of the cases this - // field is treated as strong pointer. - IterateCustomWeakPointer(obj, kCodeOffset, v); - // Iterate rest of the header fields - DCHECK_GE(header_size, kCodeOffset); - IteratePointers(obj, kCodeOffset + kTaggedSize, header_size, v); - // Iterate rest of the fields starting after the header. 
- IterateJSObjectBodyImpl(map, obj, header_size, object_size, v); - } - - static inline int SizeOf(Map map, HeapObject object) { - return map.instance_size(); - } -}; - class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase { public: static bool IsValidSlot(Map map, HeapObject obj, int offset) { diff --git a/src/objects/shared-function-info-inl.h b/src/objects/shared-function-info-inl.h index 8163dc4119..a01432278b 100644 --- a/src/objects/shared-function-info-inl.h +++ b/src/objects/shared-function-info-inl.h @@ -575,8 +575,8 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) { set_function_data(bytecode, kReleaseStore); } -bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) { - if (mode == CodeFlushMode::kDoNotFlushCode) return false; +bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) { + if (mode == BytecodeFlushMode::kDoNotFlushBytecode) return false; // TODO(rmcilroy): Enable bytecode flushing for resumable functions. if (IsResumableFunction(kind()) || !allows_lazy_compilation()) { @@ -587,13 +587,9 @@ bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) { // check if it is old. Note, this is done this way since this function can be // called by the concurrent marker. Object data = function_data(kAcquireLoad); - if (data.IsBaselineData()) { - data = - ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset); - } if (!data.IsBytecodeArray()) return false; - if (mode == CodeFlushMode::kStressFlushCode) return true; + if (mode == BytecodeFlushMode::kStressFlushBytecode) return true; BytecodeArray bytecode = BytecodeArray::cast(data); diff --git a/src/objects/shared-function-info.h b/src/objects/shared-function-info.h index f0a39fea45..1081481d7a 100644 --- a/src/objects/shared-function-info.h +++ b/src/objects/shared-function-info.h @@ -534,7 +534,7 @@ class SharedFunctionInfo // Returns true if the function has old bytecode that could be flushed. This // function shouldn't access any flags as it is used by concurrent marker. // Hence it takes the mode as an argument. - inline bool ShouldFlushBytecode(CodeFlushMode mode); + inline bool ShouldFlushBytecode(BytecodeFlushMode mode); enum Inlineability { kIsInlineable, diff --git a/src/objects/shared-function-info.tq b/src/objects/shared-function-info.tq index 160c3d26d4..2a08c51088 100644 --- a/src/objects/shared-function-info.tq +++ b/src/objects/shared-function-info.tq @@ -57,10 +57,6 @@ bitfield struct SharedFunctionInfoFlags2 extends uint8 { @customCppClass @customMap // Just to place the map at the beginning of the roots array. class SharedFunctionInfo extends HeapObject { - // function_data field is treated as a custom weak pointer. We visit this - // field as a weak pointer if there is aged bytecode. If there is no bytecode - // or if the bytecode is young then we treat it as a strong pointer. This is - // done to support flushing of bytecode. 
weak function_data: Object; name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo; outer_scope_info_or_feedback_metadata: HeapObject; diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index 76b779a9eb..1d75cb8f6f 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -238,7 +238,7 @@ void ReplaceWrapper(Isolate* isolate, Handle instance, WasmInstanceObject::GetWasmExternalFunction(isolate, instance, function_index) .ToHandleChecked(); - exported_function->set_code(*wrapper_code, kReleaseStore); + exported_function->set_code(*wrapper_code); WasmExportedFunctionData function_data = exported_function->shared().wasm_exported_function_data(); function_data.set_wrapper_code(*wrapper_code); diff --git a/src/snapshot/context-serializer.cc b/src/snapshot/context-serializer.cc index 7a02a50caa..ec3605f82b 100644 --- a/src/snapshot/context-serializer.cc +++ b/src/snapshot/context-serializer.cc @@ -175,7 +175,7 @@ void ContextSerializer::SerializeObjectImpl(Handle obj) { // Unconditionally reset the JSFunction to its SFI's code, since we can't // serialize optimized code anyway. Handle closure = Handle::cast(obj); - closure->ResetIfCodeFlushed(); + closure->ResetIfBytecodeFlushed(); if (closure->is_compiled()) { if (closure->shared().HasBaselineData()) { closure->shared().flush_baseline_data(); diff --git a/test/mjsunit/baseline/flush-baseline-code.js b/test/mjsunit/baseline/flush-baseline-code.js deleted file mode 100644 index e621cb5163..0000000000 --- a/test/mjsunit/baseline/flush-baseline-code.js +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2021 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Flags: --expose-gc --stress-flush-bytecode --allow-natives-syntax -// Flags: --baseline-batch-compilation-threshold=0 --sparkplug -// Flags: --no-always-sparkplug - -function HasBaselineCode(f) { - let opt_status = %GetOptimizationStatus(f); - return (opt_status & V8OptimizationStatus.kBaseline) !== 0; -} - -function HasByteCode(f) { - let opt_status = %GetOptimizationStatus(f); - return (opt_status & V8OptimizationStatus.kInterpreted) !== 0; -} - -var x = {b:20, c:30}; -function f() { - return x.b + 10; -} - -// Test bytecode gets flushed -f(); -assertTrue(HasByteCode(f)); -gc(); -assertFalse(HasByteCode(f)); - -// Test baseline code and bytecode gets flushed -for (i = 1; i < 50; i++) { - f(); -} -assertTrue(HasBaselineCode(f)); -gc(); -assertFalse(HasBaselineCode(f)); -assertFalse(HasByteCode(f)); - -// Check bytecode isn't flushed if it's held strongly from somewhere but -// baseline code is flushed. -function f1(should_recurse) { - if (should_recurse) { - assertTrue(HasByteCode(f1)); - for (i = 1; i < 50; i++) { - f1(false); - } - assertTrue(HasBaselineCode(f1)); - gc(); - assertFalse(HasBaselineCode(f1)); - assertTrue(HasByteCode(f1)); - } - return x.b + 10; -} - -f1(false); -// Recurse first time so we have bytecode array on the stack that keeps -// bytecode alive. -f1(true); - -// Flush bytecode -gc(); -assertFalse(HasBaselineCode(f1)); -assertFalse(HasByteCode(f1)); - -// Check baseline code and bytecode aren't flushed if baseline code is on -// stack. 
-function f2(should_recurse) { - if (should_recurse) { - assertTrue(HasBaselineCode(f2)); - f2(false); - gc(); - assertTrue(HasBaselineCode(f2)); - } - return x.b + 10; -} - -for (i = 1; i < 50; i++) { - f2(false); -} -assertTrue(HasBaselineCode(f2)); -// Recurse with baseline code on stack -f2(true);
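
The decision logic that this revert restores (a single BytecodeFlushMode captured at the start of a GC cycle, and SharedFunctionInfo::ShouldFlushBytecode deciding per function) can be summarized in a small standalone sketch. The following is illustrative only and is not V8 source: the Flags and FunctionState structs, and the kOldAge threshold, are invented stand-ins for the real flags, SharedFunctionInfo fields, and bytecode-age check referenced in the diff above.

// Standalone C++17 sketch of the bytecode-flushing heuristic restored by this
// revert. Mirrors Heap::GetBytecodeFlushMode and
// SharedFunctionInfo::ShouldFlushBytecode from the diff; all types here are
// simplified stand-ins, not V8 APIs.
#include <iostream>

enum class BytecodeFlushMode {
  kDoNotFlushBytecode,
  kFlushBytecode,
  kStressFlushBytecode,
};

// Stand-ins for --flush-bytecode / --stress-flush-bytecode and the
// per-isolate "bytecode flushing disabled" bit.
struct Flags {
  bool flush_bytecode = true;
  bool stress_flush_bytecode = false;
  bool flushing_disabled = false;
};

BytecodeFlushMode GetBytecodeFlushMode(const Flags& flags) {
  if (flags.flushing_disabled) return BytecodeFlushMode::kDoNotFlushBytecode;
  if (flags.stress_flush_bytecode) {
    return BytecodeFlushMode::kStressFlushBytecode;
  }
  if (flags.flush_bytecode) return BytecodeFlushMode::kFlushBytecode;
  return BytecodeFlushMode::kDoNotFlushBytecode;
}

// Simplified view of the per-function state the marker inspects.
struct FunctionState {
  bool is_resumable = false;           // generators / async functions
  bool allows_lazy_compilation = true;
  bool has_bytecode = true;            // function_data is a BytecodeArray
  int bytecode_age = 0;                // bumped by each GC cycle
};

constexpr int kOldAge = 2;  // illustrative threshold, not V8's real value

bool ShouldFlushBytecode(BytecodeFlushMode mode, const FunctionState& f) {
  if (mode == BytecodeFlushMode::kDoNotFlushBytecode) return false;
  // Resumable or non-lazily-compilable functions keep their bytecode.
  if (f.is_resumable || !f.allows_lazy_compilation) return false;
  if (!f.has_bytecode) return false;
  if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
  return f.bytecode_age >= kOldAge;
}

int main() {
  Flags flags;
  BytecodeFlushMode mode = GetBytecodeFlushMode(flags);
  FunctionState young{false, true, true, 0};
  FunctionState old_fn{false, true, true, 3};
  std::cout << ShouldFlushBytecode(mode, young) << "\n";   // 0: bytecode too young
  std::cout << ShouldFlushBytecode(mode, old_fn) << "\n";  // 1: old bytecode, flush
}

Capturing the mode once per GC (bytecode_flush_mode_ in MarkCompactCollector) rather than re-reading flags is what lets the concurrent marker stay away from flag accesses mid-GC, as the comments in src/heap/heap.h and src/heap/mark-compact.h in the diff note.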