Revert "[sparkplug] Support bytecode / baseline code flushing with sparkplug"
This reverts commit ea55438a53.
Reason for revert: Likely culprit for these failures: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20NumFuzz/15494/overview
Original change's description:
> [sparkplug] Support bytecode / baseline code flushing with sparkplug
>
> Currently with sparkplug we don't flush bytecode / baseline code of
> functions that were tiered up to sparkplug. This CL adds the support to
> flush baseline code / bytecode of functions that have baseline code too.
> This CL:
> 1. Updates the BodyDescriptor of JSFunction to treat the Code field of
> JSFunction as a custom weak pointer where the code is treated as weak if
> the bytecode corresponding to this function is old.
> 2. Updates GC to handle the functions that had a weak code object during
> the atomic phase of GC.
> 3. Updates the check for old bytecode to also consider when there is
> baseline code on the function.
>
> This CL doesn't change any heuristics for flushing. The baseline code
> will be flushed at the same time as bytecode.
>
> Change-Id: I6b51e06ebadb917b9f4b0f43f2afebd7f64cd26a
> Bug: v8:11947
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2992715
> Commit-Queue: Mythri Alle <mythria@chromium.org>
> Reviewed-by: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Toon Verwaest <verwaest@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75674}
Bug: v8:11947
Change-Id: I50535b9a6c6fc39eceb4f6c0e0c84c55bb92f30a
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3017811
Reviewed-by: Mythri Alle <mythria@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75679}
parent 6018d479b6, commit a079f05798
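For readers skimming the revert, here is a minimal, self-contained sketch of the idea summarised in point 1 of the original description: the function's code slot is visited as a weak pointer only when the function holds baseline (Sparkplug) code and its bytecode has become old, so that baseline code and bytecode can be flushed together. The types, names, and age threshold below are illustrative stand-ins, not the actual V8 internals (those appear in the diff that follows).

#include <iostream>

// Illustrative stand-ins for V8-internal notions (JSFunction, CodeKind,
// bytecode age); the real aging is driven by the GC marking phases.
enum class CodeKind { kInterpreted, kBaseline, kOptimized };

struct FunctionModel {
  CodeKind code_kind;
  int bytecode_age;  // incremented for every GC cycle the bytecode goes unused
};

constexpr int kOldAgeThreshold = 2;  // hypothetical threshold

// The marking-time decision: treat the code slot as weak (i.e. make the
// function a flushing candidate) only for baseline code whose bytecode is
// old; otherwise keep visiting the slot strongly.
bool VisitCodeSlotWeakly(const FunctionModel& f) {
  return f.code_kind == CodeKind::kBaseline &&
         f.bytecode_age >= kOldAgeThreshold;
}

int main() {
  FunctionModel fresh{CodeKind::kBaseline, 0};
  FunctionModel stale{CodeKind::kBaseline, 3};
  std::cout << std::boolalpha << VisitCodeSlotWeakly(fresh) << ' '
            << VisitCodeSlotWeakly(stale) << '\n';  // prints: false true
}

Functions whose code slot was cleared this way were then reset during the atomic pause (point 2 of the description), which is what the ProcessFlushedBaselineCandidates machinery removed by this revert handled.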
@@ -1862,7 +1862,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
 
   // Reset the JSFunction if we are recompiling due to the bytecode having been
   // flushed.
-  function->ResetIfCodeFlushed();
+  function->ResetIfBytecodeFlushed();
 
   Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);
 
@@ -869,10 +869,10 @@ enum class CompactionSpaceKind {
 
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
 
-enum class CodeFlushMode {
-  kDoNotFlushCode,
-  kFlushCode,
-  kStressFlushCode,
+enum class BytecodeFlushMode {
+  kDoNotFlushBytecode,
+  kFlushBytecode,
+  kStressFlushBytecode,
 };
 
 // Indicates whether a script should be parsed and compiled in REPL mode.
@@ -430,7 +430,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
   RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.DeoptimizeCode");
-  function.ResetIfCodeFlushed();
+  function.ResetIfBytecodeFlushed();
   if (code.is_null()) code = function.code();
 
   if (CodeKindCanDeoptimize(code.kind())) {
@@ -86,7 +86,7 @@ class ConcurrentMarkingVisitor final
                            MarkingWorklists::Local* local_marking_worklists,
                            WeakObjects* weak_objects, Heap* heap,
                            unsigned mark_compact_epoch,
-                           CodeFlushMode bytecode_flush_mode,
+                           BytecodeFlushMode bytecode_flush_mode,
                            bool embedder_tracing_enabled, bool is_forced_gc,
                            MemoryChunkDataMap* memory_chunk_data)
       : MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
@@ -359,7 +359,7 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
 class ConcurrentMarking::JobTask : public v8::JobTask {
  public:
   JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
-          CodeFlushMode bytecode_flush_mode, bool is_forced_gc)
+          BytecodeFlushMode bytecode_flush_mode, bool is_forced_gc)
       : concurrent_marking_(concurrent_marking),
         mark_compact_epoch_(mark_compact_epoch),
         bytecode_flush_mode_(bytecode_flush_mode),
@@ -391,7 +391,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
  private:
   ConcurrentMarking* concurrent_marking_;
   const unsigned mark_compact_epoch_;
-  CodeFlushMode bytecode_flush_mode_;
+  BytecodeFlushMode bytecode_flush_mode_;
   const bool is_forced_gc_;
 };
 
@@ -412,7 +412,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
 }
 
 void ConcurrentMarking::Run(JobDelegate* delegate,
-                            CodeFlushMode bytecode_flush_mode,
+                            BytecodeFlushMode bytecode_flush_mode,
                             unsigned mark_compact_epoch, bool is_forced_gc) {
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
@@ -528,7 +528,6 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
     weak_objects_->weak_cells.FlushToGlobal(task_id);
     weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
     weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
-    weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
     weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     total_marked_bytes_ += marked_bytes;
@@ -105,7 +105,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
     char cache_line_padding[64];
   };
   class JobTask;
-  void Run(JobDelegate* delegate, CodeFlushMode bytecode_flush_mode,
+  void Run(JobDelegate* delegate, BytecodeFlushMode bytecode_flush_mode,
            unsigned mark_compact_epoch, bool is_forced_gc);
   size_t GetMaxConcurrency(size_t worker_count);
 
@@ -84,16 +84,16 @@ Address AllocationResult::ToAddress() {
 }
 
 // static
-CodeFlushMode Heap::GetCodeFlushMode(Isolate* isolate) {
+BytecodeFlushMode Heap::GetBytecodeFlushMode(Isolate* isolate) {
   if (isolate->disable_bytecode_flushing()) {
-    return CodeFlushMode::kDoNotFlushCode;
+    return BytecodeFlushMode::kDoNotFlushBytecode;
   }
   if (FLAG_stress_flush_bytecode) {
-    return CodeFlushMode::kStressFlushCode;
+    return BytecodeFlushMode::kStressFlushBytecode;
   } else if (FLAG_flush_bytecode) {
-    return CodeFlushMode::kFlushCode;
+    return BytecodeFlushMode::kFlushBytecode;
   }
-  return CodeFlushMode::kDoNotFlushCode;
+  return BytecodeFlushMode::kDoNotFlushBytecode;
 }
 
 Isolate* Heap::isolate() {
@@ -460,7 +460,7 @@ class Heap {
 
   // Helper function to get the bytecode flushing mode based on the flags. This
   // is required because it is not safe to acess flags in concurrent marker.
-  static inline CodeFlushMode GetCodeFlushMode(Isolate* isolate);
+  static inline BytecodeFlushMode GetBytecodeFlushMode(Isolate* isolate);
 
   static uintptr_t ZapValue() {
     return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
@@ -519,7 +519,7 @@ void MarkCompactCollector::StartMarking() {
       contexts.push_back(context->ptr());
     }
   }
-  bytecode_flush_mode_ = Heap::GetCodeFlushMode(isolate());
+  bytecode_flush_mode_ = Heap::GetBytecodeFlushMode(isolate());
   marking_worklists()->CreateContextWorklists(contexts);
   local_marking_worklists_ =
       std::make_unique<MarkingWorklists::Local>(marking_worklists());
@@ -2086,7 +2086,7 @@ void MarkCompactCollector::MarkLiveObjects() {
   }
 
   // We depend on IterateWeakRootsForPhantomHandles being called before
-  // ProcessOldCodeCandidates in order to identify flushed bytecode in the
+  // ClearOldBytecodeCandidates in order to identify flushed bytecode in the
   // CPU profiler.
   {
     heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
@@ -2122,11 +2122,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
-    // ProcessFlusheBaselineCandidates should be called after clearing bytecode
-    // so that we flush any bytecode if needed so we could correctly set the
-    // code object on the JSFunction.
-    ProcessOldCodeCandidates();
-    ProcessFlushedBaselineCandidates();
+    ClearOldBytecodeCandidates();
   }
 
   {
@@ -2165,7 +2161,6 @@ void MarkCompactCollector::ClearNonLiveReferences() {
   DCHECK(weak_objects_.js_weak_refs.IsEmpty());
   DCHECK(weak_objects_.weak_cells.IsEmpty());
   DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
-  DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
   DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
 }
 
@@ -2283,59 +2278,21 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
   DCHECK(!shared_info.is_compiled());
 }
 
-void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
-  if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return;
-
-  // Mark baseline data as live.
-  non_atomic_marking_state()->WhiteToBlack(baseline_data);
-
-  // Record object slots.
-  DCHECK(
-      non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code()));
-  ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset);
-  RecordSlot(baseline_data, code, HeapObject::cast(*code));
-
-  DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data()));
-  ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset);
-  RecordSlot(baseline_data, data, HeapObject::cast(*data));
-}
-
-void MarkCompactCollector::ProcessOldCodeCandidates() {
+void MarkCompactCollector::ClearOldBytecodeCandidates() {
   DCHECK(FLAG_flush_bytecode ||
          weak_objects_.bytecode_flushing_candidates.IsEmpty());
   SharedFunctionInfo flushing_candidate;
   while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask,
                                                         &flushing_candidate)) {
-    bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
-        flushing_candidate.GetBytecodeArray(isolate()));
-    if (flushing_candidate.HasBaselineData()) {
-      BaselineData baseline_data = flushing_candidate.baseline_data();
-      if (non_atomic_marking_state()->IsBlackOrGrey(
-              baseline_data.baseline_code())) {
-        // Currently baseline code holds bytecode array strongly and it is
-        // always ensured that bytecode is live if baseline code is live. Hence
-        // baseline code can safely load bytecode array without any additional
-        // checks. In future if this changes we need to update these checks to
-        // flush code if the bytecode is not live and also update baseline code
-        // to bailout if there is no bytecode.
-        DCHECK(is_bytecode_live);
-        MarkBaselineDataAsLive(baseline_data);
-      } else if (is_bytecode_live) {
-        // If baseline code is flushed but we have a valid bytecode array reset
-        // the function_data field to BytecodeArray.
-        flushing_candidate.set_function_data(baseline_data.data(),
-                                             kReleaseStore);
-      }
-    }
-
-    if (!is_bytecode_live) {
-      // If the BytecodeArray is dead, flush it, which will replace the field
-      // with an uncompiled data object.
+    // If the BytecodeArray is dead, flush it, which will replace the field with
+    // an uncompiled data object.
+    if (!non_atomic_marking_state()->IsBlackOrGrey(
+            flushing_candidate.GetBytecodeArray(isolate()))) {
       FlushBytecodeFromSFI(flushing_candidate);
     }
 
     // Now record the slot, which has either been updated to an uncompiled data,
-    // Baseline code or BytecodeArray which is still alive.
+    // or is the BytecodeArray which is still alive.
     ObjectSlot slot =
         flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
     RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
@@ -2351,26 +2308,7 @@ void MarkCompactCollector::ClearFlushedJsFunctions() {
                                      Object target) {
       RecordSlot(object, slot, HeapObject::cast(target));
     };
-    flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
-  }
-}
-
-void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
-  DCHECK(FLAG_flush_bytecode ||
-         weak_objects_.baseline_flushing_candidates.IsEmpty());
-  JSFunction flushed_js_function;
-  while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask,
-                                                        &flushed_js_function)) {
-    auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
-                                     Object target) {
-      RecordSlot(object, slot, HeapObject::cast(target));
-    };
-    flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
-
-    // Record the code slot that has been updated either to CompileLazy,
-    // InterpreterEntryTrampoline or baseline code.
-    ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset);
-    RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot));
+    flushed_js_function.ResetIfBytecodeFlushed(gc_notify_updated_slot);
   }
 }
 
@@ -2687,7 +2625,6 @@ void MarkCompactCollector::AbortWeakObjects() {
   weak_objects_.js_weak_refs.Clear();
   weak_objects_.weak_cells.Clear();
   weak_objects_.bytecode_flushing_candidates.Clear();
-  weak_objects_.baseline_flushing_candidates.Clear();
   weak_objects_.flushed_js_functions.Clear();
 }
 
@@ -376,7 +376,7 @@ class MainMarkingVisitor final
                      MarkingWorklists::Local* local_marking_worklists,
                      WeakObjects* weak_objects, Heap* heap,
                      unsigned mark_compact_epoch,
-                     CodeFlushMode bytecode_flush_mode,
+                     BytecodeFlushMode bytecode_flush_mode,
                      bool embedder_tracing_enabled, bool is_forced_gc)
       : MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
             kMainThreadTask, local_marking_worklists, weak_objects, heap,
@@ -570,7 +570,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
 
   unsigned epoch() const { return epoch_; }
 
-  CodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
+  BytecodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
 
   explicit MarkCompactCollector(Heap* heap);
   ~MarkCompactCollector() override;
@@ -668,14 +668,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Flushes a weakly held bytecode array from a shared function info.
   void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
 
-  // Marks the BaselineData as live and records the slots of baseline data
-  // fields. This assumes that the objects in the data fields are alive.
-  void MarkBaselineDataAsLive(BaselineData baseline_data);
-
-  // Clears bytecode arrays / baseline code that have not been executed for
-  // multiple collections.
-  void ProcessOldCodeCandidates();
-  void ProcessFlushedBaselineCandidates();
+  // Clears bytecode arrays that have not been executed for multiple
+  // collections.
+  void ClearOldBytecodeCandidates();
 
   // Resets any JSFunctions which have had their bytecode flushed.
   void ClearFlushedJsFunctions();
@@ -798,7 +793,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // that can happen while a GC is happening and we need the
   // bytecode_flush_mode_ to remain the same through out a GC, we record this at
   // the start of each GC.
-  CodeFlushMode bytecode_flush_mode_;
+  BytecodeFlushMode bytecode_flush_mode_;
 
   friend class FullEvacuator;
   friend class RecordMigratedSlotVisitor;
@@ -132,19 +132,12 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitBytecodeArray(
 
 template <typename ConcreteVisitor, typename MarkingState>
 int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
-    Map map, JSFunction js_function) {
-  int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function);
-  if (js_function.ShouldFlushBaselineCode(bytecode_flush_mode_)) {
-    weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function);
-  } else {
-    VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset));
-    // TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
-    // also include cases where there is old bytecode even when there is no
-    // baseline code and remove this check here.
-    if (bytecode_flush_mode_ != CodeFlushMode::kDoNotFlushCode &&
-        js_function.NeedsResetDueToFlushedBytecode()) {
-      weak_objects_->flushed_js_functions.Push(task_id_, js_function);
-    }
+    Map map, JSFunction object) {
+  int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
+  // Check if the JSFunction needs reset due to bytecode being flushed.
+  if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
+      object.NeedsResetDueToFlushedBytecode()) {
+    weak_objects_->flushed_js_functions.Push(task_id_, object);
   }
   return size;
 }
@@ -105,7 +105,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
                      MarkingWorklists::Local* local_marking_worklists,
                      WeakObjects* weak_objects, Heap* heap,
                      unsigned mark_compact_epoch,
-                     CodeFlushMode bytecode_flush_mode,
+                     BytecodeFlushMode bytecode_flush_mode,
                      bool is_embedder_tracing_enabled, bool is_forced_gc)
       : local_marking_worklists_(local_marking_worklists),
         weak_objects_(weak_objects),
@@ -199,7 +199,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
   Heap* const heap_;
   const int task_id_;
   const unsigned mark_compact_epoch_;
-  const CodeFlushMode bytecode_flush_mode_;
+  const BytecodeFlushMode bytecode_flush_mode_;
   const bool is_embedder_tracing_enabled_;
   const bool is_forced_gc_;
   const bool is_shared_heap_;
@@ -153,21 +153,6 @@ void WeakObjects::UpdateFlushedJSFunctions(
       });
 }
 
-void WeakObjects::UpdateBaselineFlushingCandidates(
-    WeakObjectWorklist<JSFunction>& baseline_flush_candidates) {
-  baseline_flush_candidates.Update(
-      [](JSFunction slot_in, JSFunction* slot_out) -> bool {
-        JSFunction forwarded = ForwardingAddress(slot_in);
-
-        if (!forwarded.is_null()) {
-          *slot_out = forwarded;
-          return true;
-        }
-
-        return false;
-      });
-}
-
 #ifdef DEBUG
 template <typename Type>
 bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {
@@ -59,7 +59,6 @@ class TransitionArray;
   F(WeakCell, weak_cells, WeakCells) \
   F(SharedFunctionInfo, bytecode_flushing_candidates, \
     BytecodeFlushingCandidates) \
-  F(JSFunction, baseline_flushing_candidates, BaselineFlushingCandidates) \
   F(JSFunction, flushed_js_functions, FlushedJSFunctions)
 
 class WeakObjects {
@@ -284,36 +284,14 @@ bool JSFunction::is_compiled() const {
          shared().is_compiled();
 }
 
-bool JSFunction::ShouldFlushBaselineCode(CodeFlushMode mode) {
-  if (mode == CodeFlushMode::kDoNotFlushCode) return false;
-  // Do a raw read for shared and code fields here since this function may be
-  // called on a concurrent thread. JSFunction itself should be fully
-  // initialized here but the SharedFunctionInfo, Code objects may not be
-  // initialized. We read using acquire loads to defend against that.
-  Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
-  if (!maybe_shared.IsSharedFunctionInfo()) return false;
-
-  // See crbug.com/v8/11972 for more details on acquire / release semantics for
-  // code field. We don't use release stores when copying code pointers from
-  // SFI / FV to JSFunction but it is safe in practice.
-  Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
-  if (!maybe_code.IsCodeT()) return false;
-  Code code = FromCodeT(CodeT::cast(maybe_code));
-  if (code.kind() != CodeKind::BASELINE) return false;
-
-  SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
-  return shared.ShouldFlushBytecode(mode);
-}
-
 bool JSFunction::NeedsResetDueToFlushedBytecode() {
   // Do a raw read for shared and code fields here since this function may be
-  // called on a concurrent thread. JSFunction itself should be fully
-  // initialized here but the SharedFunctionInfo, Code objects may not be
-  // initialized. We read using acquire loads to defend against that.
+  // called on a concurrent thread and the JSFunction might not be fully
+  // initialized yet.
   Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
   if (!maybe_shared.IsSharedFunctionInfo()) return false;
 
-  Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
+  Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset);
   if (!maybe_code.IsCodeT()) return false;
   Code code = FromCodeT(CodeT::cast(maybe_code), kRelaxedLoad);
 
@@ -321,24 +299,15 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
   return !shared.is_compiled() && code.builtin_id() != Builtin::kCompileLazy;
 }
 
-bool JSFunction::NeedsResetDueToFlushedBaselineCode() {
-  return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData();
-}
-
-void JSFunction::ResetIfCodeFlushed(
+void JSFunction::ResetIfBytecodeFlushed(
     base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
                                       HeapObject target)>>
         gc_notify_updated_slot) {
-  if (!FLAG_flush_bytecode) return;
-
-  if (NeedsResetDueToFlushedBytecode()) {
+  if (FLAG_flush_bytecode && NeedsResetDueToFlushedBytecode()) {
     // Bytecode was flushed and function is now uncompiled, reset JSFunction
     // by setting code to CompileLazy and clearing the feedback vector.
     set_code(*BUILTIN_CODE(GetIsolate(), CompileLazy));
     raw_feedback_cell().reset_feedback_vector(gc_notify_updated_slot);
-  } else if (NeedsResetDueToFlushedBaselineCode()) {
-    // Flush baseline code from the closure if required
-    set_code(*BUILTIN_CODE(GetIsolate(), InterpreterEntryTrampoline));
   }
 }
 
@@ -1078,7 +1078,7 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
 }
 
 void JSFunction::ClearTypeFeedbackInfo() {
-  ResetIfCodeFlushed();
+  ResetIfBytecodeFlushed();
   if (has_feedback_vector()) {
     FeedbackVector vector = feedback_vector();
     Isolate* isolate = GetIsolate();
@@ -210,19 +210,11 @@ class JSFunction : public JSFunctionOrBoundFunction {
 
   // Resets function to clear compiled data after bytecode has been flushed.
   inline bool NeedsResetDueToFlushedBytecode();
-  inline void ResetIfCodeFlushed(
+  inline void ResetIfBytecodeFlushed(
       base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
                                         HeapObject target)>>
           gc_notify_updated_slot = base::nullopt);
 
-  // Returns if the closure's code field has to be updated because it has
-  // stale baseline code.
-  inline bool NeedsResetDueToFlushedBaselineCode();
-
-  // Returns if baseline code is a candidate for flushing. This method is called
-  // from concurrent marking so we should be careful when accessing data fields.
-  inline bool ShouldFlushBaselineCode(CodeFlushMode mode);
-
   DECL_GETTER(has_prototype_slot, bool)
 
   // The initial map for an object created by this constructor.
@@ -319,8 +311,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
   static constexpr int kPrototypeOrInitialMapOffset =
       FieldOffsets::kPrototypeOrInitialMapOffset;
 
-  class BodyDescriptor;
-
  private:
   DECL_ACCESSORS(raw_code, CodeT)
   DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)
@@ -296,39 +296,6 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
   }
 };
 
-class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
- public:
-  static const int kStartOffset = JSObject::BodyDescriptor::kStartOffset;
-
-  static bool IsValidSlot(Map map, HeapObject obj, int offset) {
-    if (offset < kStartOffset) return false;
-    return IsValidJSObjectSlotImpl(map, obj, offset);
-  }
-
-  template <typename ObjectVisitor>
-  static inline void IterateBody(Map map, HeapObject obj, int object_size,
-                                 ObjectVisitor* v) {
-    // Iterate JSFunction header fields first.
-    int header_size = JSFunction::GetHeaderSize(map.has_prototype_slot());
-    DCHECK_GE(object_size, header_size);
-    IteratePointers(obj, kStartOffset, kCodeOffset, v);
-    // Code field is treated as a custom weak pointer. This field is visited as
-    // a weak pointer if the Code is baseline code and the bytecode array
-    // corresponding to this function is old. In the rest of the cases this
-    // field is treated as strong pointer.
-    IterateCustomWeakPointer(obj, kCodeOffset, v);
-    // Iterate rest of the header fields
-    DCHECK_GE(header_size, kCodeOffset);
-    IteratePointers(obj, kCodeOffset + kTaggedSize, header_size, v);
-    // Iterate rest of the fields starting after the header.
-    IterateJSObjectBodyImpl(map, obj, header_size, object_size, v);
-  }
-
-  static inline int SizeOf(Map map, HeapObject object) {
-    return map.instance_size();
-  }
-};
-
 class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
  public:
   static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -575,8 +575,8 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
   set_function_data(bytecode, kReleaseStore);
 }
 
-bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) {
-  if (mode == CodeFlushMode::kDoNotFlushCode) return false;
+bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
+  if (mode == BytecodeFlushMode::kDoNotFlushBytecode) return false;
 
   // TODO(rmcilroy): Enable bytecode flushing for resumable functions.
   if (IsResumableFunction(kind()) || !allows_lazy_compilation()) {
@@ -587,13 +587,9 @@ bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) {
   // check if it is old. Note, this is done this way since this function can be
   // called by the concurrent marker.
   Object data = function_data(kAcquireLoad);
-  if (data.IsBaselineData()) {
-    data =
-        ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
-  }
   if (!data.IsBytecodeArray()) return false;
 
-  if (mode == CodeFlushMode::kStressFlushCode) return true;
+  if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
 
   BytecodeArray bytecode = BytecodeArray::cast(data);
 
@@ -534,7 +534,7 @@ class SharedFunctionInfo
   // Returns true if the function has old bytecode that could be flushed. This
   // function shouldn't access any flags as it is used by concurrent marker.
   // Hence it takes the mode as an argument.
-  inline bool ShouldFlushBytecode(CodeFlushMode mode);
+  inline bool ShouldFlushBytecode(BytecodeFlushMode mode);
 
   enum Inlineability {
     kIsInlineable,
@@ -57,10 +57,6 @@ bitfield struct SharedFunctionInfoFlags2 extends uint8 {
 @customCppClass
 @customMap // Just to place the map at the beginning of the roots array.
 class SharedFunctionInfo extends HeapObject {
-  // function_data field is treated as a custom weak pointer. We visit this
-  // field as a weak pointer if there is aged bytecode. If there is no bytecode
-  // or if the bytecode is young then we treat it as a strong pointer. This is
-  // done to support flushing of bytecode.
   weak function_data: Object;
   name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
   outer_scope_info_or_feedback_metadata: HeapObject;
@@ -238,7 +238,7 @@ void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
       WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
                                                   function_index)
           .ToHandleChecked();
-  exported_function->set_code(*wrapper_code, kReleaseStore);
+  exported_function->set_code(*wrapper_code);
   WasmExportedFunctionData function_data =
       exported_function->shared().wasm_exported_function_data();
   function_data.set_wrapper_code(*wrapper_code);
@@ -175,7 +175,7 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
     // Unconditionally reset the JSFunction to its SFI's code, since we can't
     // serialize optimized code anyway.
     Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
-    closure->ResetIfCodeFlushed();
+    closure->ResetIfBytecodeFlushed();
     if (closure->is_compiled()) {
      if (closure->shared().HasBaselineData()) {
        closure->shared().flush_baseline_data();
@@ -1,82 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-gc --stress-flush-bytecode --allow-natives-syntax
-// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
-// Flags: --no-always-sparkplug
-
-function HasBaselineCode(f) {
-  let opt_status = %GetOptimizationStatus(f);
-  return (opt_status & V8OptimizationStatus.kBaseline) !== 0;
-}
-
-function HasByteCode(f) {
-  let opt_status = %GetOptimizationStatus(f);
-  return (opt_status & V8OptimizationStatus.kInterpreted) !== 0;
-}
-
-var x = {b:20, c:30};
-function f() {
-  return x.b + 10;
-}
-
-// Test bytecode gets flushed
-f();
-assertTrue(HasByteCode(f));
-gc();
-assertFalse(HasByteCode(f));
-
-// Test baseline code and bytecode gets flushed
-for (i = 1; i < 50; i++) {
-  f();
-}
-assertTrue(HasBaselineCode(f));
-gc();
-assertFalse(HasBaselineCode(f));
-assertFalse(HasByteCode(f));
-
-// Check bytecode isn't flushed if it's held strongly from somewhere but
-// baseline code is flushed.
-function f1(should_recurse) {
-  if (should_recurse) {
-    assertTrue(HasByteCode(f1));
-    for (i = 1; i < 50; i++) {
-      f1(false);
-    }
-    assertTrue(HasBaselineCode(f1));
-    gc();
-    assertFalse(HasBaselineCode(f1));
-    assertTrue(HasByteCode(f1));
-  }
-  return x.b + 10;
-}
-
-f1(false);
-// Recurse first time so we have bytecode array on the stack that keeps
-// bytecode alive.
-f1(true);
-
-// Flush bytecode
-gc();
-assertFalse(HasBaselineCode(f1));
-assertFalse(HasByteCode(f1));
-
-// Check baseline code and bytecode aren't flushed if baseline code is on
-// stack.
-function f2(should_recurse) {
-  if (should_recurse) {
-    assertTrue(HasBaselineCode(f2));
-    f2(false);
-    gc();
-    assertTrue(HasBaselineCode(f2));
-  }
-  return x.b + 10;
-}
-
-for (i = 1; i < 50; i++) {
-  f2(false);
-}
-assertTrue(HasBaselineCode(f2));
-// Recurse with baseline code on stack
-f2(true);