Revert "[sparkplug] Support bytecode / baseline code flushing with sparkplug"

This reverts commit ea55438a53.

Reason for revert: Likely culprit for these failures: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20NumFuzz/15494/overview

Original change's description:
> [sparkplug] Support bytecode / baseline code flushing with sparkplug
>
> Currently with sparkplug we don't flush bytecode / baseline code of
> functions that were tiered up to sparkplug. This CL adds the support to
> flush baseline code / bytecode of functions that have baseline code too.
> This CL:
> 1. Updates the BodyDescriptor of JSFunction to treat the Code field of
> JSFunction as a custom weak pointer where the code is treated as weak if
> the bytecode corresponding to this function is old.
> 2. Updates GC to handle the functions that had a weak code object during
> the atomic phase of GC.
> 3. Updates the check for old bytecode to also consider when there is
> baseline code on the function.
>
> This CL doesn't change any heuristics for flushing. The baseline code
> will be flushed at the same time as bytecode.
>
> Change-Id: I6b51e06ebadb917b9f4b0f43f2afebd7f64cd26a
> Bug: v8:11947
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2992715
> Commit-Queue: Mythri Alle <mythria@chromium.org>
> Reviewed-by: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Toon Verwaest <verwaest@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75674}

Bug: v8:11947
Change-Id: I50535b9a6c6fc39eceb4f6c0e0c84c55bb92f30a
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3017811
Reviewed-by: Mythri Alle <mythria@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75679}
This commit is contained in:
Mythri Alle 2021-07-12 11:57:00 +00:00 committed by V8 LUCI CQ
parent 6018d479b6
commit a079f05798
23 changed files with 54 additions and 310 deletions

View File

@ -1862,7 +1862,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
// Reset the JSFunction if we are recompiling due to the bytecode having been
// flushed.
function->ResetIfCodeFlushed();
function->ResetIfBytecodeFlushed();
Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);

View File

@ -869,10 +869,10 @@ enum class CompactionSpaceKind {
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum class CodeFlushMode {
kDoNotFlushCode,
kFlushCode,
kStressFlushCode,
enum class BytecodeFlushMode {
kDoNotFlushBytecode,
kFlushBytecode,
kStressFlushBytecode,
};
// Indicates whether a script should be parsed and compiled in REPL mode.

View File

@ -430,7 +430,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
function.ResetIfCodeFlushed();
function.ResetIfBytecodeFlushed();
if (code.is_null()) code = function.code();
if (CodeKindCanDeoptimize(code.kind())) {

View File

@ -86,7 +86,7 @@ class ConcurrentMarkingVisitor final
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
CodeFlushMode bytecode_flush_mode,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
@ -359,7 +359,7 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
class ConcurrentMarking::JobTask : public v8::JobTask {
public:
JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
CodeFlushMode bytecode_flush_mode, bool is_forced_gc)
BytecodeFlushMode bytecode_flush_mode, bool is_forced_gc)
: concurrent_marking_(concurrent_marking),
mark_compact_epoch_(mark_compact_epoch),
bytecode_flush_mode_(bytecode_flush_mode),
@ -391,7 +391,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
private:
ConcurrentMarking* concurrent_marking_;
const unsigned mark_compact_epoch_;
CodeFlushMode bytecode_flush_mode_;
BytecodeFlushMode bytecode_flush_mode_;
const bool is_forced_gc_;
};
@ -412,7 +412,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
}
void ConcurrentMarking::Run(JobDelegate* delegate,
CodeFlushMode bytecode_flush_mode,
BytecodeFlushMode bytecode_flush_mode,
unsigned mark_compact_epoch, bool is_forced_gc) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
@ -528,7 +528,6 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;

View File

@ -105,7 +105,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
char cache_line_padding[64];
};
class JobTask;
void Run(JobDelegate* delegate, CodeFlushMode bytecode_flush_mode,
void Run(JobDelegate* delegate, BytecodeFlushMode bytecode_flush_mode,
unsigned mark_compact_epoch, bool is_forced_gc);
size_t GetMaxConcurrency(size_t worker_count);

View File

@ -84,16 +84,16 @@ Address AllocationResult::ToAddress() {
}
// static
CodeFlushMode Heap::GetCodeFlushMode(Isolate* isolate) {
BytecodeFlushMode Heap::GetBytecodeFlushMode(Isolate* isolate) {
if (isolate->disable_bytecode_flushing()) {
return CodeFlushMode::kDoNotFlushCode;
return BytecodeFlushMode::kDoNotFlushBytecode;
}
if (FLAG_stress_flush_bytecode) {
return CodeFlushMode::kStressFlushCode;
return BytecodeFlushMode::kStressFlushBytecode;
} else if (FLAG_flush_bytecode) {
return CodeFlushMode::kFlushCode;
return BytecodeFlushMode::kFlushBytecode;
}
return CodeFlushMode::kDoNotFlushCode;
return BytecodeFlushMode::kDoNotFlushBytecode;
}
Isolate* Heap::isolate() {

View File

@ -460,7 +460,7 @@ class Heap {
// Helper function to get the bytecode flushing mode based on the flags. This
is required because it is not safe to access flags in concurrent marker.
static inline CodeFlushMode GetCodeFlushMode(Isolate* isolate);
static inline BytecodeFlushMode GetBytecodeFlushMode(Isolate* isolate);
static uintptr_t ZapValue() {
return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;

View File

@ -519,7 +519,7 @@ void MarkCompactCollector::StartMarking() {
contexts.push_back(context->ptr());
}
}
bytecode_flush_mode_ = Heap::GetCodeFlushMode(isolate());
bytecode_flush_mode_ = Heap::GetBytecodeFlushMode(isolate());
marking_worklists()->CreateContextWorklists(contexts);
local_marking_worklists_ =
std::make_unique<MarkingWorklists::Local>(marking_worklists());
@ -2086,7 +2086,7 @@ void MarkCompactCollector::MarkLiveObjects() {
}
// We depend on IterateWeakRootsForPhantomHandles being called before
// ProcessOldCodeCandidates in order to identify flushed bytecode in the
// ClearOldBytecodeCandidates in order to identify flushed bytecode in the
// CPU profiler.
{
heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
@ -2122,11 +2122,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
// ProcessFlushedBaselineCandidates should be called after clearing bytecode
// so that we flush any bytecode if needed so we could correctly set the
// code object on the JSFunction.
ProcessOldCodeCandidates();
ProcessFlushedBaselineCandidates();
ClearOldBytecodeCandidates();
}
{
@ -2165,7 +2161,6 @@ void MarkCompactCollector::ClearNonLiveReferences() {
DCHECK(weak_objects_.js_weak_refs.IsEmpty());
DCHECK(weak_objects_.weak_cells.IsEmpty());
DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
}
@ -2283,59 +2278,21 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
DCHECK(!shared_info.is_compiled());
}
void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return;
// Mark baseline data as live.
non_atomic_marking_state()->WhiteToBlack(baseline_data);
// Record object slots.
DCHECK(
non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code()));
ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset);
RecordSlot(baseline_data, code, HeapObject::cast(*code));
DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data()));
ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset);
RecordSlot(baseline_data, data, HeapObject::cast(*data));
}
void MarkCompactCollector::ProcessOldCodeCandidates() {
void MarkCompactCollector::ClearOldBytecodeCandidates() {
DCHECK(FLAG_flush_bytecode ||
weak_objects_.bytecode_flushing_candidates.IsEmpty());
SharedFunctionInfo flushing_candidate;
while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask,
&flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
if (flushing_candidate.HasBaselineData()) {
BaselineData baseline_data = flushing_candidate.baseline_data();
if (non_atomic_marking_state()->IsBlackOrGrey(
baseline_data.baseline_code())) {
// Currently baseline code holds bytecode array strongly and it is
// always ensured that bytecode is live if baseline code is live. Hence
// baseline code can safely load bytecode array without any additional
// checks. In future if this changes we need to update these checks to
// flush code if the bytecode is not live and also update baseline code
// to bailout if there is no bytecode.
DCHECK(is_bytecode_live);
MarkBaselineDataAsLive(baseline_data);
} else if (is_bytecode_live) {
// If baseline code is flushed but we have a valid bytecode array reset
// the function_data field to BytecodeArray.
flushing_candidate.set_function_data(baseline_data.data(),
kReleaseStore);
}
}
if (!is_bytecode_live) {
// If the BytecodeArray is dead, flush it, which will replace the field
// with an uncompiled data object.
// If the BytecodeArray is dead, flush it, which will replace the field with
// an uncompiled data object.
if (!non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()))) {
FlushBytecodeFromSFI(flushing_candidate);
}
// Now record the slot, which has either been updated to an uncompiled data,
// Baseline code or BytecodeArray which is still alive.
// or is the BytecodeArray which is still alive.
ObjectSlot slot =
flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
@ -2351,26 +2308,7 @@ void MarkCompactCollector::ClearFlushedJsFunctions() {
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
};
flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
}
}
void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
DCHECK(FLAG_flush_bytecode ||
weak_objects_.baseline_flushing_candidates.IsEmpty());
JSFunction flushed_js_function;
while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask,
&flushed_js_function)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
};
flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
// Record the code slot that has been updated either to CompileLazy,
// InterpreterEntryTrampoline or baseline code.
ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset);
RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot));
flushed_js_function.ResetIfBytecodeFlushed(gc_notify_updated_slot);
}
}
@ -2687,7 +2625,6 @@ void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.js_weak_refs.Clear();
weak_objects_.weak_cells.Clear();
weak_objects_.bytecode_flushing_candidates.Clear();
weak_objects_.baseline_flushing_candidates.Clear();
weak_objects_.flushed_js_functions.Clear();
}

View File

@ -376,7 +376,7 @@ class MainMarkingVisitor final
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
CodeFlushMode bytecode_flush_mode,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
kMainThreadTask, local_marking_worklists, weak_objects, heap,
@ -570,7 +570,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
unsigned epoch() const { return epoch_; }
CodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
BytecodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
@ -668,14 +668,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Flushes a weakly held bytecode array from a shared function info.
void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
// Marks the BaselineData as live and records the slots of baseline data
// fields. This assumes that the objects in the data fields are alive.
void MarkBaselineDataAsLive(BaselineData baseline_data);
// Clears bytecode arrays / baseline code that have not been executed for
// multiple collections.
void ProcessOldCodeCandidates();
void ProcessFlushedBaselineCandidates();
// Clears bytecode arrays that have not been executed for multiple
// collections.
void ClearOldBytecodeCandidates();
// Resets any JSFunctions which have had their bytecode flushed.
void ClearFlushedJsFunctions();
@ -798,7 +793,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// that can happen while a GC is happening and we need the
// bytecode_flush_mode_ to remain the same throughout a GC, we record this at
// the start of each GC.
CodeFlushMode bytecode_flush_mode_;
BytecodeFlushMode bytecode_flush_mode_;
friend class FullEvacuator;
friend class RecordMigratedSlotVisitor;

View File

@ -132,19 +132,12 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitBytecodeArray(
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
Map map, JSFunction js_function) {
int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function);
if (js_function.ShouldFlushBaselineCode(bytecode_flush_mode_)) {
weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function);
} else {
VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset));
// TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
// also include cases where there is old bytecode even when there is no
// baseline code and remove this check here.
if (bytecode_flush_mode_ != CodeFlushMode::kDoNotFlushCode &&
js_function.NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, js_function);
}
Map map, JSFunction object) {
int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
// Check if the JSFunction needs reset due to bytecode being flushed.
if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
object.NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, object);
}
return size;
}

View File

@ -105,7 +105,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
CodeFlushMode bytecode_flush_mode,
BytecodeFlushMode bytecode_flush_mode,
bool is_embedder_tracing_enabled, bool is_forced_gc)
: local_marking_worklists_(local_marking_worklists),
weak_objects_(weak_objects),
@ -199,7 +199,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
Heap* const heap_;
const int task_id_;
const unsigned mark_compact_epoch_;
const CodeFlushMode bytecode_flush_mode_;
const BytecodeFlushMode bytecode_flush_mode_;
const bool is_embedder_tracing_enabled_;
const bool is_forced_gc_;
const bool is_shared_heap_;

View File

@ -153,21 +153,6 @@ void WeakObjects::UpdateFlushedJSFunctions(
});
}
void WeakObjects::UpdateBaselineFlushingCandidates(
WeakObjectWorklist<JSFunction>& baseline_flush_candidates) {
baseline_flush_candidates.Update(
[](JSFunction slot_in, JSFunction* slot_out) -> bool {
JSFunction forwarded = ForwardingAddress(slot_in);
if (!forwarded.is_null()) {
*slot_out = forwarded;
return true;
}
return false;
});
}
#ifdef DEBUG
template <typename Type>
bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {

View File

@ -59,7 +59,6 @@ class TransitionArray;
F(WeakCell, weak_cells, WeakCells) \
F(SharedFunctionInfo, bytecode_flushing_candidates, \
BytecodeFlushingCandidates) \
F(JSFunction, baseline_flushing_candidates, BaselineFlushingCandidates) \
F(JSFunction, flushed_js_functions, FlushedJSFunctions)
class WeakObjects {

View File

@ -284,36 +284,14 @@ bool JSFunction::is_compiled() const {
shared().is_compiled();
}
bool JSFunction::ShouldFlushBaselineCode(CodeFlushMode mode) {
if (mode == CodeFlushMode::kDoNotFlushCode) return false;
// Do a raw read for shared and code fields here since this function may be
// called on a concurrent thread. JSFunction itself should be fully
// initialized here but the SharedFunctionInfo, Code objects may not be
// initialized. We read using acquire loads to defend against that.
Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
if (!maybe_shared.IsSharedFunctionInfo()) return false;
// See crbug.com/v8/11972 for more details on acquire / release semantics for
// code field. We don't use release stores when copying code pointers from
// SFI / FV to JSFunction but it is safe in practice.
Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
if (!maybe_code.IsCodeT()) return false;
Code code = FromCodeT(CodeT::cast(maybe_code));
if (code.kind() != CodeKind::BASELINE) return false;
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
return shared.ShouldFlushBytecode(mode);
}
bool JSFunction::NeedsResetDueToFlushedBytecode() {
// Do a raw read for shared and code fields here since this function may be
// called on a concurrent thread. JSFunction itself should be fully
// initialized here but the SharedFunctionInfo, Code objects may not be
// initialized. We read using acquire loads to defend against that.
// called on a concurrent thread and the JSFunction might not be fully
// initialized yet.
Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
if (!maybe_shared.IsSharedFunctionInfo()) return false;
Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset);
if (!maybe_code.IsCodeT()) return false;
Code code = FromCodeT(CodeT::cast(maybe_code), kRelaxedLoad);
@ -321,24 +299,15 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
return !shared.is_compiled() && code.builtin_id() != Builtin::kCompileLazy;
}
bool JSFunction::NeedsResetDueToFlushedBaselineCode() {
return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData();
}
void JSFunction::ResetIfCodeFlushed(
void JSFunction::ResetIfBytecodeFlushed(
base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
HeapObject target)>>
gc_notify_updated_slot) {
if (!FLAG_flush_bytecode) return;
if (NeedsResetDueToFlushedBytecode()) {
if (FLAG_flush_bytecode && NeedsResetDueToFlushedBytecode()) {
// Bytecode was flushed and function is now uncompiled, reset JSFunction
// by setting code to CompileLazy and clearing the feedback vector.
set_code(*BUILTIN_CODE(GetIsolate(), CompileLazy));
raw_feedback_cell().reset_feedback_vector(gc_notify_updated_slot);
} else if (NeedsResetDueToFlushedBaselineCode()) {
// Flush baseline code from the closure if required
set_code(*BUILTIN_CODE(GetIsolate(), InterpreterEntryTrampoline));
}
}

View File

@ -1078,7 +1078,7 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
}
void JSFunction::ClearTypeFeedbackInfo() {
ResetIfCodeFlushed();
ResetIfBytecodeFlushed();
if (has_feedback_vector()) {
FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate();

View File

@ -210,19 +210,11 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Resets function to clear compiled data after bytecode has been flushed.
inline bool NeedsResetDueToFlushedBytecode();
inline void ResetIfCodeFlushed(
inline void ResetIfBytecodeFlushed(
base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
HeapObject target)>>
gc_notify_updated_slot = base::nullopt);
// Returns if the closure's code field has to be updated because it has
// stale baseline code.
inline bool NeedsResetDueToFlushedBaselineCode();
// Returns if baseline code is a candidate for flushing. This method is called
// from concurrent marking so we should be careful when accessing data fields.
inline bool ShouldFlushBaselineCode(CodeFlushMode mode);
DECL_GETTER(has_prototype_slot, bool)
// The initial map for an object created by this constructor.
@ -319,8 +311,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
static constexpr int kPrototypeOrInitialMapOffset =
FieldOffsets::kPrototypeOrInitialMapOffset;
class BodyDescriptor;
private:
DECL_ACCESSORS(raw_code, CodeT)
DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)

View File

@ -296,39 +296,6 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
}
};
class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = JSObject::BodyDescriptor::kStartOffset;
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
if (offset < kStartOffset) return false;
return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
// Iterate JSFunction header fields first.
int header_size = JSFunction::GetHeaderSize(map.has_prototype_slot());
DCHECK_GE(object_size, header_size);
IteratePointers(obj, kStartOffset, kCodeOffset, v);
// Code field is treated as a custom weak pointer. This field is visited as
// a weak pointer if the Code is baseline code and the bytecode array
// corresponding to this function is old. In the rest of the cases this
// field is treated as strong pointer.
IterateCustomWeakPointer(obj, kCodeOffset, v);
// Iterate rest of the header fields
DCHECK_GE(header_size, kCodeOffset);
IteratePointers(obj, kCodeOffset + kTaggedSize, header_size, v);
// Iterate rest of the fields starting after the header.
IterateJSObjectBodyImpl(map, obj, header_size, object_size, v);
}
static inline int SizeOf(Map map, HeapObject object) {
return map.instance_size();
}
};
class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {

View File

@ -575,8 +575,8 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
set_function_data(bytecode, kReleaseStore);
}
bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) {
if (mode == CodeFlushMode::kDoNotFlushCode) return false;
bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
if (mode == BytecodeFlushMode::kDoNotFlushBytecode) return false;
// TODO(rmcilroy): Enable bytecode flushing for resumable functions.
if (IsResumableFunction(kind()) || !allows_lazy_compilation()) {
@ -587,13 +587,9 @@ bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) {
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
if (data.IsBaselineData()) {
data =
ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
}
if (!data.IsBytecodeArray()) return false;
if (mode == CodeFlushMode::kStressFlushCode) return true;
if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
BytecodeArray bytecode = BytecodeArray::cast(data);

View File

@ -534,7 +534,7 @@ class SharedFunctionInfo
// Returns true if the function has old bytecode that could be flushed. This
// function shouldn't access any flags as it is used by concurrent marker.
// Hence it takes the mode as an argument.
inline bool ShouldFlushBytecode(CodeFlushMode mode);
inline bool ShouldFlushBytecode(BytecodeFlushMode mode);
enum Inlineability {
kIsInlineable,

View File

@ -57,10 +57,6 @@ bitfield struct SharedFunctionInfoFlags2 extends uint8 {
@customCppClass
@customMap // Just to place the map at the beginning of the roots array.
class SharedFunctionInfo extends HeapObject {
// function_data field is treated as a custom weak pointer. We visit this
// field as a weak pointer if there is aged bytecode. If there is no bytecode
// or if the bytecode is young then we treat it as a strong pointer. This is
// done to support flushing of bytecode.
weak function_data: Object;
name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
outer_scope_info_or_feedback_metadata: HeapObject;

View File

@ -238,7 +238,7 @@ void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
function_index)
.ToHandleChecked();
exported_function->set_code(*wrapper_code, kReleaseStore);
exported_function->set_code(*wrapper_code);
WasmExportedFunctionData function_data =
exported_function->shared().wasm_exported_function_data();
function_data.set_wrapper_code(*wrapper_code);

View File

@ -175,7 +175,7 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// Unconditionally reset the JSFunction to its SFI's code, since we can't
// serialize optimized code anyway.
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
closure->ResetIfCodeFlushed();
closure->ResetIfBytecodeFlushed();
if (closure->is_compiled()) {
if (closure->shared().HasBaselineData()) {
closure->shared().flush_baseline_data();

View File

@ -1,82 +0,0 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-gc --stress-flush-bytecode --allow-natives-syntax
// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
// Flags: --no-always-sparkplug
function HasBaselineCode(f) {
let opt_status = %GetOptimizationStatus(f);
return (opt_status & V8OptimizationStatus.kBaseline) !== 0;
}
function HasByteCode(f) {
let opt_status = %GetOptimizationStatus(f);
return (opt_status & V8OptimizationStatus.kInterpreted) !== 0;
}
var x = {b:20, c:30};
function f() {
return x.b + 10;
}
// Test bytecode gets flushed
f();
assertTrue(HasByteCode(f));
gc();
assertFalse(HasByteCode(f));
// Test baseline code and bytecode gets flushed
for (i = 1; i < 50; i++) {
f();
}
assertTrue(HasBaselineCode(f));
gc();
assertFalse(HasBaselineCode(f));
assertFalse(HasByteCode(f));
// Check bytecode isn't flushed if it's held strongly from somewhere but
// baseline code is flushed.
function f1(should_recurse) {
if (should_recurse) {
assertTrue(HasByteCode(f1));
for (i = 1; i < 50; i++) {
f1(false);
}
assertTrue(HasBaselineCode(f1));
gc();
assertFalse(HasBaselineCode(f1));
assertTrue(HasByteCode(f1));
}
return x.b + 10;
}
f1(false);
// Recurse first time so we have bytecode array on the stack that keeps
// bytecode alive.
f1(true);
// Flush bytecode
gc();
assertFalse(HasBaselineCode(f1));
assertFalse(HasByteCode(f1));
// Check baseline code and bytecode aren't flushed if baseline code is on
// stack.
function f2(should_recurse) {
if (should_recurse) {
assertTrue(HasBaselineCode(f2));
f2(false);
gc();
assertTrue(HasBaselineCode(f2));
}
return x.b + 10;
}
for (i = 1; i < 50; i++) {
f2(false);
}
assertTrue(HasBaselineCode(f2));
// Recurse with baseline code on stack
f2(true);