[SFI] Add support for flushing old Bytecode from SharedFunctionInfos.

This change makes the SFI to bytecode link pseudo-weak. The marking visitors
check whether the bytecode is old and, if so, do not mark it; instead they
push the SFI onto a bytecode_flushing_candidates worklist. Once marking
is complete, this list is walked, and for any candidate whose bytecode
has not been marked (i.e., it is only referenced by the shared function info),
the bytecode is flushed and the SFI's function data is replaced with
an UncompiledData (which overwrites the flushed bytecode array in place).
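
For orientation, the two halves of this mechanism boil down to the following
condensed sketch (illustrative only; the full implementations, including the
worklist plumbing, slot recording, and the concurrent-marking variant, are in
the diff below):

// Marking: treat the SFI -> bytecode link as weak when the bytecode is old.
int MarkingVisitor::VisitSharedFunctionInfo(Map map, SharedFunctionInfo sfi) {
  int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, sfi);
  SharedFunctionInfo::BodyDescriptor::IterateBody(map, sfi, size, this);
  if (sfi->ShouldFlushBytecode()) {
    // Old bytecode: don't mark it; revisit this SFI once marking completes.
    collector_->AddBytecodeFlushingCandidate(sfi);
  } else {
    // Fresh bytecode (or non-bytecode function data): keep the link strong.
    VisitPointer(sfi, sfi->RawField(SharedFunctionInfo::kFunctionDataOffset));
  }
  return size;
}

// After marking: flush candidates whose bytecode was not otherwise marked.
void MarkCompactCollector::ClearOldBytecodeCandidates() {
  SharedFunctionInfo candidate;
  while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThread,
                                                        &candidate)) {
    if (!non_atomic_marking_state()->IsBlackOrGrey(
            candidate->GetBytecodeArray())) {
      FlushBytecodeFromSFI(candidate);  // replaces bytecode with UncompiledData
    }
  }
}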

Since we don't track JSFunctions, they can still believe the underlying
function is compiled, and so calling them will invoke the
InterpreterEntryTrampoline. As such, logic is added to the
InterpreterEntryTrampoline to detect flushed functions and enter
CompileLazy instead.
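
In pseudocode, the check added at the top of the trampoline (written per
architecture in hand-coded assembly in the diff below) is roughly:

// Pseudocode only -- the real implementation is the per-architecture assembly
// in Generate_InterpreterEntryTrampoline.
SharedFunctionInfo sfi = closure->shared();
Object* data = sfi->function_data();
if (data->IsInterpreterData()) {
  // Unwrap to the underlying bytecode array (GetSharedFunctionInfoBytecode).
  data = InterpreterData::cast(data)->bytecode_array();
}
if (!data->IsBytecodeArray()) {
  // The bytecode was flushed, so the field now holds UncompiledData:
  // fall back to lazy compilation.
  TailCallRuntime(Runtime::kCompileLazy);  // does not return
}
// ... normal interpreter entry continues (frame setup, dispatch) ...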

BUG=v8:8395

Change-Id: I4afba79f814ca9a92dec45d59485935845a6669d
Reviewed-on: https://chromium-review.googlesource.com/c/1348433
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58158}
Author: Ross McIlroy <rmcilroy@chromium.org> (committed by Commit Bot)
Date: 2018-12-11 13:33:07 +00:00
Parent: c1bf25bb68
Commit: a55803a15d
19 changed files with 578 additions and 134 deletions


@@ -1026,6 +1026,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = r1;
Register feedback_vector = r2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
BYTECODE_ARRAY_TYPE);
__ b(ne, &compile_lazy);
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -1055,24 +1069,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister);
__ Assert(
ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(
eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
__ mov(r9, Operand(BytecodeArray::kNoAgeBytecodeAge));
__ strb(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
@@ -1164,6 +1160,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ bkpt(0); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,


@@ -438,6 +438,17 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
Register sfi_data,
Register scratch1) {
Label done;
__ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
__ B(ne, &done);
__ Ldr(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ Bind(&done);
}
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -529,13 +540,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
Label check_has_bytecode_array;
__ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(x3, x0, x0, INTERPRETER_DATA_TYPE);
__ B(ne, &check_has_bytecode_array);
__ Ldr(x3, FieldMemOperand(x3, InterpreterData::kBytecodeArrayOffset));
__ Bind(&check_has_bytecode_array);
GetSharedFunctionInfoBytecode(masm, x3, x0);
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -1134,6 +1141,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = x1;
Register feedback_vector = x2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, x11);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
BYTECODE_ARRAY_TYPE);
__ B(ne, &compile_lazy);
// Load the feedback vector from the closure.
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -1165,31 +1186,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(lr, fp, cp, closure);
__ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
Label has_bytecode_array;
__ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x11, x11,
INTERPRETER_DATA_TYPE);
__ B(ne, &has_bytecode_array);
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
InterpreterData::kBytecodeArrayOffset));
__ Bind(&has_bytecode_array);
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(
kInterpreterBytecodeArrayRegister,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
BYTECODE_ARRAY_TYPE);
__ Assert(
eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
__ Mov(x10, Operand(BytecodeArray::kNoAgeBytecodeAge));
__ Strb(x10, FieldMemOperand(kInterpreterBytecodeArrayRegister,
@@ -1289,6 +1285,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in x0.
LeaveInterpreterFrame(masm, x2);
__ Ret();
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ Unreachable(); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,


@@ -946,6 +946,15 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = edi;
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, ecx, eax);
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, eax);
__ j(not_equal, &compile_lazy);
Register feedback_vector = ecx;
Label push_stack_frame;
// Load feedback vector and check if it is valid. If valid, check for
@@ -981,9 +990,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
__ Push(eax);
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax);
__ Pop(eax);
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1087,6 +1094,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in eax.
LeaveInterpreterFrame(masm, edx, ecx);
__ ret(0);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
}


@@ -1049,6 +1049,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = rdi;
Register feedback_vector = rbx;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
kScratchRegister);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE, rax);
__ j(not_equal, &compile_lazy);
// Load the feedback vector from the closure.
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -1077,24 +1091,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rsi); // Callee's context.
__ Push(rdi); // Callee's JS function.
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
kScratchRegister);
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rax);
__ Assert(
equal,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
__ movb(FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kBytecodeAgeOffset),
@@ -1192,6 +1188,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in rax.
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,


@@ -369,7 +369,8 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
}
// Install coverage info on the shared function info.
if (compilation_info->has_coverage_info()) {
if (compilation_info->has_coverage_info() &&
!shared_info->HasCoverageInfo()) {
DCHECK(isolate->is_block_code_coverage());
isolate->debug()->InstallCoverageInfo(shared_info,
compilation_info->coverage_info());
@@ -1159,7 +1160,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
// Parse and update ParseInfo with the results.
if (!parsing::ParseFunction(&parse_info, shared_info, isolate)) {
if (!parsing::ParseAny(&parse_info, shared_info, isolate)) {
return FailWithPendingException(isolate, &parse_info, flag);
}


@@ -765,6 +765,9 @@ DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
DEFINE_BOOL(flush_bytecode, false,
"flush bytecode when it has not been executed recently")
DEFINE_BOOL(stress_flush_bytecode, false, "stress bytecode flushing")
DEFINE_BOOL(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")


@@ -381,6 +381,7 @@
F(HEAP_PROLOGUE) \
TOP_MC_SCOPES(F) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_FLUSHABLE_BYTECODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \


@@ -316,6 +316,25 @@ class ConcurrentMarkingVisitor final
// Side-effectful visitation.
// ===========================================================================
int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
if (!ShouldVisit(shared_info)) return 0;
int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
VisitMapPointer(shared_info, shared_info->map_slot());
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
this);
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
if (shared_info->ShouldFlushBytecode()) {
weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
} else {
VisitPointer(shared_info, shared_info->RawField(
SharedFunctionInfo::kFunctionDataOffset));
}
return size;
}
int VisitBytecodeArray(Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
@@ -700,6 +719,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
weak_objects_->weak_references.FlushToGlobal(task_id);
weak_objects_->js_weak_cells.FlushToGlobal(task_id);
weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;


@@ -2645,12 +2645,9 @@ Factory::NewUncompiledDataWithoutPreParsedScope(Handle<String> inferred_name,
UncompiledDataWithoutPreParsedScope::cast(
New(uncompiled_data_without_pre_parsed_scope_map(), TENURED)),
isolate());
result->set_inferred_name(*inferred_name);
result->set_start_position(start_position);
result->set_end_position(end_position);
result->set_function_literal_id(function_literal_id);
result->clear_padding();
UncompiledData::Initialize(*result, *inferred_name, start_position,
end_position, function_literal_id);
return result;
}
@@ -2663,13 +2660,11 @@ Factory::NewUncompiledDataWithPreParsedScope(
UncompiledDataWithPreParsedScope::cast(
New(uncompiled_data_with_pre_parsed_scope_map(), TENURED)),
isolate());
result->set_inferred_name(*inferred_name);
result->set_start_position(start_position);
result->set_end_position(end_position);
result->set_function_literal_id(function_literal_id);
result->set_pre_parsed_scope_data(*pre_parsed_scope_data);
result->clear_padding();
UncompiledDataWithPreParsedScope::Initialize(
*result, *inferred_name, start_position, end_position,
function_literal_id, *pre_parsed_scope_data);
return result;
}


@@ -738,6 +738,12 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
weak_objects_->current_ephemerons.Update(ephemeron_updater);
weak_objects_->next_ephemerons.Update(ephemeron_updater);
weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
#ifdef DEBUG
weak_objects_->bytecode_flushing_candidates.Iterate(
[](SharedFunctionInfo candidate) {
DCHECK(!Heap::InNewSpace(candidate));
});
#endif
}
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(


@@ -59,6 +59,25 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
return size;
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size, this);
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
if (shared_info->ShouldFlushBytecode()) {
collector_->AddBytecodeFlushingCandidate(shared_info);
} else {
VisitPointer(shared_info,
HeapObject::RawField(shared_info,
SharedFunctionInfo::kFunctionDataOffset));
}
return size;
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
@@ -433,6 +452,11 @@ void MarkCompactCollector::AddTransitionArray(TransitionArray array) {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
void MarkCompactCollector::AddBytecodeFlushingCandidate(
SharedFunctionInfo flush_candidate) {
weak_objects_.bytecode_flushing_candidates.Push(kMainThread, flush_candidate);
}
template <LiveObjectIterationMode mode>
LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk, Bitmap* bitmap,
Address start)


@@ -1900,6 +1900,11 @@ void MarkCompactCollector::ClearNonLiveReferences() {
heap()->external_string_table_.CleanUpAll();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
ClearOldBytecodeCandidates();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
// Process the weak references.
@@ -1927,6 +1932,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
DCHECK(weak_objects_.js_weak_cells.IsEmpty());
DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
}
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
@@ -1976,6 +1982,96 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
}
}
void MarkCompactCollector::FlushBytecodeFromSFI(
SharedFunctionInfo shared_info) {
DCHECK(shared_info->HasBytecodeArray());
// Retain objects required for uncompiled data.
String inferred_name = shared_info->inferred_name();
int start_position = shared_info->StartPosition();
int end_position = shared_info->EndPosition();
int function_literal_id = shared_info->FunctionLiteralId(isolate());
shared_info->DiscardCompiledMetadata(
isolate(),
[](HeapObjectPtr object, ObjectSlot slot, HeapObjectPtr target) {
RecordSlot(object, slot, target);
});
// The size of the bytecode array should always be larger than an
// UncompiledData object.
STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
UncompiledDataWithoutPreParsedScope::kSize);
// Replace bytecode array with an uncompiled data array.
HeapObject* compiled_data = shared_info->GetBytecodeArray();
Address compiled_data_start = compiled_data->address();
int compiled_data_size = compiled_data->Size();
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data as being invalid.
RememberedSet<OLD_TO_NEW>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::PREFREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::PREFREE_EMPTY_BUCKETS);
// Swap the map, using set_map_after_allocation to avoid verify heap checks
// which are not necessary since we are doing this during the GC atomic pause.
compiled_data->set_map_after_allocation(
ReadOnlyRoots(heap()).uncompiled_data_without_pre_parsed_scope_map(),
SKIP_WRITE_BARRIER);
// Create a filler object for any left over space in the bytecode array.
if (!heap()->IsLargeObject(compiled_data)) {
heap()->CreateFillerObjectAt(
compiled_data->address() + UncompiledDataWithoutPreParsedScope::kSize,
compiled_data_size - UncompiledDataWithoutPreParsedScope::kSize,
ClearRecordedSlots::kNo);
}
// Initialize the uncompiled data.
UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
UncompiledData::Initialize(
uncompiled_data, inferred_name, start_position, end_position,
function_literal_id,
[](HeapObjectPtr object, ObjectSlot slot, HeapObjectPtr target) {
RecordSlot(object, slot, target);
});
// Mark the uncompiled data as black, and ensure all fields have already been
// marked.
DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
// Use the raw function data setter to avoid validity checks, since we're
// performing the unusual task of decompiling.
shared_info->set_function_data(uncompiled_data);
DCHECK(!shared_info->is_compiled());
}
void MarkCompactCollector::ClearOldBytecodeCandidates() {
DCHECK(FLAG_flush_bytecode ||
weak_objects_.bytecode_flushing_candidates.IsEmpty());
SharedFunctionInfo flushing_candidate;
while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThread,
&flushing_candidate)) {
// If the BytecodeArray is dead, flush it, which will replace the field with
// an uncompiled data object.
if (!non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate->GetBytecodeArray())) {
FlushBytecodeFromSFI(flushing_candidate);
}
// Now record the slot, which has either been updated to an uncompiled data,
// or is the BytecodeArray which is still alive.
ObjectSlot slot = HeapObject::RawField(
flushing_candidate, SharedFunctionInfo::kFunctionDataOffset);
RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
}
}
void MarkCompactCollector::ClearFullMapTransitions() {
TransitionArray array;
while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
@@ -2217,6 +2313,7 @@ void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.weak_references.Clear();
weak_objects_.weak_objects_in_code.Clear();
weak_objects_.js_weak_cells.Clear();
weak_objects_.bytecode_flushing_candidates.Clear();
}
bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {


@@ -445,6 +445,8 @@ struct WeakObjects {
Worklist<std::pair<HeapObject*, Code>, 64> weak_objects_in_code;
Worklist<JSWeakCell, 64> js_weak_cells;
Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
};
struct EphemeronMarking {
@@ -685,6 +687,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.js_weak_cells.Push(kMainThread, weak_cell);
}
inline void AddBytecodeFlushingCandidate(SharedFunctionInfo flush_candidate);
void AddNewlyDiscovered(HeapObject* object) {
if (ephemeron_marking_.newly_discovered_overflowed) return;
@@ -805,6 +809,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the descriptor array of the parent if needed.
void ClearPotentialSimpleMapTransition(Map dead_target);
void ClearPotentialSimpleMapTransition(Map map, Map dead_target);
// Flushes a weakly held bytecode array from a shared function info.
void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
// Clears bytecode arrays that have not been executed for multiple
// collections.
void ClearOldBytecodeCandidates();
// Compact every array in the global list of transition arrays and
// trim the corresponding descriptor array if a transition target is non-live.
void ClearFullMapTransitions();
@@ -936,6 +948,7 @@ class MarkingVisitor final
V8_INLINE int VisitJSDataView(Map map, JSDataView object);
V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray object);
V8_INLINE int VisitMap(Map map, Map object);
V8_INLINE int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
V8_INLINE int VisitTransitionArray(Map map, TransitionArray object);
V8_INLINE int VisitJSWeakCell(Map map, JSWeakCell object);


@@ -188,6 +188,28 @@ class JSWeakCell::BodyDescriptor final : public BodyDescriptorBase {
}
};
class SharedFunctionInfo::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject* obj, int offset) {
return FixedBodyDescriptor<kStartOfPointerFieldsOffset,
kEndOfTaggedFieldsOffset,
kAlignedSize>::IsValidSlot(map, obj, offset);
}
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
IterateCustomWeakPointer(obj, kFunctionDataOffset, v);
IteratePointers(obj,
SharedFunctionInfo::kStartOfAlwaysStrongPointerFieldsOffset,
SharedFunctionInfo::kEndOfTaggedFieldsOffset, v);
}
static inline int SizeOf(Map map, HeapObject* object) {
return map->instance_size();
}
};
class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(AllocationSite::kCommonPointerFieldEndOffset ==


@@ -14100,6 +14100,66 @@ bool SharedFunctionInfo::HasSourceCode() const {
!reinterpret_cast<Script*>(script())->source()->IsUndefined(isolate);
}
void SharedFunctionInfo::DiscardCompiledMetadata(
Isolate* isolate, std::function<void(HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target)>
gc_notify_updated_slot) {
DisallowHeapAllocation no_gc;
if (is_compiled()) {
HeapObjectPtr outer_scope_info;
if (scope_info()->HasOuterScopeInfo()) {
outer_scope_info = scope_info()->OuterScopeInfo();
} else {
// TODO(3770): Drop explicit cast when migrating Oddball*.
outer_scope_info =
HeapObjectPtr::cast(ReadOnlyRoots(isolate).the_hole_value());
}
// Raw setter to avoid validity checks, since we're performing the unusual
// task of decompiling.
set_raw_outer_scope_info_or_feedback_metadata(outer_scope_info);
gc_notify_updated_slot(
*this,
RawField(SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset),
outer_scope_info);
} else {
DCHECK(outer_scope_info()->IsScopeInfo() ||
outer_scope_info()->IsTheHole());
}
// TODO(rmcilroy): Possibly discard ScopeInfo here as well.
}
// static
void SharedFunctionInfo::DiscardCompiled(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
DCHECK(shared_info->CanDiscardCompiled());
Handle<String> inferred_name_val =
handle(shared_info->inferred_name(), isolate);
int start_position = shared_info->StartPosition();
int end_position = shared_info->EndPosition();
int function_literal_id = shared_info->FunctionLiteralId(isolate);
shared_info->DiscardCompiledMetadata(isolate);
// Replace compiled data with a new UncompiledData object.
if (shared_info->HasUncompiledDataWithPreParsedScope()) {
// If this is uncompiled data with a pre-parsed scope data, we can just
// clear out the scope data and keep the uncompiled data.
shared_info->ClearPreParsedScopeData();
} else {
// Create a new UncompiledData, without pre-parsed scope, and update the
// function data to point to it. Use the raw function data setter to avoid
// validity checks, since we're performing the unusual task of decompiling.
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
inferred_name_val, start_position, end_position,
function_literal_id);
shared_info->set_function_data(*data);
}
}
// static
Handle<Object> SharedFunctionInfo::GetSourceCode(
Handle<SharedFunctionInfo> shared) {


@@ -709,7 +709,7 @@ class BytecodeArray : public FixedArrayBase {
kIsOldBytecodeAge = kSexagenarianBytecodeAge
};
static int SizeFor(int length) {
static constexpr int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}


@@ -90,7 +90,6 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
kNameOrScopeInfoOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
kScriptOrDebugInfoOffset)
@@ -147,6 +146,16 @@ AbstractCode SharedFunctionInfo::abstract_code() {
}
}
Object* SharedFunctionInfo::function_data() const {
return RELAXED_READ_FIELD(this, kFunctionDataOffset);
}
void SharedFunctionInfo::set_function_data(Object* data,
WriteBarrierMode mode) {
RELAXED_WRITE_FIELD(this, kFunctionDataOffset, data);
CONDITIONAL_WRITE_BARRIER(this, kFunctionDataOffset, data, mode);
}
int SharedFunctionInfo::function_token_position() const {
int offset = raw_function_token_offset();
if (offset == kFunctionTokenOutOfRange) {
@@ -453,6 +462,29 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
set_function_data(bytecode);
}
bool SharedFunctionInfo::ShouldFlushBytecode() {
if (!FLAG_flush_bytecode) return false;
// TODO(rmcilroy): Enable bytecode flushing for resumable functions and class
// member initializers.
if (IsResumableFunction(kind()) ||
IsClassMembersInitializerFunction(kind()) || !allows_lazy_compilation()) {
return false;
}
// Get a snapshot of the function data field, and if it is a bytecode array,
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object* data = function_data();
if (!data->IsBytecodeArray()) return false;
if (FLAG_stress_flush_bytecode) return true;
BytecodeArray bytecode = BytecodeArray::cast(data);
return bytecode->IsOld();
}
Code SharedFunctionInfo::InterpreterTrampoline() const {
DCHECK(HasInterpreterData());
return interpreter_data()->interpreter_trampoline();
@@ -572,6 +604,39 @@ void SharedFunctionInfo::ClearPreParsedScopeData() {
DCHECK(HasUncompiledDataWithoutPreParsedScope());
}
// static
void UncompiledData::Initialize(
UncompiledData data, String inferred_name, int start_position,
int end_position, int function_literal_id,
std::function<void(HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target)>
gc_notify_updated_slot) {
data->set_inferred_name(inferred_name);
gc_notify_updated_slot(
data, data->RawField(UncompiledData::kInferredNameOffset), inferred_name);
data->set_start_position(start_position);
data->set_end_position(end_position);
data->set_function_literal_id(function_literal_id);
data->clear_padding();
}
void UncompiledDataWithPreParsedScope::Initialize(
UncompiledDataWithPreParsedScope data, String inferred_name,
int start_position, int end_position, int function_literal_id,
PreParsedScopeData scope_data,
std::function<void(HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target)>
gc_notify_updated_slot) {
UncompiledData::Initialize(data, inferred_name, start_position, end_position,
function_literal_id, gc_notify_updated_slot);
data->set_pre_parsed_scope_data(scope_data);
gc_notify_updated_slot(
data,
data->RawField(
UncompiledDataWithPreParsedScope::kPreParsedScopeDataOffset),
scope_data);
}
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
return function_data()->IsWasmExportedFunctionData();
}
@@ -659,51 +724,6 @@ bool SharedFunctionInfo::CanDiscardCompiled() const {
return can_decompile;
}
// static
void SharedFunctionInfo::DiscardCompiled(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
DCHECK(shared_info->CanDiscardCompiled());
int start_position = shared_info->StartPosition();
int end_position = shared_info->EndPosition();
int function_literal_id = shared_info->FunctionLiteralId(isolate);
if (shared_info->is_compiled()) {
DisallowHeapAllocation no_gc;
HeapObjectPtr outer_scope_info;
if (shared_info->scope_info()->HasOuterScopeInfo()) {
outer_scope_info = shared_info->scope_info()->OuterScopeInfo();
} else {
// TODO(3770): Drop explicit cast when migrating Oddball*.
outer_scope_info =
HeapObjectPtr::cast(ReadOnlyRoots(isolate).the_hole_value());
}
// Raw setter to avoid validity checks, since we're performing the unusual
// task of decompiling.
shared_info->set_raw_outer_scope_info_or_feedback_metadata(
outer_scope_info);
} else {
DCHECK(shared_info->outer_scope_info()->IsScopeInfo() ||
shared_info->outer_scope_info()->IsTheHole());
}
if (shared_info->HasUncompiledDataWithPreParsedScope()) {
// If this is uncompiled data with a pre-parsed scope data, we can just
// clear out the scope data and keep the uncompiled data.
shared_info->ClearPreParsedScopeData();
} else {
// Create a new UncompiledData, without pre-parsed scope, and update the
// function data to point to it. Use the raw function data setter to avoid
// validity checks, since we're performing the unusual task of decompiling.
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
handle(shared_info->inferred_name(), isolate), start_position,
end_position, function_literal_id);
shared_info->set_function_data(*data);
}
}
} // namespace internal
} // namespace v8


@@ -76,7 +76,16 @@ class UncompiledData : public HeapObjectPtr {
DECL_CAST2(UncompiledData)
inline static void Initialize(
UncompiledData data, String inferred_name, int start_position,
int end_position, int function_literal_id,
std::function<void(HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target)>
gc_notify_updated_slot = [](HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target) {});
// Layout description.
#define UNCOMPILED_DATA_FIELDS(V) \
V(kStartOfPointerFieldsOffset, 0) \
V(kInferredNameOffset, kTaggedSize) \
@@ -129,7 +138,17 @@ class UncompiledDataWithPreParsedScope : public UncompiledData {
DECL_PRINTER(UncompiledDataWithPreParsedScope)
DECL_VERIFIER(UncompiledDataWithPreParsedScope)
inline static void Initialize(
UncompiledDataWithPreParsedScope data, String inferred_name,
int start_position, int end_position, int function_literal_id,
PreParsedScopeData scope_data,
std::function<void(HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target)>
gc_notify_updated_slot = [](HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target) {});
// Layout description.
#define UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_FIELDS(V) \
V(kStartOfPointerFieldsOffset, 0) \
V(kPreParsedScopeDataOffset, kTaggedSize) \
@@ -503,9 +522,21 @@ class SharedFunctionInfo : public HeapObjectPtr {
inline bool CanDiscardCompiled() const;
// Flush compiled data from this function, setting it back to CompileLazy and
// clearing any feedback metadata.
static inline void DiscardCompiled(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info);
// clearing any compiled metadata.
static void DiscardCompiled(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info);
// Discard the compiled metadata. If called during GC then
// |gc_notify_updated_slot| should be used to record any slot updates.
void DiscardCompiledMetadata(
Isolate* isolate,
std::function<void(HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target)>
gc_notify_updated_slot = [](HeapObjectPtr object, ObjectSlot slot,
HeapObjectPtr target) {});
// Returns true if the function has old bytecode that could be flushed.
inline bool ShouldFlushBytecode();
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -600,6 +631,7 @@ class SharedFunctionInfo : public HeapObjectPtr {
/* Pointer fields. */ \
V(kStartOfPointerFieldsOffset, 0) \
V(kFunctionDataOffset, kTaggedSize) \
V(kStartOfAlwaysStrongPointerFieldsOffset, 0) \
V(kNameOrScopeInfoOffset, kTaggedSize) \
V(kOuterScopeInfoOrFeedbackMetadataOffset, kTaggedSize) \
V(kScriptOrDebugInfoOffset, kTaggedSize) \
@@ -621,9 +653,7 @@ class SharedFunctionInfo : public HeapObjectPtr {
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
kEndOfTaggedFieldsOffset, kAlignedSize>
BodyDescriptor;
class BodyDescriptor;
// Bit positions in |flags|.
#define FLAGS_BIT_FIELDS(V, _) \


@@ -1276,6 +1276,147 @@ TEST(Iteration) {
CHECK_EQ(objs_count, ObjectsFoundInHeap(CcTest::heap(), objs, objs_count));
}
TEST(TestBytecodeFlushing) {
#ifndef V8_LITE_MODE
FLAG_opt = false;
FLAG_always_opt = false;
i::FLAG_optimize_for_size = false;
#endif // V8_LITE_MODE
i::FLAG_flush_bytecode = true;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
Factory* factory = i_isolate->factory();
{
v8::HandleScope scope(isolate);
v8::Context::New(isolate)->Enter();
const char* source =
"function foo() {"
" var x = 42;"
" var y = 42;"
" var z = x + y;"
"};"
"foo()";
Handle<String> foo_name = factory->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
{
v8::HandleScope scope(isolate);
CompileRun(source);
}
// Check function is compiled.
Handle<Object> func_value =
Object::GetProperty(i_isolate, i_isolate->global_object(), foo_name)
.ToHandleChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use full marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
CcTest::CollectAllGarbage();
}
// foo should no longer be in the compilation cache
CHECK(!function->shared()->is_compiled());
CHECK(!function->is_compiled());
// Call foo to get it recompiled.
CompileRun("foo()");
CHECK(function->shared()->is_compiled());
CHECK(function->is_compiled());
}
}
#ifndef V8_LITE_MODE
TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
FLAG_opt = true;
FLAG_always_opt = false;
i::FLAG_optimize_for_size = false;
i::FLAG_incremental_marking = true;
i::FLAG_flush_bytecode = true;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
const char* source =
"function foo() {"
" var x = 42;"
" var y = 42;"
" var z = x + y;"
"};"
"foo()";
Handle<String> foo_name = factory->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
{
v8::HandleScope scope(CcTest::isolate());
CompileRun(source);
}
// Check function is compiled.
Handle<Object> func_value =
Object::GetProperty(isolate, isolate->global_object(), foo_name)
.ToHandleChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use incremental marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::CollectAllGarbage();
}
CHECK(!function->shared()->is_compiled());
CHECK(!function->is_compiled());
// This compile will compile the function again.
{
v8::HandleScope scope(CcTest::isolate());
CompileRun("foo();");
}
// Simulate several GCs that use incremental marking but make sure
// the loop breaks once the function is enqueued as a candidate.
for (int i = 0; i < kAgingThreshold; i++) {
heap::SimulateIncrementalMarking(CcTest::heap());
if (function->shared()->GetBytecodeArray()->IsOld()) break;
CcTest::CollectAllGarbage();
}
// Force optimization while incremental marking is active and while
// the function is enqueued as a candidate.
{
v8::HandleScope scope(CcTest::isolate());
CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
}
// Simulate one final GC and make sure the candidate wasn't flushed.
CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
CHECK(function->is_compiled());
}
#endif // V8_LITE_MODE
TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
if (!FLAG_incremental_marking) return;
// Turn off always_opt because it interferes with running the built-in for