[heap] Refactoring prologue of CollectGarbage.

This is the third CL in a series of CollectGarbage refactoring CLs. It moves
the forced-GC bookkeeping, the allocation-timeout reset, and the
linear-allocation-area cleanup out of CollectGarbage and into
GarbageCollectionPrologue, which now takes the GC reason and callback flags.

Bug: v8:12503
Change-Id: Icc578eb9a4dc06083ea3380a00a50dbdbfd22e34
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3420908
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78949}
Hannes Payer authored 2022-02-04 15:44:10 +01:00, committed by V8 LUCI CQ
parent c97337ff5e
commit cf3b6036d0
2 changed files with 58 additions and 61 deletions
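For orientation before the diff: the CL narrows CollectGarbage's "Part 2" so
that the observable work happens inside the tracer's observable pause, and the
prologue receives the GC reason and callback flags. A condensed sketch of the
resulting shape (not verbatim V8 code; most details elided, every identifier
below appears in the diff itself):

// Part 2: The main garbage collection phase (sketch).
DisallowGarbageCollection no_gc_during_gc;

size_t committed_memory_before = collector == GarbageCollector::MARK_COMPACTOR
                                     ? CommittedOldGenerationMemory()
                                     : 0;
{
  tracer()->StartObservablePause(collector, gc_reason, collector_reason);
  VMState<GC> state(isolate());
  DevToolsTraceEventScope devtools_trace_event_scope(/* ... */);

  // On-stack handle filtering and the CppHeap stack marker go here, then
  // the prologue runs with the reason and flags it now needs:
  GarbageCollectionPrologue(gc_reason, gc_callback_flags);
  // ... the collection itself ...
}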

src/heap/heap.cc

@@ -906,9 +906,35 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
 bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }

-void Heap::GarbageCollectionPrologue() {
+void Heap::GarbageCollectionPrologue(
+    GarbageCollectionReason gc_reason,
+    const v8::GCCallbackFlags gc_callback_flags) {
   TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
+
+  is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
+                          current_gc_flags_ & kForcedGC ||
+                          force_gc_on_next_allocation_;
+  is_current_gc_for_heap_profiler_ =
+      gc_reason == GarbageCollectionReason::kHeapProfiler;
+  if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+  // Reset the allocation timeout, but make sure to allow at least a few
+  // allocations after a collection. The reason for this is that we have a lot
+  // of allocation sequences and we assume that a garbage collection will allow
+  // the subsequent allocation attempts to go through.
+  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+    allocation_timeout_ =
+        std::max(6, NextAllocationTimeout(allocation_timeout_));
+  }
+#endif
+
+  // There may be an allocation memento behind objects in new space. Upon
+  // evacuation of a non-full new space (or if we are on the last page) there
+  // may be uninitialized memory behind top. We fill the remainder of the page
+  // with a filler.
+  if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
+
   // Reset GC statistics.
   promoted_objects_size_ = 0;
   previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
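The std::max(6, ...) clamp in the V8_ENABLE_ALLOCATION_TIMEOUT block above is
what the comment means by "allow at least a few allocations": even with an
aggressive --gc-interval, at least six allocations succeed after each
collection. A minimal standalone illustration (hypothetical values;
NextAllocationTimeoutStub merely stands in for Heap::NextAllocationTimeout):

#include <algorithm>
#include <cstdio>

// Stand-in for Heap::NextAllocationTimeout(); in V8 it derives the next
// timeout from FLAG_random_gc_interval or FLAG_gc_interval. Hypothetical,
// for illustration only.
static int NextAllocationTimeoutStub(int configured_interval) {
  return configured_interval;
}

int main() {
  // Even if the configured interval is tiny, the prologue clamps the
  // post-GC timeout to at least 6, so common multi-allocation sequences
  // can finish before the next induced allocation failure.
  for (int configured : {0, 1, 6, 100}) {
    int timeout = std::max(6, NextAllocationTimeoutStub(configured));
    std::printf("configured=%d -> allocations allowed after GC=%d\n",
                configured, timeout);
  }
  return 0;
}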
@@ -1690,6 +1716,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
   // collection is triggered. Note that these callbacks may trigger another
   // garbage collection since they may allocate.

+  DCHECK(AllowGarbageCollection::IsAllowed());
+
   // Ensure that all pending phantom callbacks are invoked.
   isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
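For context on the InvokeSecondPassPhantomCallbacks call above: phantom weak
callbacks run in two passes, and only the second pass may allocate, which is
why CollectGarbage flushes pending second-pass callbacks up front and why the
comment warns they can trigger another GC. A sketch using the public v8
embedder API from v8.h (the Wrapper type is hypothetical):

#include <v8.h>

// Hypothetical embedder-side wrapper kept alive by a weak handle.
struct Wrapper {
  v8::Global<v8::Object> handle;
};

// First pass: runs during GC and must not allocate; it only resets the
// handle and schedules the heavier cleanup for the second pass.
void FirstPass(const v8::WeakCallbackInfo<Wrapper>& info) {
  info.GetParameter()->handle.Reset();
  info.SetSecondPassCallback([](const v8::WeakCallbackInfo<Wrapper>& info) {
    delete info.GetParameter();  // Second pass: may allocate, may re-enter GC.
  });
}

void MakeWeak(Wrapper* wrapper) {
  wrapper->handle.SetWeak(wrapper, FirstPass, v8::WeakCallbackType::kParameter);
}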
@@ -1715,58 +1743,25 @@ bool Heap::CollectGarbage(AllocationSpace space,
   }

   // Part 2: The main garbage collection phase.
-  is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
-                          current_gc_flags_ & kForcedGC ||
-                          force_gc_on_next_allocation_;
-  is_current_gc_for_heap_profiler_ =
-      gc_reason == GarbageCollectionReason::kHeapProfiler;
-  if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
-
-  DevToolsTraceEventScope devtools_trace_event_scope(
-      this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
-      GarbageCollectionReasonToString(gc_reason));
-
-  // Filter on-stack reference below this method.
-  isolate()
-      ->global_handles()
-      ->CleanupOnStackReferencesBelowCurrentStackPosition();
-
-  // The VM is in the GC state until exiting this function.
-  VMState<GC> state(isolate());
-
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-  // Reset the allocation timeout, but make sure to allow at least a few
-  // allocations after a collection. The reason for this is that we have a lot
-  // of allocation sequences and we assume that a garbage collection will allow
-  // the subsequent allocation attempts to go through.
-  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
-    allocation_timeout_ =
-        std::max(6, NextAllocationTimeout(allocation_timeout_));
-  }
-#endif
-
-  // There may be an allocation memento behind objects in new space. Upon
-  // evacuation of a non-full new space (or if we are on the last page) there
-  // may be uninitialized memory behind top. We fill the remainder of the page
-  // with a filler.
-  if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
-
-  if (IsYoungGenerationCollector(collector) &&
-      !incremental_marking()->IsStopped()) {
-    if (FLAG_trace_incremental_marking) {
-      isolate()->PrintWithTimestamp(
-          "[IncrementalMarking] Scavenge during marking.\n");
-    }
-  }
+  DisallowGarbageCollection no_gc_during_gc;

   size_t freed_global_handles = 0;
-  size_t committed_memory_before = 0;
-
-  if (collector == GarbageCollector::MARK_COMPACTOR) {
-    committed_memory_before = CommittedOldGenerationMemory();
-    if (cpp_heap()) {
+  size_t committed_memory_before = collector == GarbageCollector::MARK_COMPACTOR
+                                       ? CommittedOldGenerationMemory()
+                                       : 0;
+
+  {
+    tracer()->StartObservablePause(collector, gc_reason, collector_reason);
+    VMState<GC> state(isolate());
+    DevToolsTraceEventScope devtools_trace_event_scope(
+        this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
+        GarbageCollectionReasonToString(gc_reason));
+
+    // Filter on-stack reference below this method.
+    isolate()
+        ->global_handles()
+        ->CleanupOnStackReferencesBelowCurrentStackPosition();
+
+    if (collector == GarbageCollector::MARK_COMPACTOR && cpp_heap()) {
       // CppHeap needs a stack marker at the top of all entry points to allow
       // deterministic passes over the stack. E.g., a verifier that should only
       // find a subset of references of the marker.
@@ -1776,14 +1771,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
       static_cast<v8::internal::CppHeap*>(cpp_heap())
           ->SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
     }
-  }
-
-  {
-    tracer()->StartObservablePause(collector, gc_reason, collector_reason);
-    DCHECK(AllowGarbageCollection::IsAllowed());
-    DisallowGarbageCollection no_gc_during_gc;

-    GarbageCollectionPrologue();
+    GarbageCollectionPrologue(gc_reason, gc_callback_flags);
     {
       TimedHistogram* gc_type_timer = GCTypeTimer(collector);
       TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
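The SetStackEndOfCurrentGC call above is what makes conservative stack scans
repeatable across GC phases. A minimal sketch of the pattern with a
hypothetical scanner type (not the CppHeap API, which is internal to V8):

#include <cstdint>

// Hypothetical scanner: record the innermost stack address once, at the GC
// entry point, so that every conservative stack scan during this GC covers
// the same range and stays deterministic.
class ConservativeScanner {
 public:
  void SetStackEndOfCurrentGC(const void* stack_position) {
    stack_end_ = reinterpret_cast<uintptr_t>(stack_position);
  }
  // A later verifier scans only addresses above the recorded marker (the
  // stack grows down), so it sees a subset of what the marking pass saw.
  bool InScannedRange(const void* slot) const {
    return reinterpret_cast<uintptr_t>(slot) >= stack_end_;
  }

 private:
  uintptr_t stack_end_ = 0;
};

void GCEntryPoint(ConservativeScanner& scanner) {
  int marker = 0;  // The address of a local approximates the stack top here.
  scanner.SetStackEndOfCurrentGC(&marker);
  // ... run the GC; all stack scans use the recorded marker ...
}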
@@ -1797,10 +1786,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
       OptionalTimedHistogramScope histogram_timer_priority_scope(
           gc_type_priority_timer, isolate_, mode);

-      if (!IsYoungGenerationCollector(collector)) {
-        PROFILE(isolate_, CodeMovingGCEvent());
-      }
-
       if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
         tp_heap_->CollectGarbage();
       } else {
@@ -2457,6 +2442,7 @@ void Heap::MarkCompact() {
   SetGCState(MARK_COMPACT);

+  PROFILE(isolate_, CodeMovingGCEvent());
   CodeSpaceMemoryModificationScope code_modifcation(this);

   // Disable soft allocation limits in the shared heap, if one exists, as
@@ -2497,6 +2483,11 @@ void Heap::MinorMarkCompact() {
   DCHECK(FLAG_minor_mc);
   DCHECK(new_space());

+  if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
+    isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] MinorMarkCompact during marking.\n");
+  }
+
   PauseAllocationObserversScope pause_observers(this);
   SetGCState(MINOR_MARK_COMPACT);
@@ -2601,6 +2592,11 @@ void Heap::EvacuateYoungGeneration() {
 void Heap::Scavenge() {
   DCHECK_NOT_NULL(new_space());

+  if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
+    isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] Scavenge during marking.\n");
+  }
+
   if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
     tracer()->NotifyYoungGenerationHandling(
         YoungGenerationHandling::kFastPromotionDuringScavenge);

src/heap/heap.h

@@ -1949,7 +1949,8 @@ class Heap {
   // Code that should be run before and after each GC. Includes some
   // reporting/verification activities when compiled with DEBUG set.
-  void GarbageCollectionPrologue();
+  void GarbageCollectionPrologue(GarbageCollectionReason gc_reason,
+                                 const v8::GCCallbackFlags gc_callback_flags);
   void GarbageCollectionPrologueInSafepoint();
   void GarbageCollectionEpilogue(GarbageCollector collector);
   void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector);
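On the embedder side, the same v8::kGCCallbackFlagForced bit that the new
prologue folds into is_current_gc_forced_ is observable through the public GC
callback API; a small sketch (standard v8.h API, printf only for illustration):

#include <v8.h>
#include <cstdio>

// Epilogue callback observing the forced-GC bit. Signature and registration
// are the standard v8::Isolate GC callback API.
void OnGCEpilogue(v8::Isolate* isolate, v8::GCType type,
                  v8::GCCallbackFlags flags) {
  if (flags & v8::kGCCallbackFlagForced) {
    std::printf("GC of type %d was forced\n", static_cast<int>(type));
  }
}

void InstallCallback(v8::Isolate* isolate) {
  isolate->AddGCEpilogueCallback(OnGCEpilogue);
}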