[heap] New mechanism for requesting GC from background threads

Background threads use a new mechanism to request a GC from the main
thread. Previously they requested the collection via
MemoryPressureNotification; however, this conflicted with the embedder's
own usage of MemoryPressureNotification.

Bug: v8:10315
Change-Id: Ib25a13a43e1f6a8785bb0d421dd056ae06a4a350
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2429270
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70249}
commit b187504e98
parent 82efa4bd7a
Author: Dominik Inführ <dinfuehr@chromium.org>
Date:   2020-09-30 18:39:55 +02:00
Committed by: Commit Bot

4 changed files with 134 additions and 30 deletions
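In short: a background thread that fails an allocation now flags the collection barrier, arms the main thread's stack guard, posts an interrupt task, and blocks until the main thread has collected and resumed the waiters. The sketch below models that handshake with simplified, hypothetical names (CollectionBarrierModel, gc_interrupt_) that do not exist in V8; the real implementation interrupts the main thread via Isolate's stack guard plus a posted CancelableTask rather than a polled atomic flag.

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

enum class RequestState { kDefault, kCollection, kShutdown };

class CollectionBarrierModel {
 public:
  // Background thread: the first requester wins the CAS and interrupts the
  // main thread; every requester then blocks until the collection ran.
  void AwaitCollectionBackground() {
    RequestState expected = RequestState::kDefault;
    if (state_.compare_exchange_strong(expected, RequestState::kCollection)) {
      gc_interrupt_.store(true, std::memory_order_release);  // "stack guard"
    }
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] {
      return state_.load(std::memory_order_relaxed) !=
             RequestState::kCollection;
    });
  }

  // Main thread: run the GC when the interrupt fired, then resume waiters.
  void PerformCollectionIfRequested() {
    if (!gc_interrupt_.exchange(false, std::memory_order_acq_rel)) return;
    // ... a real implementation would collect garbage here ...
    std::lock_guard<std::mutex> lock(mutex_);
    state_.store(RequestState::kDefault, std::memory_order_relaxed);
    cv_.notify_all();  // resume all threads waiting for the GC
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  std::atomic<RequestState> state_{RequestState::kDefault};
  std::atomic<bool> gc_interrupt_{false};
};

int main() {
  CollectionBarrierModel barrier;
  std::atomic<bool> done{false};

  // "Main thread" loop, polling for the GC interrupt.
  std::thread main_loop([&] {
    while (!done.load()) {
      barrier.PerformCollectionIfRequested();
      std::this_thread::yield();
    }
  });

  // Background threads that failed an allocation and request a collection.
  std::vector<std::thread> workers;
  for (int i = 0; i < 3; i++) {
    workers.emplace_back([&] { barrier.AwaitCollectionBackground(); });
  }
  for (auto& w : workers) w.join();

  done = true;
  main_loop.join();
}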


@@ -1095,7 +1095,8 @@ void Heap::DeoptMarkedAllocationSites() {
 void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
   if (collector == MARK_COMPACTOR) {
-    memory_pressure_level_ = MemoryPressureLevel::kNone;
+    memory_pressure_level_.store(MemoryPressureLevel::kNone,
+                                 std::memory_order_relaxed);
   }
 
   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
@@ -1151,6 +1152,9 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
     ReduceNewSpaceSize();
   }
+
+  // Resume all threads waiting for the GC.
+  collection_barrier_.ResumeThreadsAwaitingCollection();
 }
 
 void Heap::GarbageCollectionEpilogue() {
@@ -1212,6 +1216,8 @@ void Heap::HandleGCRequest() {
   } else if (HighMemoryPressure()) {
     incremental_marking()->reset_request_type();
    CheckMemoryPressure();
+  } else if (CollectionRequested()) {
+    CheckCollectionRequested();
   } else if (incremental_marking()->request_type() ==
              IncrementalMarking::COMPLETE_MARKING) {
     incremental_marking()->reset_request_type();
@@ -1678,8 +1684,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
     isolate()->CountUsage(v8::Isolate::kForcedGC);
   }
 
-  collection_barrier_.CollectionPerformed();
-
   // Start incremental marking for the next cycle. We do this only for scavenger
   // to avoid a loop where mark-compact causes another mark-compact.
   if (IsYoungGenerationCollector(collector)) {
@@ -2005,34 +2009,72 @@ void Heap::EnsureFromSpaceIsCommitted() {
   FatalProcessOutOfMemory("Committing semi space failed.");
 }
 
-void Heap::CollectionBarrier::CollectionPerformed() {
+void Heap::CollectionBarrier::ResumeThreadsAwaitingCollection() {
   base::MutexGuard guard(&mutex_);
-  gc_requested_ = false;
+  ClearCollectionRequested();
   cond_.NotifyAll();
 }
 
 void Heap::CollectionBarrier::ShutdownRequested() {
   base::MutexGuard guard(&mutex_);
-  shutdown_requested_ = true;
+  state_.store(RequestState::kShutdown);
   cond_.NotifyAll();
 }
 
-void Heap::CollectionBarrier::Wait() {
-  base::MutexGuard guard(&mutex_);
-
-  if (shutdown_requested_) return;
-
-  if (!gc_requested_) {
-    heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
-    gc_requested_ = true;
-  }
-
-  while (gc_requested_ && !shutdown_requested_) {
-    cond_.Wait(&mutex_);
-  }
-}
-
-void Heap::RequestAndWaitForCollection() { collection_barrier_.Wait(); }
+class BackgroundCollectionInterruptTask : public CancelableTask {
+ public:
+  explicit BackgroundCollectionInterruptTask(Heap* heap)
+      : CancelableTask(heap->isolate()), heap_(heap) {}
+
+  ~BackgroundCollectionInterruptTask() override = default;
+
+ private:
+  // v8::internal::CancelableTask overrides.
+  void RunInternal() override { heap_->CheckCollectionRequested(); }
+
+  Heap* heap_;
+
+  DISALLOW_COPY_AND_ASSIGN(BackgroundCollectionInterruptTask);
+};
+
+void Heap::CollectionBarrier::AwaitCollectionBackground() {
+  if (FirstCollectionRequest()) {
+    // This is the first background thread requesting collection, ask the main
+    // thread for GC.
+    ActivateStackGuardAndPostTask();
+  }
+
+  BlockUntilCollected();
+}
+
+void Heap::CollectionBarrier::ActivateStackGuardAndPostTask() {
+  Isolate* isolate = heap_->isolate();
+  ExecutionAccess access(isolate);
+  isolate->stack_guard()->RequestGC();
+
+  auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+      reinterpret_cast<v8::Isolate*>(isolate));
+  taskrunner->PostTask(
+      std::make_unique<BackgroundCollectionInterruptTask>(heap_));
+}
+
+void Heap::CollectionBarrier::BlockUntilCollected() {
+  base::MutexGuard guard(&mutex_);
+
+  while (CollectionRequested()) {
+    cond_.Wait(&mutex_);
+  }
+}
+
+void Heap::RequestCollectionBackground() {
+  collection_barrier_.AwaitCollectionBackground();
+}
+
+void Heap::CheckCollectionRequested() {
+  if (!collection_barrier_.CollectionRequested()) return;
+
+  CollectAllGarbage(current_gc_flags_,
+                    GarbageCollectionReason::kBackgroundAllocationFailure,
+                    current_gc_callback_flags_);
+}
 
 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
   if (start_new_space_size == 0) return;
@@ -3812,11 +3854,11 @@ void Heap::CheckMemoryPressure() {
     // The optimizing compiler may be unnecessarily holding on to memory.
     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
   }
-  MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
   // Reset the memory pressure level to avoid recursive GCs triggered by
   // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
   // the finalizers.
-  memory_pressure_level_ = MemoryPressureLevel::kNone;
+  MemoryPressureLevel memory_pressure_level = memory_pressure_level_.exchange(
+      MemoryPressureLevel::kNone, std::memory_order_relaxed);
   if (memory_pressure_level == MemoryPressureLevel::kCritical) {
     TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
     CollectGarbageOnMemoryPressure();
@@ -3869,8 +3911,8 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
                                       bool is_isolate_locked) {
   TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
                static_cast<int>(level));
-  MemoryPressureLevel previous = memory_pressure_level_;
-  memory_pressure_level_ = level;
+  MemoryPressureLevel previous =
+      memory_pressure_level_.exchange(level, std::memory_order_relaxed);
   if ((previous != MemoryPressureLevel::kCritical &&
        level == MemoryPressureLevel::kCritical) ||
       (previous == MemoryPressureLevel::kNone &&
@@ -4048,6 +4090,8 @@ const char* Heap::GarbageCollectionReasonToString(
       return "measure memory";
     case GarbageCollectionReason::kUnknown:
       return "unknown";
+    case GarbageCollectionReason::kBackgroundAllocationFailure:
+      return "background allocation failure";
   }
   UNREACHABLE();
 }
@@ -4930,6 +4974,9 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
   // Ensure that retry of allocation on background thread succeeds
   if (IsRetryOfFailedAllocation(local_heap)) return true;
 
+  // Background thread requested GC, allocation should fail
+  if (CollectionRequested()) return false;
+
   if (ShouldOptimizeForMemoryUsage()) return false;
   if (ShouldOptimizeForLoadTime()) return true;
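The CheckMemoryPressure hunk above folds a separate load and store of memory_pressure_level_ into one atomic exchange. A standalone comparison of the two shapes, assuming hypothetical free-standing names (level, ReadAndReset); in the two-step form, a concurrent MemoryPressureNotification landing between the load and the store would be silently erased:

#include <atomic>

enum class MemoryPressureLevel { kNone, kModerate, kCritical };

std::atomic<MemoryPressureLevel> level{MemoryPressureLevel::kNone};

// Racy two-step shape: a concurrent store of kCritical between the load
// and the store below is overwritten, and that notification is lost.
MemoryPressureLevel ReadAndResetRacy() {
  MemoryPressureLevel current = level.load(std::memory_order_relaxed);
  level.store(MemoryPressureLevel::kNone, std::memory_order_relaxed);
  return current;
}

// Shape used by the patch: read and reset in a single atomic step, so
// every notification is observed exactly once.
MemoryPressureLevel ReadAndReset() {
  return level.exchange(MemoryPressureLevel::kNone,
                        std::memory_order_relaxed);
}

int main() {
  level.store(MemoryPressureLevel::kCritical, std::memory_order_relaxed);
  return ReadAndReset() == MemoryPressureLevel::kCritical ? 0 : 1;
}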


@@ -149,7 +149,8 @@ enum class GarbageCollectionReason {
   kTesting = 21,
   kExternalFinalize = 22,
   kGlobalAllocationLimit = 23,
-  kMeasureMemory = 24
+  kMeasureMemory = 24,
+  kBackgroundAllocationFailure = 25,
   // If you add new items here, then update the incremental_marking_reason,
   // mark_compact_reason, and scavenge_reason counters in counters.h.
   // Also update src/tools/metrics/histograms/histograms.xml in chromium.
@@ -669,7 +670,8 @@ class Heap {
   // Returns false if not able to reserve.
   bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
 
-  void RequestAndWaitForCollection();
+  // Requests collection and blocks until GC is finished.
+  void RequestCollectionBackground();
 
   //
   // Support for the API.
@@ -770,9 +772,16 @@ class Heap {
   V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage();
 
   bool HighMemoryPressure() {
-    return memory_pressure_level_ != MemoryPressureLevel::kNone;
+    return memory_pressure_level_.load(std::memory_order_relaxed) !=
+           MemoryPressureLevel::kNone;
   }
 
+  bool CollectionRequested() {
+    return collection_barrier_.CollectionRequested();
+  }
+
+  void CheckCollectionRequested();
+
   void RestoreHeapLimit(size_t heap_limit) {
     // Do not set the limit lower than the live size + some slack.
     size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
@@ -1574,20 +1583,68 @@ class Heap {
     DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
   };
 
   // This class stops and resumes all background threads waiting for GC.
   class CollectionBarrier {
     Heap* heap_;
     base::Mutex mutex_;
     base::ConditionVariable cond_;
-    bool gc_requested_;
-    bool shutdown_requested_;
+
+    enum class RequestState {
+      // Default state, no collection requested and tear down wasn't initiated
+      // yet.
+      kDefault,
+
+      // Collection was already requested
+      kCollection,
+
+      // This state is reached after isolate starts to shut down. The main
+      // thread can't perform any GCs anymore, so all allocations need to be
+      // allowed from here on until background thread finishes.
+      kShutdown,
+    };
+
+    // The current state.
+    std::atomic<RequestState> state_;
+
+    void BlockUntilCollected();
+
+    // Request GC by activating stack guards and posting a task to perform the
+    // GC.
+    void ActivateStackGuardAndPostTask();
+
+    // Returns true when state was successfully updated from kDefault to
+    // kCollection.
+    bool FirstCollectionRequest() {
+      RequestState expected = RequestState::kDefault;
+      return state_.compare_exchange_strong(expected,
+                                            RequestState::kCollection);
+    }
+
+    // Sets state back to kDefault - invoked at end of GC.
+    void ClearCollectionRequested() {
+      RequestState old_state =
+          state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
+      CHECK_NE(old_state, RequestState::kShutdown);
+    }
 
    public:
     explicit CollectionBarrier(Heap* heap)
-        : heap_(heap), gc_requested_(false), shutdown_requested_(false) {}
+        : heap_(heap), state_(RequestState::kDefault) {}
 
-    void CollectionPerformed();
+    // Checks whether any background thread requested GC.
+    bool CollectionRequested() {
+      return state_.load(std::memory_order_relaxed) ==
+             RequestState::kCollection;
+    }
+
+    // Resumes threads waiting for collection.
+    void ResumeThreadsAwaitingCollection();
+
+    // Sets current state to kShutdown.
     void ShutdownRequested();
-    void Wait();
+
+    // This is the method used by background threads to request and wait for
+    // GC.
+    void AwaitCollectionBackground();
   };
 
   struct StringTypeTable {
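FirstCollectionRequest above relies on compare_exchange_strong so that exactly one background thread becomes the requester: only the thread that still observes kDefault wins the CAS, so the stack guard is armed and the interrupt task posted once per GC cycle. A standalone illustration with hypothetical free-standing names (state, winners, TryRequest); this is not V8 code:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum class RequestState { kDefault, kCollection, kShutdown };

std::atomic<RequestState> state{RequestState::kDefault};
std::atomic<int> winners{0};

void TryRequest() {
  RequestState expected = RequestState::kDefault;
  // Only the thread that still observes kDefault wins; the CAS fails for
  // everyone else because `state` is already kCollection.
  if (state.compare_exchange_strong(expected, RequestState::kCollection)) {
    winners.fetch_add(1);
  }
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 8; i++) threads.emplace_back(TryRequest);
  for (auto& t : threads) t.join();
  std::printf("winners: %d\n", winners.load());  // prints 1, never 8
}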


@@ -159,7 +159,7 @@ Address LocalHeap::PerformCollectionAndAllocateAgain(
   for (int i = 0; i < kMaxNumberOfRetries; i++) {
     {
       ParkedScope scope(this);
-      heap_->RequestAndWaitForCollection();
+      heap_->RequestCollectionBackground();
     }
 
     AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
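For context, PerformCollectionAndAllocateAgain retries the allocation a bounded number of times, parking the thread (via ParkedScope) while the main thread collects. A schematic of that retry pattern with hypothetical stand-in types; the real code uses V8's AllocationResult and parks/unparks the LocalHeap:

constexpr int kMaxNumberOfRetries = 3;

struct Heap {
  // Stand-in: the real method blocks until the main thread finished a GC.
  void RequestCollectionBackground() {}
};

template <typename AllocFn>
void* AllocateWithRetries(Heap* heap, AllocFn allocate) {
  for (int i = 0; i < kMaxNumberOfRetries; i++) {
    // Request a collection and block until it finished (the thread would
    // be parked here so the GC can run), then retry the allocation.
    heap->RequestCollectionBackground();
    if (void* result = allocate()) return result;
  }
  return nullptr;  // caller escalates, e.g. to a fatal out-of-memory
}

int main() {
  Heap heap;
  int attempts = 0;
  void* p = AllocateWithRetries(&heap, [&]() -> void* {
    // Simulated allocation that succeeds on the second try.
    return ++attempts < 2 ? nullptr : &heap;
  });
  return p != nullptr ? 0 : 1;
}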


@@ -16,9 +16,9 @@ namespace internal {
   HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6)            \
   HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20)       \
   HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)                     \
-  HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 22, 23)   \
+  HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 25, 26)   \
   HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101)     \
-  HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 22, 23)                 \
+  HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 25, 26)                 \
   HR(gc_finalize_clear, V8.GCFinalizeMC.Clear, 0, 10000, 101)                \
   HR(gc_finalize_epilogue, V8.GCFinalizeMC.Epilogue, 0, 10000, 101)          \
   HR(gc_finalize_evacuate, V8.GCFinalizeMC.Evacuate, 0, 10000, 101)          \
@@ -33,7 +33,7 @@ namespace internal {
   /* Range and bucket matches BlinkGC.MainThreadMarkingThroughput. */        \
   HR(gc_main_thread_marking_throughput, V8.GCMainThreadMarkingThroughput, 0, \
      100000, 50)                                                             \
-  HR(scavenge_reason, V8.GCScavengeReason, 0, 22, 23)                        \
+  HR(scavenge_reason, V8.GCScavengeReason, 0, 25, 26)                        \
   HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3)       \
   /* Asm/Wasm. */                                                            \
   HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 1000000, \
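The bumped bounds follow from the enum change above: these HR(name, caption, min, max, num_buckets) histograms record GarbageCollectionReason values, so with kBackgroundAllocationFailure = 25 the maximum rises from 22 to 25 and the bucket count from 23 to 26, as the comment in the enum asks.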