[heap] Introduce new state in CollectionBarrier
Introduce new state kCollectionStarted in CollectionBarrier. This state is used during Heap::PerformGarbageCollection. It stops threads from requesting a GC when the GC has already started. This can happen because a background thread only requests the GC after it has parked itself - the GC could be started in between those two events.

Bug: v8:10315
Change-Id: I59cf3d4ea41c7a2c37ffce89c5b057221a2499e0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2474858
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70572}
parent fba14bde5f
commit 8358ab49d8
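To make the new protocol easier to follow, here is a small, self-contained C++ sketch of the state machine the commit message describes. It is not V8 code: the CollectionBarrierSketch class and the MarkCollectionStarted helper are invented for illustration, plain assert stands in for V8's CHECK/DCHECK macros, and only the state transitions touched by this patch are modeled.

// Sketch of the CollectionBarrier request states after this change
// (illustrative only; names outside the enum are hypothetical).
#include <atomic>
#include <cassert>
#include <cstdio>

enum class RequestState {
  kDefault,              // No GC requested.
  kCollectionRequested,  // A background thread asked for a GC.
  kCollectionStarted,    // The main thread already started the GC.
  kShutdown,             // Isolate is shutting down; no GCs anymore.
};

class CollectionBarrierSketch {
 public:
  // True only for the first requester; once the GC has started (or another
  // thread already requested it), the compare-exchange fails.
  bool FirstCollectionRequest() {
    RequestState expected = RequestState::kDefault;
    return state_.compare_exchange_strong(expected,
                                          RequestState::kCollectionRequested);
  }

  // Entered by the main thread when the GC actually begins
  // (the patch does this inside StopTimeToCollectionTimer()).
  void MarkCollectionStarted() {
    RequestState old_state = state_.exchange(RequestState::kCollectionStarted,
                                             std::memory_order_relaxed);
    assert(old_state == RequestState::kDefault ||
           old_state == RequestState::kCollectionRequested);
    (void)old_state;
  }

  // Reset at the end of the GC.
  void ClearCollectionRequested() {
    RequestState old_state =
        state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
    assert(old_state == RequestState::kCollectionStarted);
    (void)old_state;
  }

 private:
  std::atomic<RequestState> state_{RequestState::kDefault};
};

int main() {
  CollectionBarrierSketch barrier;
  printf("first request:  %d\n", barrier.FirstCollectionRequest());  // 1
  printf("second request: %d\n", barrier.FirstCollectionRequest());  // 0
  barrier.MarkCollectionStarted();
  // A request arriving after the GC has started is now a no-op.
  printf("late request:   %d\n", barrier.FirstCollectionRequest());  // 0
  barrier.ClearCollectionRequested();
  return 0;
}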
@@ -38,7 +38,20 @@ class BackgroundCollectionInterruptTask : public CancelableTask {
 };
 
 void CollectionBarrier::AwaitCollectionBackground() {
-  if (FirstCollectionRequest()) {
+  bool first;
+
+  {
+    base::MutexGuard guard(&mutex_);
+    first = FirstCollectionRequest();
+    if (first) {
+      // Initialize scope while holding the lock - prevents GC from starting
+      // before setting up this counter
+      time_to_collection_scope_.emplace(
+          heap_->isolate()->counters()->time_to_collection());
+    }
+  }
+
+  if (first) {
     // This is the first background thread requesting collection, ask the main
     // thread for GC.
     ActivateStackGuardAndPostTask();
@@ -49,6 +62,11 @@ void CollectionBarrier::AwaitCollectionBackground() {
 
 void CollectionBarrier::StopTimeToCollectionTimer() {
   base::MutexGuard guard(&mutex_);
+  RequestState old_state = state_.exchange(RequestState::kCollectionStarted,
+                                           std::memory_order_relaxed);
+  USE(old_state);
+  DCHECK(old_state == RequestState::kDefault ||
+         old_state == RequestState::kCollectionRequested);
   time_to_collection_scope_.reset();
 }
 
@@ -60,7 +78,6 @@ void CollectionBarrier::ActivateStackGuardAndPostTask() {
       reinterpret_cast<v8::Isolate*>(isolate));
   taskrunner->PostTask(
       std::make_unique<BackgroundCollectionInterruptTask>(heap_));
-  base::MutexGuard guard(&mutex_);
   time_to_collection_scope_.emplace(isolate->counters()->time_to_collection());
 }
 
@@ -29,7 +29,10 @@ class CollectionBarrier {
     kDefault,
 
     // Collection was already requested
-    kCollection,
+    kCollectionRequested,
+
+    // Collection was already started
+    kCollectionStarted,
 
     // This state is reached after isolate starts to shut down. The main
     // thread can't perform any GCs anymore, so all allocations need to be
@@ -40,8 +43,6 @@ class CollectionBarrier {
   // The current state.
   std::atomic<RequestState> state_;
 
-  void BlockUntilCollected();
-
   // Request GC by activating stack guards and posting a task to perform the
   // GC.
   void ActivateStackGuardAndPostTask();
@@ -50,14 +51,16 @@ class CollectionBarrier {
   // kCollection.
   bool FirstCollectionRequest() {
     RequestState expected = RequestState::kDefault;
-    return state_.compare_exchange_strong(expected, RequestState::kCollection);
+    return state_.compare_exchange_strong(expected,
+                                          RequestState::kCollectionRequested);
   }
 
   // Sets state back to kDefault - invoked at end of GC.
   void ClearCollectionRequested() {
     RequestState old_state =
         state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
-    CHECK_NE(old_state, RequestState::kShutdown);
+    USE(old_state);
+    DCHECK_EQ(old_state, RequestState::kCollectionStarted);
   }
 
  public:
@@ -66,10 +69,12 @@ class CollectionBarrier {
 
   // Checks whether any background thread requested GC.
   bool CollectionRequested() {
-    return state_.load(std::memory_order_relaxed) == RequestState::kCollection;
+    return state_.load(std::memory_order_relaxed) ==
+           RequestState::kCollectionRequested;
  }
 
   void StopTimeToCollectionTimer();
+  void BlockUntilCollected();
 
   // Resumes threads waiting for collection.
   void ResumeThreadsAwaitingCollection();
@@ -1515,8 +1515,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
       this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
       GarbageCollectionReasonToString(gc_reason));
 
-  collection_barrier_->StopTimeToCollectionTimer();
-
   if (!CanPromoteYoungAndExpandOldGeneration(0)) {
     InvokeNearHeapLimitCallback();
   }
@@ -1935,6 +1933,11 @@ size_t Heap::PerformGarbageCollection(
     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
   DisallowJavascriptExecution no_js(isolate());
   base::Optional<SafepointScope> optional_safepoint_scope;
+
+  // Stop time-to-collection timer before safepoint - we do not want to measure
+  // time for safepointing.
+  collection_barrier_->StopTimeToCollectionTimer();
+
   if (FLAG_local_heaps) {
     optional_safepoint_scope.emplace(this);
  }
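For completeness, a hypothetical two-thread illustration (again not V8 code) of the race the commit message describes: the background thread parks itself first and only afterwards requests a GC, so the main thread can start the GC in between those two events. With the kCollectionStarted state the late request fails its compare-exchange and no extra GC gets scheduled. The sleep only forces the ordering for the demo.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

enum class RequestState { kDefault, kCollectionRequested, kCollectionStarted };

std::atomic<RequestState> state{RequestState::kDefault};
std::atomic<bool> parked{false};

// Same compare-exchange as FirstCollectionRequest() in the patch.
bool RequestCollection() {
  RequestState expected = RequestState::kDefault;
  return state.compare_exchange_strong(expected,
                                       RequestState::kCollectionRequested);
}

int main() {
  std::thread background([] {
    parked.store(true);  // 1. Background thread parks itself...
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    // 3. ...and only then requests a GC. The GC is already running, so the
    //    request is ignored instead of triggering a second collection.
    printf("late request accepted: %d\n", RequestCollection());  // prints 0
  });

  while (!parked.load()) {
    // Wait until the background thread has parked.
  }
  // 2. Main thread starts the GC while the background thread is parked.
  state.store(RequestState::kCollectionStarted);

  background.join();
  state.store(RequestState::kDefault);  // End of GC: back to kDefault.
  return 0;
}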