[heap] Add histogram for time-to-collection
Add histogram for time-to-collection. As a drive-by change, also move
CollectionBarrier into its own class and rename V8.TimeToSafepoint to
V8.StopTheWorld such that the histogram name and the trace file entry
now have the same name.

Bug: v8:10315
Change-Id: I86e2a9592d10316d04bc8cab37ff548067aadf78
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2465840
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70489}
parent 567c3a1cf5
commit a2d44ad719
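The new V8.TimeToCollection histogram measures how long background threads wait for a GC: the TimedHistogramScope is created in CollectionBarrier::ActivateStackGuardAndPostTask() when the first background thread requests a collection, and destroyed in StopTimeToCollectionTimer(), which Heap::CollectGarbage() calls once the collection actually starts. Below is a minimal standalone sketch of that RAII timing pattern using only the standard library; the printf stands in for recording into a real histogram, and none of these names are V8's.

#include <chrono>
#include <cstdio>
#include <optional>

// RAII scope: starts timing on construction, reports elapsed time on
// destruction, mimicking what TimedHistogramScope does for a histogram.
class ScopedTimer {
 public:
  ScopedTimer() : start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("V8.TimeToCollection: %lld ms\n", static_cast<long long>(ms));
  }

 private:
  std::chrono::steady_clock::time_point start_;
};

// The optional lets the timer start and stop at arbitrary points, like the
// base::Optional<TimedHistogramScope> member added in this commit.
std::optional<ScopedTimer> time_to_collection_scope;

void OnFirstCollectionRequest() { time_to_collection_scope.emplace(); }
void OnCollectionStarted() { time_to_collection_scope.reset(); }  // reports

int main() {
  OnFirstCollectionRequest();  // background thread asks for a GC
  OnCollectionStarted();       // main thread reaches CollectGarbage()
}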
BUILD.gn
@@ -2592,6 +2592,8 @@ v8_source_set("v8_base_without_compiler") {
     "src/heap/code-object-registry.h",
     "src/heap/code-stats.cc",
     "src/heap/code-stats.h",
+    "src/heap/collection-barrier.cc",
+    "src/heap/collection-barrier.h",
     "src/heap/combined-heap.cc",
     "src/heap/combined-heap.h",
     "src/heap/concurrent-allocator-inl.h",
src/heap/collection-barrier.cc (new file)
@@ -0,0 +1,75 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/collection-barrier.h"
+
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void CollectionBarrier::ResumeThreadsAwaitingCollection() {
+  base::MutexGuard guard(&mutex_);
+  ClearCollectionRequested();
+  cond_.NotifyAll();
+}
+
+void CollectionBarrier::ShutdownRequested() {
+  base::MutexGuard guard(&mutex_);
+  state_.store(RequestState::kShutdown);
+  cond_.NotifyAll();
+}
+
+class BackgroundCollectionInterruptTask : public CancelableTask {
+ public:
+  explicit BackgroundCollectionInterruptTask(Heap* heap)
+      : CancelableTask(heap->isolate()), heap_(heap) {}
+
+  ~BackgroundCollectionInterruptTask() override = default;
+
+ private:
+  // v8::internal::CancelableTask overrides.
+  void RunInternal() override { heap_->CheckCollectionRequested(); }
+
+  Heap* heap_;
+  DISALLOW_COPY_AND_ASSIGN(BackgroundCollectionInterruptTask);
+};
+
+void CollectionBarrier::AwaitCollectionBackground() {
+  if (FirstCollectionRequest()) {
+    // This is the first background thread requesting collection, ask the main
+    // thread for GC.
+    ActivateStackGuardAndPostTask();
+  }
+
+  BlockUntilCollected();
+}
+
+void CollectionBarrier::StopTimeToCollectionTimer() {
+  base::MutexGuard guard(&mutex_);
+  time_to_collection_scope_.reset();
+}
+
+void CollectionBarrier::ActivateStackGuardAndPostTask() {
+  Isolate* isolate = heap_->isolate();
+  ExecutionAccess access(isolate);
+  isolate->stack_guard()->RequestGC();
+  auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+      reinterpret_cast<v8::Isolate*>(isolate));
+  taskrunner->PostTask(
+      std::make_unique<BackgroundCollectionInterruptTask>(heap_));
+  base::MutexGuard guard(&mutex_);
+  time_to_collection_scope_.emplace(isolate->counters()->time_to_collection());
+}
+
+void CollectionBarrier::BlockUntilCollected() {
+  base::MutexGuard guard(&mutex_);
+
+  while (CollectionRequested()) {
+    cond_.Wait(&mutex_);
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
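In the protocol above, background threads park on the condition variable until the main thread either finishes the GC (ResumeThreadsAwaitingCollection) or begins teardown (ShutdownRequested); both transitions make CollectionRequested() false and notify all waiters. The following is a self-contained sketch of the same handshake using standard-library primitives instead of V8's base:: ones; all names here are illustrative.

#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

enum class RequestState { kDefault, kCollection, kShutdown };

std::mutex mutex_;
std::condition_variable cond_;
std::atomic<RequestState> state_{RequestState::kDefault};

// Background thread: publish the request, then block until it is cleared
// (GC finished) or replaced by kShutdown, like AwaitCollectionBackground().
void AwaitCollection() {
  RequestState expected = RequestState::kDefault;
  state_.compare_exchange_strong(expected, RequestState::kCollection);
  std::unique_lock<std::mutex> lock(mutex_);
  cond_.wait(lock, [] { return state_.load() != RequestState::kCollection; });
}

// Main thread: clear the request under the lock and wake every waiter,
// like ResumeThreadsAwaitingCollection().
void ResumeThreads() {
  {
    std::lock_guard<std::mutex> lock(mutex_);
    state_.store(RequestState::kDefault);
  }
  cond_.notify_all();
}

int main() {
  std::thread background(AwaitCollection);
  // Wait until the request is visible, "perform the GC", then resume.
  while (state_.load() != RequestState::kCollection) std::this_thread::yield();
  ResumeThreads();
  background.join();
  std::puts("background thread resumed after collection");
}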
src/heap/collection-barrier.h (new file)
@@ -0,0 +1,87 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_COLLECTION_BARRIER_H_
+#define V8_HEAP_COLLECTION_BARRIER_H_
+
+#include <atomic>
+
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/logging/counters.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+// This class stops and resumes all background threads waiting for GC.
+class CollectionBarrier {
+  Heap* heap_;
+  base::Mutex mutex_;
+  base::ConditionVariable cond_;
+  base::Optional<TimedHistogramScope> time_to_collection_scope_;
+
+  enum class RequestState {
+    // Default state, no collection requested and tear down wasn't initiated
+    // yet.
+    kDefault,
+
+    // Collection was already requested.
+    kCollection,
+
+    // This state is reached after the isolate starts to shut down. The main
+    // thread can't perform any GCs anymore, so all allocations need to be
+    // allowed from here on until the background thread finishes.
+    kShutdown,
+  };
+
+  // The current state.
+  std::atomic<RequestState> state_;
+
+  void BlockUntilCollected();
+
+  // Request GC by activating stack guards and posting a task to perform the
+  // GC.
+  void ActivateStackGuardAndPostTask();
+
+  // Returns true when state was successfully updated from kDefault to
+  // kCollection.
+  bool FirstCollectionRequest() {
+    RequestState expected = RequestState::kDefault;
+    return state_.compare_exchange_strong(expected, RequestState::kCollection);
+  }
+
+  // Sets state back to kDefault - invoked at end of GC.
+  void ClearCollectionRequested() {
+    RequestState old_state =
+        state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
+    CHECK_NE(old_state, RequestState::kShutdown);
+  }
+
+ public:
+  explicit CollectionBarrier(Heap* heap)
+      : heap_(heap), state_(RequestState::kDefault) {}
+
+  // Checks whether any background thread requested GC.
+  bool CollectionRequested() {
+    return state_.load(std::memory_order_relaxed) == RequestState::kCollection;
+  }
+
+  void StopTimeToCollectionTimer();
+
+  // Resumes threads waiting for collection.
+  void ResumeThreadsAwaitingCollection();
+
+  // Sets current state to kShutdown.
+  void ShutdownRequested();
+
+  // This is the method used by background threads to request and wait for GC.
+  void AwaitCollectionBackground();
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_COLLECTION_BARRIER_H_
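Note how FirstCollectionRequest() relies on compare_exchange_strong: among many racing background threads, exactly one observes kDefault, flips the state to kCollection, and gets to activate the stack guard; the rest simply block. Here is a standalone demonstration of that single-winner election, assuming nothing beyond the standard library.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum class RequestState { kDefault, kCollection };
std::atomic<RequestState> state{RequestState::kDefault};

// The CAS succeeds only while the state is still kDefault, so exactly one
// caller returns true no matter how many threads race here.
bool FirstCollectionRequest() {
  RequestState expected = RequestState::kDefault;
  return state.compare_exchange_strong(expected, RequestState::kCollection);
}

int main() {
  std::atomic<int> winners{0};
  std::vector<std::thread> threads;
  for (int i = 0; i < 8; i++) {
    threads.emplace_back([&] {
      if (FirstCollectionRequest()) winners++;
    });
  }
  for (auto& t : threads) t.join();
  std::printf("winners: %d\n", winners.load());  // always 1
}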
src/heap/heap.cc
@@ -35,6 +35,7 @@
 #include "src/heap/base/stack.h"
 #include "src/heap/code-object-registry.h"
 #include "src/heap/code-stats.h"
+#include "src/heap/collection-barrier.h"
 #include "src/heap/combined-heap.h"
 #include "src/heap/concurrent-allocator.h"
 #include "src/heap/concurrent-marking.h"
@@ -195,7 +196,7 @@ Heap::Heap()
       global_pretenuring_feedback_(kInitialFeedbackCapacity),
       safepoint_(new GlobalSafepoint(this)),
       external_string_table_(this),
-      collection_barrier_(this) {
+      collection_barrier_(new CollectionBarrier(this)) {
   // Ensure old_generation_size_ is a multiple of kPageSize.
   DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));

@@ -1154,7 +1155,7 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
   }

   // Resume all threads waiting for the GC.
-  collection_barrier_.ResumeThreadsAwaitingCollection();
+  collection_barrier_->ResumeThreadsAwaitingCollection();
 }

 void Heap::GarbageCollectionEpilogue() {
@@ -1514,6 +1515,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
       this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
       GarbageCollectionReasonToString(gc_reason));

+  collection_barrier_->StopTimeToCollectionTimer();
+
   if (!CanPromoteYoungAndExpandOldGeneration(0)) {
     InvokeNearHeapLimitCallback();
   }
@@ -1890,67 +1893,16 @@ void Heap::EnsureFromSpaceIsCommitted() {
   FatalProcessOutOfMemory("Committing semi space failed.");
 }

-void Heap::CollectionBarrier::ResumeThreadsAwaitingCollection() {
-  base::MutexGuard guard(&mutex_);
-  ClearCollectionRequested();
-  cond_.NotifyAll();
-}
-
-void Heap::CollectionBarrier::ShutdownRequested() {
-  base::MutexGuard guard(&mutex_);
-  state_.store(RequestState::kShutdown);
-  cond_.NotifyAll();
-}
-
-class BackgroundCollectionInterruptTask : public CancelableTask {
- public:
-  explicit BackgroundCollectionInterruptTask(Heap* heap)
-      : CancelableTask(heap->isolate()), heap_(heap) {}
-
-  ~BackgroundCollectionInterruptTask() override = default;
-
- private:
-  // v8::internal::CancelableTask overrides.
-  void RunInternal() override { heap_->CheckCollectionRequested(); }
-
-  Heap* heap_;
-  DISALLOW_COPY_AND_ASSIGN(BackgroundCollectionInterruptTask);
-};
-
-void Heap::CollectionBarrier::AwaitCollectionBackground() {
-  if (FirstCollectionRequest()) {
-    // This is the first background thread requesting collection, ask the main
-    // thread for GC.
-    ActivateStackGuardAndPostTask();
-  }
-
-  BlockUntilCollected();
-}
-
-void Heap::CollectionBarrier::ActivateStackGuardAndPostTask() {
-  Isolate* isolate = heap_->isolate();
-  ExecutionAccess access(isolate);
-  isolate->stack_guard()->RequestGC();
-  auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
-      reinterpret_cast<v8::Isolate*>(isolate));
-  taskrunner->PostTask(
-      std::make_unique<BackgroundCollectionInterruptTask>(heap_));
-}
-
-void Heap::CollectionBarrier::BlockUntilCollected() {
-  base::MutexGuard guard(&mutex_);
-
-  while (CollectionRequested()) {
-    cond_.Wait(&mutex_);
-  }
+bool Heap::CollectionRequested() {
+  return collection_barrier_->CollectionRequested();
 }

 void Heap::RequestCollectionBackground() {
-  collection_barrier_.AwaitCollectionBackground();
+  collection_barrier_->AwaitCollectionBackground();
 }

 void Heap::CheckCollectionRequested() {
-  if (!collection_barrier_.CollectionRequested()) return;
+  if (!collection_barrier_->CollectionRequested()) return;

   CollectAllGarbage(current_gc_flags_,
                     GarbageCollectionReason::kBackgroundAllocationFailure,
@@ -5420,7 +5372,7 @@ void Heap::StartTearDown() {
   // process the event queue anymore. Avoid this deadlock by allowing all
   // allocations after tear down was requested to make sure all background
   // threads finish.
-  collection_barrier_.ShutdownRequested();
+  collection_barrier_->ShutdownRequested();

 #ifdef VERIFY_HEAP
   // {StartTearDown} is called fairly early during Isolate teardown, so it's
src/heap/heap.h
@@ -66,6 +66,7 @@ class ArrayBufferCollector;
 class ArrayBufferSweeper;
 class BasicMemoryChunk;
 class CodeLargeObjectSpace;
+class CollectionBarrier;
 class ConcurrentMarking;
 class GCIdleTimeHandler;
 class GCIdleTimeHeapState;
@@ -773,9 +774,7 @@ class Heap {
            MemoryPressureLevel::kNone;
   }

-  bool CollectionRequested() {
-    return collection_barrier_.CollectionRequested();
-  }
+  bool CollectionRequested();

   void CheckCollectionRequested();

@@ -1576,70 +1575,6 @@ class Heap {
     DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
   };

-  // This class stops and resumes all background threads waiting for GC.
-  class CollectionBarrier {
-    Heap* heap_;
-    base::Mutex mutex_;
-    base::ConditionVariable cond_;
-
-    enum class RequestState {
-      // Default state, no collection requested and tear down wasn't initated
-      // yet.
-      kDefault,
-
-      // Collection was already requested
-      kCollection,
-
-      // This state is reached after isolate starts to shut down. The main
-      // thread can't perform any GCs anymore, so all allocations need to be
-      // allowed from here on until background thread finishes.
-      kShutdown,
-    };
-
-    // The current state.
-    std::atomic<RequestState> state_;
-
-    void BlockUntilCollected();
-
-    // Request GC by activating stack guards and posting a task to perform the
-    // GC.
-    void ActivateStackGuardAndPostTask();
-
-    // Returns true when state was successfully updated from kDefault to
-    // kCollection.
-    bool FirstCollectionRequest() {
-      RequestState expected = RequestState::kDefault;
-      return state_.compare_exchange_strong(expected,
-                                            RequestState::kCollection);
-    }
-
-    // Sets state back to kDefault - invoked at end of GC.
-    void ClearCollectionRequested() {
-      RequestState old_state =
-          state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
-      CHECK_NE(old_state, RequestState::kShutdown);
-    }
-
-   public:
-    explicit CollectionBarrier(Heap* heap)
-        : heap_(heap), state_(RequestState::kDefault) {}
-
-    // Checks whether any background thread requested GC.
-    bool CollectionRequested() {
-      return state_.load(std::memory_order_relaxed) ==
-             RequestState::kCollection;
-    }
-
-    // Resumes threads waiting for collection.
-    void ResumeThreadsAwaitingCollection();
-
-    // Sets current state to kShutdown.
-    void ShutdownRequested();
-
-    // This is the method use by background threads to request and wait for GC.
-    void AwaitCollectionBackground();
-  };
-
   struct StringTypeTable {
     InstanceType type;
     int size;
@@ -2325,7 +2260,7 @@ class Heap {

   base::Mutex relocation_mutex_;

-  CollectionBarrier collection_barrier_;
+  std::unique_ptr<CollectionBarrier> collection_barrier_;

   int gc_callbacks_depth_ = 0;

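Switching the member to std::unique_ptr<CollectionBarrier> is what allows the earlier hunk (@@ -66,6 +66,7) to add only a forward declaration: heap.h no longer needs the complete type, and only heap.cc includes collection-barrier.h. A one-file sketch of that pattern follows, with comments marking the would-be file boundaries; Heap and Barrier here are stand-ins, not the real V8 classes.

#include <memory>

// --- heap.h (sketch): a forward declaration suffices for a unique_ptr
// member, so changes to the barrier no longer force rebuilding every file
// that includes this header.
class Barrier;

class Heap {
 public:
  Heap();
  ~Heap();  // must be defined where Barrier is a complete type

 private:
  std::unique_ptr<Barrier> barrier_;
};

// --- collection-barrier.h (sketch): the full definition lives here.
class Barrier {
 public:
  explicit Barrier(Heap* heap) : heap_(heap) {}

 private:
  Heap* heap_;
};

// --- heap.cc (sketch): the only place that needs the complete type.
Heap::Heap() : barrier_(std::make_unique<Barrier>(this)) {}
Heap::~Heap() = default;

int main() { Heap heap; }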
src/heap/safepoint.cc
@@ -21,7 +21,7 @@ void GlobalSafepoint::EnterSafepointScope() {

   if (++active_safepoint_scopes_ > 1) return;

-  TimedHistogramScope timer(heap_->isolate()->counters()->time_to_safepoint());
+  TimedHistogramScope timer(heap_->isolate()->counters()->stop_the_world());
   TRACE_GC(heap_->tracer(), GCTracer::Scope::STOP_THE_WORLD);

   local_heaps_mutex_.Lock();
src/logging/counters.h
@@ -138,9 +138,10 @@ namespace internal {
   HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND)                        \
   HT(gc_scavenger_background, V8.GCScavengerBackground, 10000, MILLISECOND)   \
   HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND)   \
-  HT(time_to_safepoint, V8.TimeToSafepoint, 10000, MILLISECOND)               \
   HT(measure_memory_delay_ms, V8.MeasureMemoryDelayMilliseconds, 100000,      \
      MILLISECOND)                                                             \
+  HT(stop_the_world, V8.StopTheWorld, 10000, MILLISECOND)                     \
+  HT(time_to_collection, V8.TimeToCollection, 10000, MILLISECOND)             \
   /* TurboFan timers. */                                                      \
   HT(turbofan_optimize_prepare, V8.TurboFanOptimizePrepare, 1000000,          \
      MICROSECOND)                                                             \
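The HT(...) entries above form an X-macro list: the same list is re-expanded with different definitions of HT to generate, among other things, one accessor per histogram on the Counters class. Below is a simplified, self-contained illustration of that expansion trick; the TimedHistogram struct and accessor shape are assumptions for the demo, not V8's actual generated code.

#include <cstdio>

// The list names every timed histogram once; consumers re-expand it.
#define HISTOGRAM_TIMER_LIST(HT)                       \
  HT(stop_the_world, "V8.StopTheWorld", 10000)         \
  HT(time_to_collection, "V8.TimeToCollection", 10000)

struct TimedHistogram {
  const char* caption;
  int max_ms;
};

class Counters {
 public:
// Expand the list into one accessor per entry, e.g. time_to_collection().
#define DEFINE_HT(name, caption, max)              \
  TimedHistogram* name() {                         \
    static TimedHistogram histogram{caption, max}; \
    return &histogram;                             \
  }
  HISTOGRAM_TIMER_LIST(DEFINE_HT)
#undef DEFINE_HT
};

int main() {
  Counters counters;
  std::printf("%s (max %d ms)\n", counters.time_to_collection()->caption,
              counters.time_to_collection()->max_ms);
}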