Reland "[heap] Optimize time to reach global safepoint"

This is a reland of 86038ecfdc

Compared to the previous CL, this one adds a TSAN suppression for
GlobalSafepoint::EnterGlobalSafepointScope. The local_heaps_mutex_ of
client isolates may be locked in any order, which TSAN would report as
a potential deadlock. Some additional DCHECKs are added to compensate
for the test coverage lost to that suppression.

As a cleanup this CL also removes the unused methods ContainsLocalHeap()
and ContainsAnyLocalHeap() from LocalHeap.

Original change's description:
> [heap] Optimize time to reach global safepoint
>
> Initial support for global safepoints kept it simple by entering a
> safepoint for each of them one after another. This means
> time-to-global-safepoint is the sum of all time-to-safepoint operations.
> We can improve this slightly by splitting up the safepoint iteration
> into two operations:
>
> 1) Initiate safepoint lock (locks local_heaps_mutex_, arms the barrier
>    and sets SafepointRequested flag for all client threads)
> 2) Block until all running client threads reach a safepoint
>
> We now perform operation 1) for all clients first and only then start
> with operation 2).
>
> Bug: v8:11708
> Change-Id: Iaafd3c6d70bcf7026f722633e9250b04148b3da6
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3310910
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#78308}
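
The two-phase scheme described above, distilled into a minimal sketch
(the Client type and function names are hypothetical stand-ins, not V8
API; see the actual diff below for the real implementation):

  #include <vector>

  struct Client {  // stand-in for one client isolate's IsolateSafepoint
    void InitiateSafepoint() {}            // lock mutex, arm barrier, set flags
    void WaitUntilThreadsInSafepoint() {}  // block until all threads parked
  };

  // Old scheme: initiate and wait per client, so time-to-global-safepoint
  // is the sum of all per-client time-to-safepoint operations.
  void EnterGlobalSafepointSequential(std::vector<Client>& clients) {
    for (Client& client : clients) {
      client.InitiateSafepoint();
      client.WaitUntilThreadsInSafepoint();
    }
  }

  // New scheme: initiate for every client first, then wait, letting all
  // clients' threads reach their safepoints concurrently.
  void EnterGlobalSafepointOverlapped(std::vector<Client>& clients) {
    for (Client& client : clients) client.InitiateSafepoint();
    for (Client& client : clients) client.WaitUntilThreadsInSafepoint();
  }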

Bug: v8:11708, v8:12492
Change-Id: I7087ba23c08f2d4edb9b632eef3c218fc76342e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3328786
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78583}
Author: Dominik Inführ
Date: 2021-12-10 15:49:54 +01:00
Committed by: V8 LUCI CQ
Parent: 883b251774
Commit: 93f28d16c7

4 changed files with 109 additions and 45 deletions

--- a/src/heap/safepoint.cc
+++ b/src/heap/safepoint.cc

@@ -33,14 +33,13 @@ void IsolateSafepoint::EnterLocalSafepointScope() {
   DCHECK_NULL(LocalHeap::Current());
   DCHECK(AllowGarbageCollection::IsAllowed());
 
-  LockMutex(heap_->isolate()->main_thread_local_heap());
+  LockMutex(isolate()->main_thread_local_heap());
   if (++active_safepoint_scopes_ > 1) return;
 
   // Local safepoint can only be initiated on the isolate's main thread.
-  DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+  DCHECK_EQ(ThreadId::Current(), isolate()->thread_id());
 
-  TimedHistogramScope timer(
-      heap_->isolate()->counters()->gc_time_to_safepoint());
+  TimedHistogramScope timer(isolate()->counters()->gc_time_to_safepoint());
   TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
 
   barrier_.Arm();
@@ -48,26 +47,56 @@ void IsolateSafepoint::EnterLocalSafepointScope() {
   barrier_.WaitUntilRunningThreadsInSafepoint(running);
 }
 
-void IsolateSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
-  // Safepoints need to be initiated on some main thread.
-  DCHECK_NULL(LocalHeap::Current());
+class PerClientSafepointData final {
+ public:
+  explicit PerClientSafepointData(Isolate* isolate) : isolate_(isolate) {}
 
-  {
-    IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
-    LockMutex(initiator->main_thread_local_heap());
+  void set_locked_and_running(size_t running) {
+    locked_ = true;
+    running_ = running;
   }
 
-  CHECK_EQ(++active_safepoint_scopes_, 1);
+  IsolateSafepoint* safepoint() const { return heap()->safepoint(); }
+  Heap* heap() const { return isolate_->heap(); }
+  Isolate* isolate() const { return isolate_; }
+
+  bool is_locked() const { return locked_; }
+  size_t running() const { return running_; }
+
+ private:
+  Isolate* const isolate_;
+  size_t running_ = 0;
+  bool locked_ = false;
+};
+
+void IsolateSafepoint::InitiateGlobalSafepointScope(
+    Isolate* initiator, PerClientSafepointData* client_data) {
+  shared_isolate()->global_safepoint()->AssertActive();
+  IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
+  LockMutex(initiator->main_thread_local_heap());
+  InitiateGlobalSafepointScopeRaw(initiator, client_data);
+}
+
+void IsolateSafepoint::TryInitiateGlobalSafepointScope(
+    Isolate* initiator, PerClientSafepointData* client_data) {
+  shared_isolate()->global_safepoint()->AssertActive();
+  if (!local_heaps_mutex_.TryLock()) return;
+  InitiateGlobalSafepointScopeRaw(initiator, client_data);
+}
+
+void IsolateSafepoint::InitiateGlobalSafepointScopeRaw(
+    Isolate* initiator, PerClientSafepointData* client_data) {
+  CHECK_EQ(++active_safepoint_scopes_, 1);
   barrier_.Arm();
 
   size_t running =
       SetSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
-  barrier_.WaitUntilRunningThreadsInSafepoint(running);
+  client_data->set_locked_and_running(running);
 }
 
 IsolateSafepoint::IncludeMainThread
 IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
-  const bool is_initiator = heap_->isolate() == initiator;
+  const bool is_initiator = isolate() == initiator;
   return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
 }
@@ -149,6 +178,11 @@ void IsolateSafepoint::WaitInUnpark() { barrier_.WaitInUnpark(); }
 
 void IsolateSafepoint::NotifyPark() { barrier_.NotifyPark(); }
 
+void IsolateSafepoint::WaitUntilRunningThreadsInSafepoint(
+    const PerClientSafepointData* client_data) {
+  barrier_.WaitUntilRunningThreadsInSafepoint(client_data->running());
+}
+
 void IsolateSafepoint::Barrier::Arm() {
   base::MutexGuard guard(&mutex_);
   DCHECK(!IsArmed());
@@ -200,23 +234,6 @@ void IsolateSafepoint::Barrier::WaitInUnpark() {
   }
 }
 
-bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
-  base::RecursiveMutexGuard guard(&local_heaps_mutex_);
-  LocalHeap* current = local_heaps_head_;
-
-  while (current) {
-    if (current == local_heap) return true;
-    current = current->next_;
-  }
-
-  return false;
-}
-
-bool IsolateSafepoint::ContainsAnyLocalHeap() {
-  base::RecursiveMutexGuard guard(&local_heaps_mutex_);
-  return local_heaps_head_ != nullptr;
-}
-
 void IsolateSafepoint::Iterate(RootVisitor* visitor) {
   AssertActive();
   for (LocalHeap* current = local_heaps_head_; current;
@@ -230,6 +247,12 @@ void IsolateSafepoint::AssertMainThreadIsOnlyThread() {
   DCHECK_NULL(heap_->main_thread_local_heap()->next_);
 }
 
+Isolate* IsolateSafepoint::isolate() const { return heap_->isolate(); }
+
+Isolate* IsolateSafepoint::shared_isolate() const {
+  return isolate()->shared_isolate();
+}
+
 SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
   safepoint_->EnterLocalSafepointScope();
 }
@@ -286,6 +309,9 @@ void GlobalSafepoint::RemoveClient(Isolate* client) {
 void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); }
 
 void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
+  // Safepoints need to be initiated on some main thread.
+  DCHECK_NULL(LocalHeap::Current());
+
   if (!clients_mutex_.TryLock()) {
     IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
     ParkedScope parked_scope(initiator->main_thread_local_heap());
@@ -297,14 +323,36 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
   TRACE_GC(initiator->heap()->tracer(),
            GCTracer::Scope::TIME_TO_GLOBAL_SAFEPOINT);
 
-  IterateClientIsolates([this, initiator](Isolate* client) {
-    Heap* client_heap = client->heap();
-    client_heap->safepoint()->EnterGlobalSafepointScope(initiator);
+  std::vector<PerClientSafepointData> clients;
 
-    USE(this);
-    DCHECK_EQ(client->shared_isolate(), shared_isolate_);
-    DCHECK(client_heap->deserialization_complete());
+  // Try to initiate safepoint for all clients. Fail immediately when the
+  // local_heaps_mutex_ can't be locked without blocking.
+  IterateClientIsolates([&clients, initiator](Isolate* client) {
+    clients.emplace_back(client);
+    client->heap()->safepoint()->TryInitiateGlobalSafepointScope(
+        initiator, &clients.back());
   });
+
+  // Iterate all clients again to initiate the safepoint for all of them - even
+  // if that means blocking.
+  for (PerClientSafepointData& client : clients) {
+    if (client.is_locked()) continue;
+    client.safepoint()->InitiateGlobalSafepointScope(initiator, &client);
+  }
+
+#if DEBUG
+  for (const PerClientSafepointData& client : clients) {
+    DCHECK_EQ(client.isolate()->shared_isolate(), shared_isolate_);
+    DCHECK(client.heap()->deserialization_complete());
+  }
+#endif  // DEBUG
+
+  // Now that safepoints were initiated for all clients, wait until all threads
+  // of all clients reached a safepoint.
+  for (const PerClientSafepointData& client : clients) {
+    DCHECK(client.is_locked());
+    client.safepoint()->WaitUntilRunningThreadsInSafepoint(&client);
+  }
 }
 
 void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {

--- a/src/heap/safepoint.h
+++ b/src/heap/safepoint.h

@@ -17,6 +17,7 @@ namespace internal {
 
 class Heap;
 class LocalHeap;
+class PerClientSafepointData;
 class RootVisitor;
 
 // Used to bring all threads with heap access in an isolate to a safepoint such
@@ -25,9 +26,6 @@ class IsolateSafepoint final {
  public:
   explicit IsolateSafepoint(Heap* heap);
 
-  V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
-  V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
-
   // Iterate handles in local heaps
   void Iterate(RootVisitor* visitor);
@@ -43,7 +41,7 @@ class IsolateSafepoint final {
   void AssertActive() { local_heaps_mutex_.AssertHeld(); }
 
-  void AssertMainThreadIsOnlyThread();
+  V8_EXPORT_PRIVATE void AssertMainThreadIsOnlyThread();
 
  private:
   class Barrier {
@@ -79,12 +77,23 @@ class IsolateSafepoint final {
   // Running thread reached a safepoint by parking itself.
   void NotifyPark();
 
+  // Methods for entering/leaving local safepoint scopes.
   void EnterLocalSafepointScope();
-  void EnterGlobalSafepointScope(Isolate* initiator);
   void LeaveLocalSafepointScope();
+
+  // Methods for entering/leaving global safepoint scopes.
+  void TryInitiateGlobalSafepointScope(Isolate* initiator,
+                                       PerClientSafepointData* client_data);
+  void InitiateGlobalSafepointScope(Isolate* initiator,
+                                    PerClientSafepointData* client_data);
+  void InitiateGlobalSafepointScopeRaw(Isolate* initiator,
+                                       PerClientSafepointData* client_data);
   void LeaveGlobalSafepointScope(Isolate* initiator);
 
+  // Blocks until all running threads reached a safepoint.
+  void WaitUntilRunningThreadsInSafepoint(
+      const PerClientSafepointData* client_data);
+
   IncludeMainThread IncludeMainThreadUnlessInitiator(Isolate* initiator);
 
   void LockMutex(LocalHeap* local_heap);
@@ -123,6 +132,9 @@ class IsolateSafepoint final {
       local_heaps_head_ = local_heap->next_;
   }
 
+  Isolate* isolate() const;
+  Isolate* shared_isolate() const;
+
   Barrier barrier_;
   Heap* heap_;
@@ -133,11 +145,9 @@ class IsolateSafepoint final {
   int active_safepoint_scopes_;
 
-  friend class Heap;
   friend class GlobalSafepoint;
   friend class GlobalSafepointScope;
   friend class LocalHeap;
-  friend class PersistentHandles;
   friend class SafepointScope;
 };
@@ -169,6 +179,8 @@ class GlobalSafepoint final {
   void AssertNoClients();
 
+  void AssertActive() { clients_mutex_.AssertHeld(); }
+
  private:
   void EnterGlobalSafepointScope(Isolate* initiator);
   void LeaveGlobalSafepointScope(Isolate* initiator);

--- a/test/unittests/heap/local-heap-unittest.cc
+++ b/test/unittests/heap/local-heap-unittest.cc

@@ -19,7 +19,7 @@ using LocalHeapTest = TestWithIsolate;
 
 TEST_F(LocalHeapTest, Initialize) {
   Heap* heap = i_isolate()->heap();
-  CHECK(heap->safepoint()->ContainsAnyLocalHeap());
+  heap->safepoint()->AssertMainThreadIsOnlyThread();
 }
 
 TEST_F(LocalHeapTest, Current) {

--- a/tools/sanitizers/tsan_suppressions.txt
+++ b/tools/sanitizers/tsan_suppressions.txt

@@ -4,3 +4,7 @@
 # Incorrectly detected lock cycles in test-lockers
 # https://code.google.com/p/thread-sanitizer/issues/detail?id=81
 deadlock:LockAndUnlockDifferentIsolatesThread::Run
+
+# A global safepoint might lock client isolate mutexes in any order, which
+# would be reported as potential deadlocks.
+deadlock:GlobalSafepoint::EnterGlobalSafepointScope