Revert "[heap] Optimize time to reach global safepoint"
This reverts commit 86038ecfdc.
Reason for revert: Caused TSAN failures
Original change's description:
> [heap] Optimize time to reach global safepoint
>
> Initial support for global safepoints kept it simple by entering a
> safepoint for each of them one after another. This means
> time-to-global-safepoint is the sum of all time-to-safepoint operations.
> We can improve this slightly by splitting up the safepoint iteration
> into two operations:
>
> 1) Initiate safepoint lock (locks local_heaps_mutex_, arms the barrier
> and sets SafepointRequested flag for all client threads)
> 2) Block until all running client threads reach a safepoint
>
> We now perform operation 1) for all clients first and only then start
> with operation 2).
>
> Bug: v8:11708
> Change-Id: Iaafd3c6d70bcf7026f722633e9250b04148b3da6
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3310910
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#78308}
Bug: v8:11708, v8:12492
Change-Id: I32ef7139d4392adfadeffeb70c06f3ed18109ca8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3328782
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78328}
This commit is contained in:
parent
529b8d3c72
commit
b51a100dbd
@ -48,49 +48,21 @@ void IsolateSafepoint::EnterLocalSafepointScope() {
|
||||
barrier_.WaitUntilRunningThreadsInSafepoint(running);
|
||||
}
|
||||
|
||||
class PerClientSafepointData final {
|
||||
public:
|
||||
explicit PerClientSafepointData(Isolate* isolate) : isolate_(isolate) {}
|
||||
void IsolateSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
|
||||
// Safepoints need to be initiated on some main thread.
|
||||
DCHECK_NULL(LocalHeap::Current());
|
||||
|
||||
void set_locked_and_running(size_t running) {
|
||||
locked_ = true;
|
||||
running_ = running;
|
||||
{
|
||||
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
|
||||
LockMutex(initiator->main_thread_local_heap());
|
||||
}
|
||||
|
||||
IsolateSafepoint* safepoint() const { return heap()->safepoint(); }
|
||||
Heap* heap() const { return isolate_->heap(); }
|
||||
Isolate* isolate() const { return isolate_; }
|
||||
|
||||
bool is_locked() const { return locked_; }
|
||||
size_t running() const { return running_; }
|
||||
|
||||
private:
|
||||
Isolate* const isolate_;
|
||||
size_t running_ = 0;
|
||||
bool locked_ = false;
|
||||
};
|
||||
|
||||
void IsolateSafepoint::InitiateGlobalSafepointScope(
|
||||
Isolate* initiator, PerClientSafepointData* client_data) {
|
||||
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
|
||||
LockMutex(initiator->main_thread_local_heap());
|
||||
InitiateGlobalSafepointScopeRaw(initiator, client_data);
|
||||
}
|
||||
|
||||
void IsolateSafepoint::TryInitiateGlobalSafepointScope(
|
||||
Isolate* initiator, PerClientSafepointData* client_data) {
|
||||
if (!local_heaps_mutex_.TryLock()) return;
|
||||
InitiateGlobalSafepointScopeRaw(initiator, client_data);
|
||||
}
|
||||
|
||||
void IsolateSafepoint::InitiateGlobalSafepointScopeRaw(
|
||||
Isolate* initiator, PerClientSafepointData* client_data) {
|
||||
CHECK_EQ(++active_safepoint_scopes_, 1);
|
||||
|
||||
barrier_.Arm();
|
||||
|
||||
size_t running =
|
||||
SetSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
|
||||
client_data->set_locked_and_running(running);
|
||||
barrier_.WaitUntilRunningThreadsInSafepoint(running);
|
||||
}
|
||||
|
||||
IsolateSafepoint::IncludeMainThread
|
||||
@ -177,11 +149,6 @@ void IsolateSafepoint::WaitInUnpark() { barrier_.WaitInUnpark(); }
|
||||
|
||||
void IsolateSafepoint::NotifyPark() { barrier_.NotifyPark(); }
|
||||
|
||||
void IsolateSafepoint::WaitUntilRunningThreadsInSafepoint(
|
||||
const PerClientSafepointData* client_data) {
|
||||
barrier_.WaitUntilRunningThreadsInSafepoint(client_data->running());
|
||||
}
|
||||
|
||||
void IsolateSafepoint::Barrier::Arm() {
|
||||
base::MutexGuard guard(&mutex_);
|
||||
DCHECK(!IsArmed());
|
||||
@ -319,9 +286,6 @@ void GlobalSafepoint::RemoveClient(Isolate* client) {
|
||||
void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); }
|
||||
|
||||
void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
|
||||
// Safepoints need to be initiated on some main thread.
|
||||
DCHECK_NULL(LocalHeap::Current());
|
||||
|
||||
if (!clients_mutex_.TryLock()) {
|
||||
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
|
||||
ParkedScope parked_scope(initiator->main_thread_local_heap());
|
||||
@ -333,36 +297,14 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
|
||||
TRACE_GC(initiator->heap()->tracer(),
|
||||
GCTracer::Scope::TIME_TO_GLOBAL_SAFEPOINT);
|
||||
|
||||
std::vector<PerClientSafepointData> clients;
|
||||
IterateClientIsolates([this, initiator](Isolate* client) {
|
||||
Heap* client_heap = client->heap();
|
||||
client_heap->safepoint()->EnterGlobalSafepointScope(initiator);
|
||||
|
||||
// Try to initiate safepoint for all clients. Fail immediately when the
|
||||
// local_heaps_mutex_ can't be locked without blocking.
|
||||
IterateClientIsolates([&clients, initiator](Isolate* client) {
|
||||
clients.emplace_back(client);
|
||||
client->heap()->safepoint()->TryInitiateGlobalSafepointScope(
|
||||
initiator, &clients.back());
|
||||
USE(this);
|
||||
DCHECK_EQ(client->shared_isolate(), shared_isolate_);
|
||||
DCHECK(client_heap->deserialization_complete());
|
||||
});
|
||||
|
||||
// Iterate all clients again to initiate the safepoint for all of them - even
|
||||
// if that means blocking.
|
||||
for (PerClientSafepointData& client : clients) {
|
||||
if (client.is_locked()) continue;
|
||||
client.safepoint()->InitiateGlobalSafepointScope(initiator, &client);
|
||||
}
|
||||
|
||||
#if DEBUG
|
||||
for (const PerClientSafepointData& client : clients) {
|
||||
DCHECK_EQ(client.isolate()->shared_isolate(), shared_isolate_);
|
||||
DCHECK(client.heap()->deserialization_complete());
|
||||
}
|
||||
#endif // DEBUG
|
||||
|
||||
// Now that safepoints were initiated for all clients, wait until all threads
|
||||
// of all clients reached a safepoint.
|
||||
for (const PerClientSafepointData& client : clients) {
|
||||
DCHECK(client.is_locked());
|
||||
client.safepoint()->WaitUntilRunningThreadsInSafepoint(&client);
|
||||
}
|
||||
}
|
||||
|
||||
void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
|
||||
|
@ -17,7 +17,6 @@ namespace internal {
|
||||
|
||||
class Heap;
|
||||
class LocalHeap;
|
||||
class PerClientSafepointData;
|
||||
class RootVisitor;
|
||||
|
||||
// Used to bring all threads with heap access in an isolate to a safepoint such
|
||||
@ -80,23 +79,12 @@ class IsolateSafepoint final {
|
||||
// Running thread reached a safepoint by parking itself.
|
||||
void NotifyPark();
|
||||
|
||||
// Methods for entering/leaving local safepoint scopes.
|
||||
void EnterLocalSafepointScope();
|
||||
void EnterGlobalSafepointScope(Isolate* initiator);
|
||||
|
||||
void LeaveLocalSafepointScope();
|
||||
|
||||
// Methods for entering/leaving global safepoint scopes.
|
||||
void TryInitiateGlobalSafepointScope(Isolate* initiator,
|
||||
PerClientSafepointData* client_data);
|
||||
void InitiateGlobalSafepointScope(Isolate* initiator,
|
||||
PerClientSafepointData* client_data);
|
||||
void InitiateGlobalSafepointScopeRaw(Isolate* initiator,
|
||||
PerClientSafepointData* client_data);
|
||||
void LeaveGlobalSafepointScope(Isolate* initiator);
|
||||
|
||||
// Blocks until all running threads reached a safepoint.
|
||||
void WaitUntilRunningThreadsInSafepoint(
|
||||
const PerClientSafepointData* client_data);
|
||||
|
||||
IncludeMainThread IncludeMainThreadUnlessInitiator(Isolate* initiator);
|
||||
|
||||
void LockMutex(LocalHeap* local_heap);
|
||||
|
Loading…
Reference in New Issue
Block a user