From e9c4ff4052dfd1f1c7600baf71d7d62e88bc0496 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dominik=20Inf=C3=BChr?=
Date: Sat, 12 Jun 2021 20:33:35 +0200
Subject: [PATCH] Reland "[heap] Introduce ParkedSharedMutexGuardIf and use it
 in compiler"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is a reland of 4cd856eee480fdb4a1635e4df54c9b19b5c3e48a

This CL fixes the problem that local_isolate() returned nullptr on the
main thread.

Original change's description:
> [heap] Introduce ParkedSharedMutexGuardIf and use it in compiler
>
> In some cases it could happen that the concurrent compiler tries to get
> a shared lock on a mutex that is already exclusively held by the main
> thread. The background thread will then block itself until the
> main thread leaves the critical section. If the main thread then also
> starts a GC while holding the lock, this will result in a deadlock.
>
> A GC can't start until the background thread reaches a safepoint, and
> the main thread can't leave the critical section before the GC ran.
>
> This CL introduces a new version of SharedMutexGuard named
> ParkedSharedMutexGuardIf. This class will park the thread when
> blocking is needed and will unpark the thread again as soon as the
> lock has been acquired successfully. This resolves the deadlock on
> safepointing.
>
> Turbofan can then simply use that class internally for
> MapUpdaterGuardIfNeeded.
>
> Bug: v8:10315, chromium:1218318
> Change-Id: Ice04b222cc979e4905791118caede26e71fca6de
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2953288
> Commit-Queue: Dominik Inführ
> Reviewed-by: Georg Neis
> Reviewed-by: Michael Lippautz
> Cr-Commit-Position: refs/heads/master@{#75107}

Bug: v8:10315
Bug: chromium:1218318
Change-Id: Ic56afb14a537e0cbf412311f11407c1f09278225
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2958408
Reviewed-by: Georg Neis
Reviewed-by: Michael Lippautz
Commit-Queue: Dominik Inführ
Cr-Commit-Position: refs/heads/master@{#75124}
---
 src/compiler/js-heap-broker.h |  9 +++++---
 src/heap/parked-scope.h       | 43 +++++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+), 3 deletions(-)

diff --git a/src/compiler/js-heap-broker.h b/src/compiler/js-heap-broker.h
index e089b66d30..45013e4dc3 100644
--- a/src/compiler/js-heap-broker.h
+++ b/src/compiler/js-heap-broker.h
@@ -356,11 +356,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   // occurrence. This is done to have a recursive shared lock on {mutex}.
   class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
    protected:
-    RecursiveSharedMutexGuardIfNeeded(base::SharedMutex* mutex,
+    RecursiveSharedMutexGuardIfNeeded(LocalIsolate* local_isolate,
+                                      base::SharedMutex* mutex,
                                       int* mutex_depth_address)
         : mutex_depth_address_(mutex_depth_address),
           initial_mutex_depth_(*mutex_depth_address_),
-          shared_mutex_guard_(mutex, initial_mutex_depth_ == 0) {
+          shared_mutex_guard_(local_isolate, mutex, initial_mutex_depth_ == 0) {
       (*mutex_depth_address_)++;
     }
 
@@ -373,7 +374,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
    private:
     int* const mutex_depth_address_;
     const int initial_mutex_depth_;
-    base::SharedMutexGuardIf<base::kShared> shared_mutex_guard_;
+    ParkedSharedMutexGuardIf<base::kShared> shared_mutex_guard_;
   };
 
   class MapUpdaterGuardIfNeeded final
@@ -381,6 +382,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
    public:
     explicit MapUpdaterGuardIfNeeded(JSHeapBroker* broker)
         : RecursiveSharedMutexGuardIfNeeded(
+              broker->local_isolate_or_isolate(),
               broker->isolate()->map_updater_access(),
               &broker->map_updater_mutex_depth_) {}
   };
@@ -390,6 +392,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
    public:
     explicit BoilerplateMigrationGuardIfNeeded(JSHeapBroker* broker)
         : RecursiveSharedMutexGuardIfNeeded(
+              broker->local_isolate_or_isolate(),
               broker->isolate()->boilerplate_migration_access(),
               &broker->boilerplate_migration_mutex_depth_) {}
   };
diff --git a/src/heap/parked-scope.h b/src/heap/parked-scope.h
index ffb8027412..41dc6bfc1f 100644
--- a/src/heap/parked-scope.h
+++ b/src/heap/parked-scope.h
@@ -65,6 +65,49 @@ class V8_NODISCARD ParkedMutexGuard {
   base::Mutex* mutex_;
 };
 
+template <base::MutexSharedType kIsShared,
+          base::NullBehavior Behavior = base::NullBehavior::kRequireNotNull>
+class V8_NODISCARD ParkedSharedMutexGuardIf final {
+ public:
+  ParkedSharedMutexGuardIf(LocalIsolate* local_isolate,
+                           base::SharedMutex* mutex, bool enable_mutex)
+      : ParkedSharedMutexGuardIf(local_isolate->heap(), mutex, enable_mutex) {}
+  ParkedSharedMutexGuardIf(LocalHeap* local_heap, base::SharedMutex* mutex,
+                           bool enable_mutex) {
+    DCHECK_IMPLIES(Behavior == base::NullBehavior::kRequireNotNull,
+                   mutex != nullptr);
+    if (!enable_mutex) return;
+    mutex_ = mutex;
+
+    if (kIsShared) {
+      if (!mutex_->TryLockShared()) {
+        ParkedScope scope(local_heap);
+        mutex_->LockShared();
+      }
+    } else {
+      if (!mutex_->TryLockExclusive()) {
+        ParkedScope scope(local_heap);
+        mutex_->LockExclusive();
+      }
+    }
+  }
+  ParkedSharedMutexGuardIf(const ParkedSharedMutexGuardIf&) = delete;
+  ParkedSharedMutexGuardIf& operator=(const ParkedSharedMutexGuardIf&) = delete;
+
+  ~ParkedSharedMutexGuardIf() {
+    if (!mutex_) return;
+
+    if (kIsShared) {
+      mutex_->UnlockShared();
+    } else {
+      mutex_->UnlockExclusive();
+    }
+  }
+
+ private:
+  base::SharedMutex* mutex_ = nullptr;
+};
+
 }  // namespace internal
 }  // namespace v8
 
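
Note on the locking pattern, not part of the patch: below is a minimal,
self-contained sketch of the try-lock/park/block idea that
ParkedSharedMutexGuardIf implements. It uses std::shared_mutex and the
stand-in types StubLocalHeap and StubParkedScope instead of V8's LocalHeap
and ParkedScope, so it only illustrates the shape of the mechanism, not the
actual V8 API. Parking tells the safepointing protocol that the waiting
thread will not touch the heap, so a GC requested by the thread that holds
the lock exclusively can proceed instead of deadlocking.

  // Minimal sketch (not V8 code): park before blocking on a contended lock.
  #include <cstdio>
  #include <shared_mutex>

  struct StubLocalHeap {  // stand-in for v8::internal::LocalHeap
    void Park() { std::puts("parked: a safepoint may proceed"); }
    void Unpark() { std::puts("unparked: running again"); }
  };

  class StubParkedScope {  // stand-in for v8::internal::ParkedScope
   public:
    explicit StubParkedScope(StubLocalHeap* heap) : heap_(heap) {
      heap_->Park();
    }
    ~StubParkedScope() { heap_->Unpark(); }

   private:
    StubLocalHeap* heap_;
  };

  // Take a shared lock; if the uncontended fast path fails, park before
  // blocking so this thread cannot hold up a safepoint while it waits.
  void LockSharedParked(StubLocalHeap* heap, std::shared_mutex* mutex) {
    if (!mutex->try_lock_shared()) {
      StubParkedScope parked(heap);  // a GC may run while we block below
      mutex->lock_shared();
    }  // unparked again; the shared lock is now held
  }

  int main() {
    StubLocalHeap heap;
    std::shared_mutex mutex;
    LockSharedParked(&heap, &mutex);  // uncontended here: fast path, no park
    mutex.unlock_shared();
    return 0;
  }

In the patch itself, RecursiveSharedMutexGuardIfNeeded forwards
broker->local_isolate_or_isolate() into ParkedSharedMutexGuardIf, so the
same guard works both on background compiler threads and on the main
thread, where local_isolate() returning nullptr was the problem this reland
fixes.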