[heap] Introduce ParkedSharedMutexGuardIf and use it in compiler

In some cases it could happen that the concurrent compiler tries to get
a shared lock on a mutex that is already exclusively held by the main
thread. The background thread will then block itself until the
main thread leaves the critical section. If the main thread then also
starts a GC while holding the lock, this will result in a deadlock.

A GC can't start until the background thread reaches a safepoint and
the main thread can't leave the critical section before the GC ran.

This CL introduces a new parked version of SharedMutexGuardIf named
ParkedSharedMutexGuardIf. This class will park the thread
when blocking is needed and will unpark the thread again as soon as
the lock was acquired successfully. This resolves the deadlock on
safepointing.

Turbofan can then simply use that class internally for
MapUpdaterGuardIfNeeded.

Bug: v8:10315, chromium:1218318
Change-Id: Ice04b222cc979e4905791118caede26e71fca6de
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2953288
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75107}
This commit is contained in:
Dominik Inführ 2021-06-11 20:06:31 +02:00 committed by V8 LUCI CQ
parent ce9cc71c4b
commit 4cd856eee4
2 changed files with 49 additions and 4 deletions

View File

@ -356,11 +356,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// occurrence. This is done to have a recursive shared lock on {mutex}.
class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
protected:
RecursiveSharedMutexGuardIfNeeded(base::SharedMutex* mutex,
RecursiveSharedMutexGuardIfNeeded(LocalIsolate* local_isolate,
base::SharedMutex* mutex,
int* mutex_depth_address)
: mutex_depth_address_(mutex_depth_address),
initial_mutex_depth_(*mutex_depth_address_),
shared_mutex_guard_(mutex, initial_mutex_depth_ == 0) {
shared_mutex_guard_(local_isolate, mutex, initial_mutex_depth_ == 0) {
(*mutex_depth_address_)++;
}
@ -373,7 +374,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
private:
int* const mutex_depth_address_;
const int initial_mutex_depth_;
base::SharedMutexGuardIf<base::kShared> shared_mutex_guard_;
ParkedSharedMutexGuardIf<base::kShared> shared_mutex_guard_;
};
class MapUpdaterGuardIfNeeded final
@ -381,7 +382,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
public:
explicit MapUpdaterGuardIfNeeded(JSHeapBroker* broker)
: RecursiveSharedMutexGuardIfNeeded(
broker->isolate()->map_updater_access(),
broker->local_isolate(), broker->isolate()->map_updater_access(),
&broker->map_updater_mutex_depth_) {}
};
@ -390,6 +391,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
public:
explicit BoilerplateMigrationGuardIfNeeded(JSHeapBroker* broker)
: RecursiveSharedMutexGuardIfNeeded(
broker->local_isolate(),
broker->isolate()->boilerplate_migration_access(),
&broker->boilerplate_migration_mutex_depth_) {}
};

View File

@ -65,6 +65,49 @@ class V8_NODISCARD ParkedMutexGuard {
base::Mutex* mutex_;
};
// RAII guard that conditionally takes a SharedMutex, in either shared or
// exclusive mode depending on the kIsShared template parameter.  If the lock
// cannot be acquired immediately, the current thread is parked (ParkedScope)
// for the duration of the blocking acquire, so a concurrently requested GC
// can reach a safepoint instead of deadlocking on this thread.
template <base::MutexSharedType kIsShared,
base::NullBehavior Behavior = base::NullBehavior::kRequireNotNull>
class V8_NODISCARD ParkedSharedMutexGuardIf final {
public:
// Convenience overload: forwards to the LocalHeap constructor using the
// heap of |local_isolate|.
ParkedSharedMutexGuardIf(LocalIsolate* local_isolate,
base::SharedMutex* mutex, bool enable_mutex)
: ParkedSharedMutexGuardIf(local_isolate->heap(), mutex, enable_mutex) {}
// Acquires |mutex| only when |enable_mutex| is true; otherwise this guard
// is a no-op (mutex_ stays nullptr and the destructor does nothing).
ParkedSharedMutexGuardIf(LocalHeap* local_heap, base::SharedMutex* mutex,
bool enable_mutex) {
// |mutex| may only be null when the Behavior template parameter
// explicitly allows it.
DCHECK_IMPLIES(Behavior == base::NullBehavior::kRequireNotNull,
mutex != nullptr);
if (!enable_mutex) return;
mutex_ = mutex;
if (kIsShared) {
// Fast path: try to take the lock without blocking.  Only if that
// fails do we park the thread around the blocking acquire, so the
// common uncontended case pays no parking overhead.
if (!mutex_->TryLockShared()) {
ParkedScope scope(local_heap);
mutex_->LockShared();
}
} else {
// Same try-then-park pattern for the exclusive mode.
if (!mutex_->TryLockExclusive()) {
ParkedScope scope(local_heap);
mutex_->LockExclusive();
}
}
}
// Not copyable: the guard uniquely owns the acquired lock.
ParkedSharedMutexGuardIf(const ParkedSharedMutexGuardIf&) = delete;
ParkedSharedMutexGuardIf& operator=(const ParkedSharedMutexGuardIf&) = delete;
// Releases the lock in the mode it was taken; no-op when the guard was
// constructed with enable_mutex == false.
~ParkedSharedMutexGuardIf() {
if (!mutex_) return;
if (kIsShared) {
mutex_->UnlockShared();
} else {
mutex_->UnlockExclusive();
}
}
private:
// Null iff no lock is held (disabled guard).
base::SharedMutex* mutex_ = nullptr;
};
} // namespace internal
} // namespace v8