Reland "[heap] Avoid ParkedMutexGuard during allocation"

This is a reland of 273f4e42e3

Original change's description:
> [heap] Avoid ParkedMutexGuard during allocation
> 
> Since main thread allocation no longer starts incremental marking
> while holding allocation_mutex_, background allocation no longer
> needs ParkedMutexGuard to avoid deadlocks.
> 
> This also means background thread allocation is no longer paused to
> perform a GC, which previously caused subtle bugs (e.g. in
> ExpandBackground with incremental marking). We also no longer stop
> the world while holding allocation_mutex_.
> 
> Bug: v8:10315
> Change-Id: Iadf00bc26434c765722b82a10497ab06151f15cc
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2289771
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#68754}

Bug: v8:10315
Change-Id: If5aec78370685369ad0f1d7a76002d45f149ddfb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2297468
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68873}
Author: Dominik Inführ
Date: 2020-07-14 15:08:42 +02:00
Committed by: Commit Bot
Parent: 0eaee5cab9
Commit: e15fcb6651
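
For context, the change turns on the difference between the two guards.
Below is a minimal sketch of that difference, using std::mutex as a
stand-in for base::Mutex and a hypothetical, simplified LocalHeap; it is
illustrative only, not V8's actual implementation:

#include <mutex>

// Hypothetical stand-in for V8's LocalHeap; only the parking protocol
// matters here. While parked, a thread promises not to touch the heap,
// so a stop-the-world pause may proceed without waiting for it.
class LocalHeap {
 public:
  void Park() { /* publish "parked" to the GC */ }
  void Unpark() { /* publish "active" again */ }
};

// Plain guard (what the patch switches to): the waiter stays active,
// so a safepoint must wait for it. Safe once no code path that holds
// allocation_mutex_ can itself trigger a GC.
using MutexGuard = std::lock_guard<std::mutex>;

// Parked guard (what the patch removes): parks the thread while it
// blocks, so a GC triggered by the current lock holder cannot
// deadlock against this waiter.
class ParkedMutexGuard {
 public:
  ParkedMutexGuard(LocalHeap* local_heap, std::mutex* mutex)
      : mutex_(mutex) {
    if (!mutex_->try_lock()) {  // slow path: we would block
      local_heap->Park();       // let a GC run while we wait
      mutex_->lock();
      local_heap->Unpark();
    }
  }
  ~ParkedMutexGuard() { mutex_->unlock(); }

  ParkedMutexGuard(const ParkedMutexGuard&) = delete;
  ParkedMutexGuard& operator=(const ParkedMutexGuard&) = delete;

 private:
  std::mutex* mutex_;
};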

@@ -347,7 +347,7 @@ Page* PagedSpace::Expand() {
 Page* PagedSpace::ExpandBackground(LocalHeap* local_heap) {
   Page* page = AllocatePage();
   if (page == nullptr) return nullptr;
-  ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+  base::MutexGuard lock(&allocation_mutex_);
   AddPage(page);
   Free(page->area_start(), page->area_size(),
        SpaceAccountingMode::kSpaceAccounted);
@@ -577,7 +577,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
     // First try to refill the free-list, concurrent sweeper threads
     // may have freed some objects in the meantime.
     {
-      ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+      base::MutexGuard lock(&allocation_mutex_);
       RefillFreeList();
     }
@@ -598,7 +598,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
         invalidated_slots_in_free_space);
     {
-      ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+      base::MutexGuard lock(&allocation_mutex_);
       RefillFreeList();
     }
@@ -624,7 +624,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
     collector->DrainSweepingWorklistForSpace(identity());
     {
-      ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+      base::MutexGuard lock(&allocation_mutex_);
       RefillFreeList();
     }
@@ -642,7 +642,7 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
                                                 size_t max_size_in_bytes,
                                                 AllocationAlignment alignment,
                                                 AllocationOrigin origin) {
-  ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+  base::MutexGuard lock(&allocation_mutex_);
   DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
   DCHECK_EQ(identity(), OLD_SPACE);
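
With the parked guards gone, a thread blocked on allocation_mutex_
counts as active, so stop-the-world requests are honored at explicit
safepoints on the allocation path rather than while the thread waits on
the mutex. A rough sketch of that shape, with simplified stand-in types
(Safepoint, RefillFreeList, and the free-list helper are placeholders;
only the lock/safepoint ordering is the point):

#include <mutex>
#include <optional>

// Illustrative scaffolding; names mirror the diff above, bodies do not.
struct LocalHeap {
  // In V8, LocalHeap::Safepoint() is where a background thread honors
  // a pending stop-the-world request; here it is a no-op placeholder.
  void Safepoint() {}
};

class PagedSpace {
 public:
  std::optional<size_t> RawRefillLabBackground(LocalHeap* local_heap,
                                               size_t min_size_in_bytes) {
    {
      // Plain guard: nothing reachable from here (free-list refill,
      // page accounting) can start incremental marking or otherwise
      // trigger a GC while the lock is held.
      std::lock_guard<std::mutex> lock(allocation_mutex_);
      RefillFreeList();
    }
    // GC requests are served here, outside the lock, instead of while
    // this thread is parked on the mutex.
    local_heap->Safepoint();
    return TryAllocateFromFreeList(min_size_in_bytes);
  }

 private:
  void RefillFreeList() {}
  std::optional<size_t> TryAllocateFromFreeList(size_t) {
    return std::nullopt;  // placeholder: no free memory in this sketch
  }
  std::mutex allocation_mutex_;
};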