diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 595d6ee3ce..81361f02aa 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -4098,39 +4098,44 @@ int MarkCompactCollector::SweepConservatively(PagedSpace* space,
 
 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           int required_freed_bytes) {
-  PageIterator it(space);
-  FreeList* free_list = space == heap()->old_pointer_space()
-                            ? free_list_old_pointer_space_.get()
-                            : free_list_old_data_space_.get();
-  FreeList private_free_list(space);
   int max_freed = 0;
   int max_freed_overall = 0;
+  PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
-    if (p->TryParallelSweeping()) {
-      if (space->swept_precisely()) {
-        max_freed = SweepPrecisely(
-            space, &private_free_list, p, NULL);
-      } else {
-        max_freed = SweepConservatively(
-            space, &private_free_list, p);
-      }
-      ASSERT(max_freed >= 0);
-      free_list->Concatenate(&private_free_list);
-      if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
-        return max_freed;
-      }
-      max_freed_overall = Max(max_freed, max_freed_overall);
+    max_freed = SweepInParallel(p, space);
+    ASSERT(max_freed >= 0);
+    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+      return max_freed;
     }
+    max_freed_overall = Max(max_freed, max_freed_overall);
     if (p == space->end_of_unswept_pages()) break;
   }
   return max_freed_overall;
 }
 
 
+int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
+  int max_freed = 0;
+  if (page->TryParallelSweeping()) {
+    FreeList* free_list = space == heap()->old_pointer_space()
+                              ? free_list_old_pointer_space_.get()
+                              : free_list_old_data_space_.get();
+    FreeList private_free_list(space);
+    if (space->swept_precisely()) {
+      max_freed = SweepPrecisely(
+          space, &private_free_list, page, NULL);
+    } else {
+      max_freed = SweepConservatively(
+          space, &private_free_list, page);
+    }
+    free_list->Concatenate(&private_free_list);
+  }
+  return max_freed;
+}
+
+
 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_swept_precisely(sweeper == PRECISE ||
                              sweeper == CONCURRENT_PRECISE ||
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 1145badba6..638076f0b0 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -658,9 +658,14 @@ class MarkCompactCollector {
   // Concurrent and parallel sweeping support. If required_freed_bytes was set
   // to a value larger than 0, then sweeping returns after a block of at least
   // required_freed_bytes was freed. If required_freed_bytes was set to zero
-  // then the whole given space is swept.
+  // then the whole given space is swept. It returns the size of the maximum
+  // continuous freed memory chunk.
   int SweepInParallel(PagedSpace* space, int required_freed_bytes);
 
+  // Sweeps a given page concurrently to the sweeper threads. It returns the
+  // size of the maximum continuous freed memory chunk.
+  int SweepInParallel(Page* page, PagedSpace* space);
+
   void EnsureSweepingCompleted();
 
   // If sweeper threads are not active this method will return true. If
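The new header comments above spell out the contract shared by both overloads: sweeping stops early once a single page yields a free block of at least `required_freed_bytes`, and the return value is the size of the largest contiguous chunk freed. The standalone sketch below models that contract with simplified stand-in types (`FakePage`, `SweepPage`, `SweepSpace` are illustrative names, not V8 API) to show how the space-level loop now delegates to a per-page sweep, mirroring the refactoring in mark-compact.cc.

```cpp
// Simplified model of the SweepInParallel() contract described in the header
// comment above. All names here are stand-ins, not V8 internals.
#include <algorithm>
#include <cstdio>
#include <vector>

struct FakePage {
  std::vector<int> free_block_sizes;  // contiguous free blocks on this page
  bool claimed = false;               // models Page::TryParallelSweeping()
};

// Per-page sweep: returns the size of the largest contiguous freed block, or
// 0 if another sweeper already claimed the page (like the new overload).
int SweepPage(FakePage* page) {
  if (page->claimed) return 0;
  page->claimed = true;
  int max_freed = 0;
  for (int block : page->free_block_sizes) {
    max_freed = std::max(max_freed, block);
  }
  return max_freed;
}

// Space-level sweep: if required_freed_bytes > 0, stop as soon as one page
// yields a block that large; otherwise sweep every page and report the
// largest block seen overall.
int SweepSpace(std::vector<FakePage>* pages, int required_freed_bytes) {
  int max_freed_overall = 0;
  for (FakePage& page : *pages) {
    int max_freed = SweepPage(&page);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
      return max_freed;  // one block already satisfies the request
    }
    max_freed_overall = std::max(max_freed, max_freed_overall);
  }
  return max_freed_overall;
}

int main() {
  std::vector<FakePage> pages(3);
  pages[0].free_block_sizes = {128, 32};
  pages[1].free_block_sizes = {512};
  pages[2].free_block_sizes = {64};
  // Asking for 256 bytes returns 512 after the second page; the third page
  // is never touched.
  std::printf("max freed: %d\n", SweepSpace(&pages, 256));
  return 0;
}
```

Splitting out the per-page step is what allows the store buffer (next hunk) to sweep exactly one page on demand instead of driving a sweep over the whole space.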
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 3745d91a8a..d1a04d292f 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -505,6 +505,16 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
             }
           }
         } else {
+          if (page->parallel_sweeping() > MemoryChunk::SWEEPING_FINALIZE) {
+            heap_->mark_compact_collector()->SweepInParallel(page, owner);
+            if (page->parallel_sweeping() > MemoryChunk::SWEEPING_FINALIZE) {
+              // We were not able to sweep that page, i.e., a concurrent
+              // sweeper thread currently owns this page.
+              // TODO(hpayer): This may introduce a huge pause here. We only
+              // care about finishing sweeping of the scan on scavenge page.
+              heap_->mark_compact_collector()->EnsureSweepingCompleted();
+            }
+          }
          FindPointersToNewSpaceInRegion(
              start, end, slot_callback, clear_maps);
        }
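The store-buffer hunk adds a lazy fallback: before scanning a scan-on-scavenge page for pointers into new space, it first tries to sweep just that page via the new overload, and only if a concurrent sweeper thread already owns the page does it wait for all sweeping to finish (the TODO notes the potential pause). Below is a minimal sketch of that check-sweep-recheck pattern, using illustrative stand-ins (`FakePage`, `FakeCollector`, `EnsureSweptBeforeScan`) rather than the real V8 types.

```cpp
// Minimal model of the on-demand sweep added to IteratePointersToNewSpace().
// All names are illustrative stand-ins, not V8 API.
#include <atomic>
#include <cstdio>

enum SweepingState {
  kSweepingDone,
  kSweepingFinalize,
  kSweepingInProgress,
  kSweepingPending
};

struct FakePage {
  std::atomic<int> state{kSweepingPending};
  // Models Page::TryParallelSweeping(): only one thread may claim the page.
  bool TryClaimForSweeping() {
    int expected = kSweepingPending;
    return state.compare_exchange_strong(expected, kSweepingInProgress);
  }
};

struct FakeCollector {
  // Models the per-page SweepInParallel(): a no-op if the page is already
  // owned by another sweeper.
  void SweepPage(FakePage* page) {
    if (!page->TryClaimForSweeping()) return;
    // ... sweep the page ...
    page->state = kSweepingDone;
  }
  // Models EnsureSweepingCompleted(): in V8 this blocks until all sweeper
  // threads are done; here it is only a placeholder for that wait.
  void EnsureSweepingCompleted() {}
};

// Run before scanning `page` for pointers into new space.
void EnsureSweptBeforeScan(FakeCollector* collector, FakePage* page) {
  if (page->state > kSweepingFinalize) {  // page is still unswept
    collector->SweepPage(page);           // try to sweep just this page
    if (page->state > kSweepingFinalize) {
      // A concurrent sweeper owns the page; fall back to waiting for all
      // sweeping to complete (the source of the potential pause).
      collector->EnsureSweepingCompleted();
    }
  }
}

int main() {
  FakeCollector collector;
  FakePage page;
  EnsureSweptBeforeScan(&collector, &page);
  std::printf("page state: %d\n", page.state.load());  // prints 0 (done)
}
```

The second check is needed because the first attempt can lose the race: TryParallelSweeping fails when a sweeper thread already holds the page, so the page is still unswept afterwards and the only remaining option in the patch is to wait for sweeping to complete.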