[heap] Ensure unmapper task doesn't start during GC
There is now only one invocation left of
MemoryAllocator::Unmapper::FreeQueuedChunks in the GC epilogue.

Bug: chromium:1329064, chromium:1327132
Change-Id: Icc21ada4c5a8a9505ed6435ef1f62fe48b2dbb52
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3667079
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80745}
parent d21b37d3f2
commit 8e47a2c603
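The CL pins down a simple lifecycle for the background unmapper: it must be idle while a full GC inspects page flags, and it is restarted once, from the GC epilogue, to free the pages queued during collection. Below is a minimal standalone sketch of that lifecycle, not V8 code: Unmapper, QueueChunk, and EnsureUnmappingCompleted are illustrative names here, and a std::thread stands in for the JobHandle used in the hunks that follow.

// Illustrative stand-in for a background unmapper; not V8's implementation.
#include <cassert>
#include <mutex>
#include <queue>
#include <thread>

class Unmapper {
 public:
  void QueueChunk(int chunk) {
    std::lock_guard<std::mutex> lock(mutex_);
    queued_.push(chunk);
  }

  // Start the background task that releases queued chunks. Mirrors the early
  // return added by the CL: no task is started when the queue is empty.
  void FreeQueuedChunks() {
    if (worker_.joinable()) worker_.join();  // stand-in for a job handle
    std::lock_guard<std::mutex> lock(mutex_);
    if (queued_.empty()) return;
    running_ = true;
    worker_ = std::thread([this] {
      std::lock_guard<std::mutex> lock(mutex_);
      while (!queued_.empty()) queued_.pop();  // "give pages back to the OS"
      running_ = false;
    });
  }

  // Block until the background task has finished (used before a full GC).
  void EnsureUnmappingCompleted() {
    if (worker_.joinable()) worker_.join();
  }

  bool IsRunning() {
    std::lock_guard<std::mutex> lock(mutex_);
    return running_;
  }

  ~Unmapper() { EnsureUnmappingCompleted(); }

 private:
  std::mutex mutex_;
  std::queue<int> queued_;
  bool running_ = false;
  std::thread worker_;
};

int main() {
  Unmapper unmapper;
  unmapper.QueueChunk(1);

  // While the full GC touches page flags, the unmapper must be idle.
  unmapper.EnsureUnmappingCompleted();
  assert(!unmapper.IsRunning());

  unmapper.QueueChunk(2);  // pages queued for freeing during the GC

  // GC epilogue: the single remaining place that restarts the task.
  unmapper.FreeQueuedChunks();
  return 0;
}

The real V8 counterparts of IsRunning and FreeQueuedChunks appear in the diff hunks below.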
@@ -1458,6 +1458,14 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
     ReduceNewSpaceSize();
   }

+  // Ensure that unmapper task isn't running during full GC. We need access to
+  // those pages for accessing page flags when processing old-to-new slots.
+  DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR,
+                 !memory_allocator()->unmapper()->IsRunning());
+
+  // Start concurrent unmapper tasks to free pages queued during GC.
+  memory_allocator()->unmapper()->FreeQueuedChunks();
+
   // Remove CollectionRequested flag from main thread state, as the collection
   // was just performed.
   safepoint()->AssertActive();
@@ -522,14 +522,12 @@ void NewLargeObjectSpace::FreeDeadObjects(
     const std::function<bool(HeapObject)>& is_dead) {
   bool is_marking = heap()->incremental_marking()->IsMarking();
   size_t surviving_object_size = 0;
-  bool freed_pages = false;
   PtrComprCageBase cage_base(heap()->isolate());
   for (auto it = begin(); it != end();) {
     LargePage* page = *it;
     it++;
     HeapObject object = page->GetObject();
     if (is_dead(object)) {
-      freed_pages = true;
       RemovePage(page);
       heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
                                        page);
@@ -543,9 +541,6 @@ void NewLargeObjectSpace::FreeDeadObjects(
   // Right-trimming does not update the objects_size_ counter. We are lazily
   // updating it after every GC.
   objects_size_ = surviving_object_size;
-  if (freed_pages) {
-    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
-  }
 }

 void NewLargeObjectSpace::SetCapacity(size_t capacity) {
@@ -1107,11 +1107,10 @@ void MarkCompactCollector::Finish() {

   sweeper()->StartSweeperTasks();

-  // Give pages that are queued to be freed back to the OS. Ensure unmapper
-  // tasks are stopped such that queued pages aren't freed before this point. We
-  // still need all pages to be accessible for the "update pointers" phase.
+  // Ensure unmapper tasks are stopped such that queued pages aren't freed
+  // before this point. We still need all pages to be accessible for the "update
+  // pointers" phase.
   DCHECK(!heap_->memory_allocator()->unmapper()->IsRunning());
-  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();

   // Shrink pages if possible after processing and filtering slots.
   ShrinkPagesToObjectSizes(heap(), heap()->lo_space());
@@ -5777,8 +5776,6 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
 void MinorMarkCompactCollector::EvacuateEpilogue() {
   SemiSpaceNewSpace::From(heap()->new_space())
       ->set_age_mark(heap()->new_space()->top());
-  // Give pages that are queued to be freed back to the OS.
-  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 }

 int MinorMarkCompactCollector::CollectToSpaceUpdatingItems(
@@ -104,6 +104,8 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
 };

 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  if (NumberOfChunks() == 0) return;
+
   if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
     if (job_handle_ && job_handle_->IsValid()) {
       job_handle_->NotifyConcurrencyIncrease();
@@ -161,7 +161,6 @@ void SemiSpace::Uncommit() {
   DCHECK_EQ(CommittedMemory(), removed_page_size);
   DCHECK_EQ(CommittedPhysicalMemory(), 0);
   AccountUncommitted(removed_page_size);
-  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   DCHECK(!IsCommitted());
 }

@@ -225,7 +224,6 @@ void SemiSpace::ShrinkTo(size_t new_capacity) {
     int delta_pages = static_cast<int>(delta / Page::kPageSize);
     RewindPages(delta_pages);
     AccountUncommitted(delta);
-    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   }
   target_capacity_ = new_capacity;
 }