From 7d5969da3d2b1a9b08c6fc6517d69f8ca4aca94c Mon Sep 17 00:00:00 2001
From: mlippautz
Date: Mon, 20 Jun 2016 06:19:25 -0700
Subject: [PATCH] Reland "[heap] Add page evacuation mode for new->new"

Adds an evacuation mode that allows moving pages within new space without copying objects.

Basic idea:
a) Move the page within new space.
b) Sweep the page to make it iterable and process its ArrayBuffers.
c) Finish sweeping until the next scavenge.

The threshold is currently 70% live bytes, i.e., the same threshold we use to determine fragmented pages.

This reverts commit 2263ee9bf4e5aa341cbac547add68a2105963477.

BUG=chromium:581412
LOG=N
CQ_EXTRA_TRYBOTS=tryserver.v8:v8_linux_arm64_gc_stress_dbg,v8_linux_gc_stress_dbg,v8_mac_gc_stress_dbg,v8_linux64_tsan_rel,v8_mac64_asan_rel

Review-Url: https://codereview.chromium.org/2078863002
Cr-Commit-Position: refs/heads/master@{#37104}
---
 src/heap/heap.cc                        |   2 +
 src/heap/mark-compact.cc                | 407 +++++++++++++++---------
 src/heap/mark-compact.h                 |   6 +-
 src/heap/spaces.cc                      |  73 +++--
 src/heap/spaces.h                       |  38 ++-
 test/cctest/BUILD.gn                    |   1 +
 test/cctest/cctest.gyp                  |   1 +
 test/cctest/heap/test-heap.cc           |  50 ---
 test/cctest/heap/test-page-promotion.cc | 129 ++++++++
 9 files changed, 468 insertions(+), 239 deletions(-)
 create mode 100644 test/cctest/heap/test-page-promotion.cc

diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 443b594b69..db55ce19f5 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1611,6 +1611,8 @@ void Heap::Scavenge() { // Pause the inline allocation steps. PauseAllocationObserversScope pause_observers(this); + mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); + #ifdef VERIFY_HEAP if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this); #endif
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 5f1c6de4a8..5fd67f2dbb 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -471,13 +471,13 @@ class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task { private: // v8::Task overrides. void Run() override { - DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); + DCHECK_GE(space_to_start_, FIRST_SPACE); DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); - const int offset = space_to_start_ - FIRST_PAGED_SPACE; - const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; + const int offset = space_to_start_ - FIRST_SPACE; + const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1; for (int i = 0; i < num_spaces; i++) { - const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); - DCHECK_GE(space_id, FIRST_PAGED_SPACE); + const int space_id = FIRST_SPACE + ((i + offset) % num_spaces); + DCHECK_GE(space_id, FIRST_SPACE); DCHECK_LE(space_id, LAST_PAGED_SPACE); sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); }
@@ -515,9 +515,9 @@ void MarkCompactCollector::Sweeper::StartSweepingHelper( void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( Page* page) { - PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); if (!page->SweepingDone()) { - ParallelSweepPage(page, owner); + PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); + ParallelSweepPage(page, owner->identity()); if (!page->SweepingDone()) { // We were not able to sweep that page, i.e., a concurrent // sweeper thread currently owns this page.
Wait for the sweeper
@@ -560,12 +560,27 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() { } } - ForAllSweepingSpaces( - [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); }); + ForAllSweepingSpaces([this](AllocationSpace space) { + if (space == NEW_SPACE) { + swept_list_[NEW_SPACE].Clear(); + } + DCHECK(sweeping_list_[space].empty()); + }); late_pages_ = false; sweeping_in_progress_ = false; }
+void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { + if (!sweeping_in_progress_) return; + if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { + NewSpacePageIterator pit(heap_->new_space()); + while (pit.has_next()) { + Page* page = pit.next(); + SweepOrWaitUntilSweepingCompleted(page); + } + } +}
+ void MarkCompactCollector::EnsureSweepingCompleted() { if (!sweeper().sweeping_in_progress()) return;
@@ -1883,13 +1898,17 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final : public MarkCompactCollector::HeapObjectVisitor { public: explicit EvacuateNewSpacePageVisitor(Heap* heap) - : heap_(heap), promoted_size_(0) {} + : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {} - static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { - if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { - Page* new_page = Page::ConvertNewToOld(page, owner); - new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); - } + static void MoveToOldSpace(Page* page, PagedSpace* owner) { + page->Unlink(); + Page* new_page = Page::ConvertNewToOld(page, owner); + new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); + } + + static void MoveToToSpace(Page* page) { + page->heap()->new_space()->MovePageFromSpaceToSpace(page); + page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION); + } inline bool Visit(HeapObject* object) {
@@ -1900,10 +1919,16 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final } intptr_t promoted_size() { return promoted_size_; } + intptr_t semispace_copied_size() { return semispace_copied_size_; } + + void account_semispace_copied(intptr_t copied) { + semispace_copied_size_ += copied; + } private: Heap* heap_; intptr_t promoted_size_; + intptr_t semispace_copied_size_; }; class MarkCompactCollector::EvacuateOldSpaceVisitor final
@@ -3043,12 +3068,26 @@ void MarkCompactCollector::EvacuateNewSpacePrologue() { new_space->ResetAllocationInfo(); } -void MarkCompactCollector::EvacuateNewSpaceEpilogue() { - newspace_evacuation_candidates_.Rewind(0); -} - class MarkCompactCollector::Evacuator : public Malloced { public: + enum EvacuationMode { + kObjectsNewToOld, + kPageNewToOld, + kObjectsOldToOld, + kPageNewToNew, + }; + + static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { + // Note: The order of checks is important in this function. + if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) + return kPageNewToOld; + if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION)) + return kPageNewToNew; + if (chunk->InNewSpace()) return kObjectsNewToOld; + DCHECK(chunk->IsEvacuationCandidate()); + return kObjectsOldToOld; + } +
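The fast-evacuation criteria the commit message describes (at least 70% live bytes on a page that does not contain the age mark) can be summarized as a predicate. The sketch below is illustrative and not part of the patch; the real check lives in MarkCompactCollector::EvacuatePagesInParallel further down, and the percentage comes from FLAG_page_promotion_threshold:

    // Illustrative sketch only: when does a new-space page qualify for
    // page-level evacuation? threshold_percent mirrors
    // FLAG_page_promotion_threshold (70 by default).
    bool QualifiesForPageEvacuation(Page* page, Address age_mark,
                                    int threshold_percent) {
      const int threshold_bytes =
          threshold_percent * Page::kAllocatableMemory / 100;
      return !page->NeverEvacuate() &&
             page->LiveBytes() > threshold_bytes &&
             !page->Contains(age_mark);
    }

Pages that pass this check and lie entirely below the age mark are converted to old space (kPageNewToOld); all other qualifying pages are moved within new space (kPageNewToNew).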
// NewSpacePages with more live bytes than this threshold qualify for fast // evacuation. static int PageEvacuationThreshold() {
@@ -3078,33 +3117,15 @@ class MarkCompactCollector::Evacuator : public Malloced { CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } private: - enum EvacuationMode { - kObjectsNewToOld, - kPageNewToOld, - kObjectsOldToOld, - }; - static const int kInitialLocalPretenuringFeedbackCapacity = 256; inline Heap* heap() { return collector_->heap(); } - inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { - // Note: The order of checks is important in this function. - if (chunk->InNewSpace()) return kObjectsNewToOld; - if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) - return kPageNewToOld; - DCHECK(chunk->IsEvacuationCandidate()); - return kObjectsOldToOld; - } - void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { duration_ += duration; bytes_compacted_ += bytes_compacted; } - template <IterationMode mode, class Visitor> - inline bool EvacuateSinglePage(Page* p, Visitor* visitor); - MarkCompactCollector* collector_; // Locally cached collector data.
@@ -3121,87 +3142,78 @@ class MarkCompactCollector::Evacuator : public Malloced { intptr_t bytes_compacted_; }; -template <MarkCompactCollector::IterationMode mode, class Visitor> -bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p, - Visitor* visitor) { +bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { bool success = false; - DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() || - p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)); - int saved_live_bytes = p->LiveBytes(); - double evacuation_time; + DCHECK(page->SweepingDone()); + int saved_live_bytes = page->LiveBytes(); + double evacuation_time = 0.0; + Heap* heap = page->heap(); { - AlwaysAllocateScope always_allocate(heap()->isolate()); + AlwaysAllocateScope always_allocate(heap->isolate()); TimedScope timed_scope(&evacuation_time); - success = collector_->VisitLiveObjects(p, visitor, mode); + switch (ComputeEvacuationMode(page)) { + case kObjectsNewToOld: + success = collector_->VisitLiveObjects(page, &new_space_visitor_, + kClearMarkbits); + ArrayBufferTracker::ProcessBuffers( + page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); + DCHECK(success); + break; + case kPageNewToOld: + success = collector_->VisitLiveObjects(page, &new_space_page_visitor, + kKeepMarking); + // ArrayBufferTracker will be updated during sweeping. + DCHECK(success); + break; + case kPageNewToNew: + new_space_page_visitor.account_semispace_copied(page->LiveBytes()); + // ArrayBufferTracker will be updated during sweeping. + success = true; + break; + case kObjectsOldToOld: + success = collector_->VisitLiveObjects(page, &old_space_visitor_, + kClearMarkbits); + if (!success) { + // Aborted compaction page. We have to record slots here, since we + // might not have recorded them in the first place. + // Note: We mark the page as aborted here to be able to record slots + // for code objects in |RecordMigratedSlotVisitor|. + page->SetFlag(Page::COMPACTION_WAS_ABORTED); + EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); + success = + collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking); + ArrayBufferTracker::ProcessBuffers( + page, ArrayBufferTracker::kUpdateForwardedKeepOthers); + DCHECK(success); + // We need to return failure here to indicate that we want this page + // added to the sweeper.
+ success = false; + } else { + ArrayBufferTracker::ProcessBuffers( + page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); + } + break; + default: + UNREACHABLE(); + } }
+ ReportCompactionProgress(evacuation_time, saved_live_bytes); if (FLAG_trace_evacuation) { - const char age_mark_tag = - !p->InNewSpace() - ? 'x' - : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) - ? '>' - : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<' - : '#'; - PrintIsolate(heap()->isolate(), - "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c " - "page_evacuation=%d executable=%d live_bytes=%d time=%f\n", - static_cast<void*>(this), static_cast<void*>(p), - p->InNewSpace(), age_mark_tag, - p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION), - p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes, - evacuation_time); - } - if (success) { - ReportCompactionProgress(evacuation_time, saved_live_bytes); + PrintIsolate(heap->isolate(), + "evacuation[%p]: page=%p new_space=%d " + "page_evacuation=%d executable=%d contains_age_mark=%d " + "live_bytes=%d time=%f\n", + static_cast<void*>(this), static_cast<void*>(page), + page->InNewSpace(), + page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || + page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), + page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), + page->Contains(heap->new_space()->age_mark()), + saved_live_bytes, evacuation_time); } return success; } -bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { - bool result = false; - DCHECK(page->SweepingDone()); - switch (ComputeEvacuationMode(page)) { - case kObjectsNewToOld: - result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_); - ArrayBufferTracker::ProcessBuffers( - page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); - DCHECK(result); - USE(result); - break; - case kPageNewToOld: - result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor); - // ArrayBufferTracker will be updated during sweeping. - DCHECK(result); - USE(result); - break; - case kObjectsOldToOld: - result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_); - if (!result) { - // Aborted compaction page. We have to record slots here, since we might - // not have recorded them in the first place. - // Note: We mark the page as aborted here to be able to record slots - // for code objects in |RecordMigratedSlotVisitor|. - page->SetFlag(Page::COMPACTION_WAS_ABORTED); - EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); - result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor); - ArrayBufferTracker::ProcessBuffers( - page, ArrayBufferTracker::kUpdateForwardedKeepOthers); - DCHECK(result); - USE(result); - // We need to return failure here to indicate that we want this page - // added to the sweeper.
- return false; - } - ArrayBufferTracker::ProcessBuffers( - page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); - - break; - default: - UNREACHABLE(); - } - return result; -} - void MarkCompactCollector::Evacuator::Finalize() { heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); heap()->code_space()->MergeCompactionSpace(
@@ -3210,11 +3222,13 @@ void MarkCompactCollector::Evacuator::Finalize() { heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + new_space_page_visitor.promoted_size()); heap()->IncrementSemiSpaceCopiedObjectSize( - new_space_visitor_.semispace_copied_size()); + new_space_visitor_.semispace_copied_size() + + new_space_page_visitor.semispace_copied_size()); heap()->IncrementYoungSurvivorsCounter( new_space_visitor_.promoted_size() + new_space_visitor_.semispace_copied_size() + - new_space_page_visitor.promoted_size()); + new_space_page_visitor.promoted_size() + + new_space_page_visitor.semispace_copied_size()); heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); }
@@ -3262,30 +3276,33 @@ class EvacuationJobTraits { static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, bool success, PerPageData data) { - if (chunk->InNewSpace()) { - DCHECK(success); - } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { - DCHECK(success); - Page* p = static_cast<Page*>(chunk); - p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); - p->ForAllFreeListCategories( - [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); - heap->mark_compact_collector()->sweeper().AddLatePage( - p->owner()->identity(), p); - } else { - Page* p = static_cast<Page*>(chunk); - if (success) { - DCHECK(p->IsEvacuationCandidate()); - DCHECK(p->SweepingDone()); - p->Unlink(); - } else { - // We have partially compacted the page, i.e., some objects may have - // moved, others are still in place. - p->ClearEvacuationCandidate(); - // Slots have already been recorded so we just need to add it to the - // sweeper. - *data += 1; - } + using Evacuator = MarkCompactCollector::Evacuator; + Page* p = static_cast<Page*>(chunk); + switch (Evacuator::ComputeEvacuationMode(p)) { + case Evacuator::kPageNewToOld: + break; + case Evacuator::kPageNewToNew: + DCHECK(success); + break; + case Evacuator::kObjectsNewToOld: + DCHECK(success); + break; + case Evacuator::kObjectsOldToOld: + if (success) { + DCHECK(p->IsEvacuationCandidate()); + DCHECK(p->SweepingDone()); + p->Unlink(); + } else { + // We have partially compacted the page, i.e., some objects may have + // moved, others are still in place. + p->ClearEvacuationCandidate(); + // Slots have already been recorded so we just need to add it to the + // sweeper, which will happen after updating pointers.
+ *data += 1; + } + break; + default: + UNREACHABLE(); } } };
@@ -3307,10 +3324,14 @@ void MarkCompactCollector::EvacuatePagesInParallel() { live_bytes += page->LiveBytes(); if (!page->NeverEvacuate() && (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && - page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && !page->Contains(age_mark)) { - EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space()); + if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { + EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space()); + } else { + EvacuateNewSpacePageVisitor::MoveToToSpace(page); + } } + job.AddPage(page, &abandoned_pages); } DCHECK_GE(job.NumberOfPages(), 1);
@@ -3364,13 +3385,14 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer { template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, MarkCompactCollector::Sweeper::SweepingParallelism parallelism, MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, + MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode, MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v) { DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); - DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, - space->identity() == CODE_SPACE); + DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) || + (skip_list_mode == REBUILD_SKIP_LIST)); DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
@@ -3403,8 +3425,13 @@ int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, if (free_space_mode == ZAP_FREE_SPACE) { memset(free_start, 0xcc, size); } - freed_bytes = space->UnaccountedFree(free_start, size); - max_freed_bytes = Max(freed_bytes, max_freed_bytes); + if (free_list_mode == REBUILD_FREE_LIST) { + freed_bytes = space->UnaccountedFree(free_start, size); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); + } else { + p->heap()->CreateFillerObjectAt(free_start, size, + ClearRecordedSlots::kNo); + } } Map* map = object->synchronized_map(); int size = object->SizeFromMap(map);
@@ -3431,10 +3458,16 @@ int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, if (free_space_mode == ZAP_FREE_SPACE) { memset(free_start, 0xcc, size); } - freed_bytes = space->UnaccountedFree(free_start, size); - max_freed_bytes = Max(freed_bytes, max_freed_bytes); + if (free_list_mode == REBUILD_FREE_LIST) { + freed_bytes = space->UnaccountedFree(free_start, size); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); + } else { + p->heap()->CreateFillerObjectAt(free_start, size, + ClearRecordedSlots::kNo); + } } p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); + if (free_list_mode == IGNORE_FREE_LIST) return 0; return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); }
@@ -3550,12 +3583,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { EvacuateNewSpacePrologue(); EvacuatePagesInParallel(); - EvacuateNewSpaceEpilogue(); heap()->new_space()->set_age_mark(heap()->new_space()->top()); } UpdatePointersAfterEvacuation(); + if (!heap()->new_space()->Rebalance()) { + FatalProcessOutOfMemory("NewSpace::Rebalance"); + } + // Give pages that are queued to be freed back to the OS. Note that filtering // slots only handles old space (for unboxed doubles), and thus map space can // still contain stale pointers.
We only free the chunks after pointer updates
@@ -3565,6 +3601,19 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); + for (Page* p : newspace_evacuation_candidates_) { + if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { + p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); + sweeper().AddLatePage(p->owner()->identity(), p); + } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { + p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); + p->ForAllFreeListCategories( + [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); + sweeper().AddLatePage(p->owner()->identity(), p); + } + } + newspace_evacuation_candidates_.Rewind(0); + for (Page* p : evacuation_candidates_) { // Important: skip list should be cleared only after roots were updated // because root iteration traverses the stack and might have to find
@@ -3659,6 +3708,15 @@ class PointerUpdateJobTraits { if (heap->InToSpace(*slot)) { return KEEP_SLOT; } + } else if (heap->InToSpace(*slot)) { + DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address()) + ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)); + // Slots can be in "to" space after a page has been moved. Since there is + // no forwarding information present we need to check the markbits to + // determine liveness. + if (Marking::IsBlack( + Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot)))) + return KEEP_SLOT; } else { DCHECK(!heap->InNewSpace(*slot)); }
@@ -3691,6 +3749,24 @@ class ToSpacePointerUpdateJobTraits { static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, MemoryChunk* chunk, PerPageData limits) { + if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { + // New->new promoted pages contain garbage so they require iteration + // using markbits.
+ ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); + } else { + ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); + } + return true; + } + + static const bool NeedSequentialFinalization = false; + static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { + } + + private: + static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, + MemoryChunk* chunk, + PerPageData limits) { for (Address cur = limits.first; cur < limits.second;) { HeapObject* object = HeapObject::FromAddress(cur); Map* map = object->map(); int size = object->SizeFromMap(map); object->IterateBody(map->instance_type(), size, visitor); cur += size; } - return true; } - static const bool NeedSequentialFinalization = false; - static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { + + static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, + MemoryChunk* chunk, + PerPageData limits) { + LiveObjectIterator<kBlackObjects> it(chunk); + HeapObject* object = NULL; + while ((object = it.Next()) != NULL) { + Map* map = object->map(); + int size = object->SizeFromMap(map); + object->IterateBody(map->instance_type(), size, visitor); + } } };
@@ -3777,7 +3861,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, int pages_freed = 0; Page* page = nullptr; while ((page = GetSweepingPageSafe(identity)) != nullptr) { - int freed = ParallelSweepPage(page, heap_->paged_space(identity)); + int freed = ParallelSweepPage(page, identity); pages_freed += 1; DCHECK_GE(freed, 0); max_freed = Max(max_freed, freed);
@@ -3789,7 +3873,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, } int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, - PagedSpace* space) { + AllocationSpace identity) { int max_freed = 0; if (page->mutex()->TryLock()) { // If this page was already swept in the meantime, we can return here.
@@ -3798,19 +3882,25 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, return 0; } page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); - if (space->identity() == OLD_SPACE) { + if (identity == NEW_SPACE) { + RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, + IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr); + } else if (identity == OLD_SPACE) { max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, - IGNORE_FREE_SPACE>(space, page, NULL); - } else if (space->identity() == CODE_SPACE) { + REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( + heap_->paged_space(identity), page, nullptr); + } else if (identity == CODE_SPACE) { max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, - IGNORE_FREE_SPACE>(space, page, NULL); + REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( + heap_->paged_space(identity), page, nullptr); } else { max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, - IGNORE_FREE_SPACE>(space, page, NULL); + REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( + heap_->paged_space(identity), page, nullptr); } { base::LockGuard<base::Mutex> guard(&mutex_); - swept_list_[space->identity()].Add(page); + swept_list_[identity].Add(page); } page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); page->mutex()->Unlock();
@@ -3836,7 +3926,8 @@ void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) { page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); int to_sweep = page->area_size() - page->LiveBytes(); - heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); + if (space != NEW_SPACE) + heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); } Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
@@ -3900,8 +3991,8 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { // testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, - Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( - space, p, nullptr); + Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST, + Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr); continue; }
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index 0e5d860e77..07b289e2ba 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -408,6 +408,7 @@ class MarkCompactCollector { enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS }; enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST }; + enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST }; enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE }; enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -416,6 +417,7 @@ class MarkCompactCollector { template <SweepingMode sweeping_mode, SweepingParallelism parallelism, SkipListRebuildingMode skip_list_mode, + FreeListRebuildingMode free_list_mode, FreeSpaceTreatmentMode free_space_mode> static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);
@@ -434,11 +436,12 @@ class MarkCompactCollector { int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes, int max_pages = 0); - int ParallelSweepPage(Page* page, PagedSpace* space); + int ParallelSweepPage(Page* page, AllocationSpace identity); void StartSweeping(); void StartSweepingHelper(AllocationSpace space_to_start); void EnsureCompleted(); + void EnsureNewSpaceCompleted(); bool IsSweepingCompleted(); void SweepOrWaitUntilSweepingCompleted(Page* page);
@@ -791,7 +794,6 @@ class MarkCompactCollector { void SweepSpaces(); void EvacuateNewSpacePrologue(); - void EvacuateNewSpaceEpilogue(); void EvacuatePagesInParallel();
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index a0eae92d69..c8b8baa0fe 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1386,7 +1386,6 @@ void NewSpace::TearDown() { from_space_.TearDown(); } - void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
@@ -1432,6 +1431,48 @@ void NewSpace::Shrink() { DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); } +bool NewSpace::Rebalance() { + CHECK(heap()->promotion_queue()->is_empty()); + // Order here is important to make use of the page pool. + return to_space_.EnsureCurrentCapacity() && + from_space_.EnsureCurrentCapacity(); +} + +bool SemiSpace::EnsureCurrentCapacity() { + if (is_committed()) { + const int expected_pages = current_capacity_ / Page::kPageSize; + int actual_pages = 0; + Page* current_page = anchor()->next_page(); + while (current_page != anchor()) { + actual_pages++; + current_page = current_page->next_page(); + if (actual_pages > expected_pages) { + Page* to_remove = current_page->prev_page(); + // Make sure we don't overtake the actual top pointer. + CHECK_NE(to_remove, current_page_); + to_remove->Unlink(); + heap()->memory_allocator()->Free( + to_remove); + } + } + while (actual_pages < expected_pages) { + actual_pages++; + current_page = + heap()->memory_allocator()->AllocatePage( + Page::kAllocatableMemory, this, executable()); + if (current_page == nullptr) return false; + DCHECK_NOT_NULL(current_page); + current_page->InsertAfter(anchor()); + Bitmap::Clear(current_page); + current_page->SetFlags(anchor()->prev_page()->GetFlags(), + Page::kCopyAllFlags); + heap()->CreateFillerObjectAt(current_page->area_start(), + current_page->area_size(), + ClearRecordedSlots::kNo); + } + } + return true; +} void LocalAllocationBuffer::Close() { if (IsValid()) {
@@ -1488,7 +1529,6 @@ void NewSpace::ResetAllocationInfo() { Address old_top = allocation_info_.top(); to_space_.Reset(); UpdateAllocationInfo(); - pages_used_ = 0; // Clear all mark-bits in the to-space.
NewSpacePageIterator it(&to_space_); while (it.has_next()) {
@@ -1534,7 +1574,6 @@ bool NewSpace::AddFreshPage() { int remaining_in_page = static_cast<int>(limit - top); heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo); - pages_used_++; UpdateAllocationInfo(); return true;
@@ -1872,23 +1911,21 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) { void SemiSpace::Reset() { DCHECK_NE(anchor_.next_page(), &anchor_); current_page_ = anchor_.next_page(); + pages_used_ = 0; } -bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) { - // TODO(mlippautz): We do not have to get a new page here when the semispace - // is uncommitted later on. - Page* new_page = heap()->memory_allocator()->AllocatePage( - Page::kAllocatableMemory, this, executable()); - if (new_page == nullptr) return false; - Bitmap::Clear(new_page); - new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags); - new_page->set_next_page(old_page->next_page()); - new_page->set_prev_page(old_page->prev_page()); - old_page->next_page()->set_prev_page(new_page); - old_page->prev_page()->set_next_page(new_page); - heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(), - ClearRecordedSlots::kNo); - return true; +void SemiSpace::RemovePage(Page* page) { + if (current_page_ == page) { + current_page_ = page->prev_page(); + } + page->Unlink(); +} + +void SemiSpace::PrependPage(Page* page) { + page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags); + page->set_owner(this); + page->InsertAfter(anchor()); + pages_used_++; } void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 0981c3650e..29a0b66d9d 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -425,6 +425,10 @@ class MemoryChunk { // from new to old space during evacuation. PAGE_NEW_OLD_PROMOTION, + // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved + // within the new space during evacuation. + PAGE_NEW_NEW_PROMOTION, + // A black page has all mark bits set to 1 (black). A black page currently // cannot be iterated because it is not swept. Moreover live bytes are also // not updated.
@@ -2408,7 +2412,8 @@ class SemiSpace : public Space { committed_(false), id_(semispace), anchor_(this), - current_page_(nullptr) {} + current_page_(nullptr), + pages_used_(0) {} inline bool Contains(HeapObject* o); inline bool Contains(Object* o);
@@ -2431,6 +2436,8 @@ class SemiSpace : public Space { // than the current capacity. bool ShrinkTo(int new_capacity); + bool EnsureCurrentCapacity(); + // Returns the start address of the first page of the space. Address space_start() { DCHECK_NE(anchor_.next_page(), anchor());
@@ -2439,6 +2446,7 @@ Page* first_page() { return anchor_.next_page(); } Page* current_page() { return current_page_; } + int pages_used() { return pages_used_; } // Returns one past the end address of the space. Address space_end() { return anchor_.prev_page()->area_end(); }
@@ -2451,15 +2459,19 @@ bool AdvancePage() { Page* next_page = current_page_->next_page(); - if (next_page == anchor()) return false; + if (next_page == anchor() || pages_used_ == max_pages()) { + return false; + } current_page_ = next_page; + pages_used_++; return true; } // Resets the space to using the first page. void Reset(); - bool ReplaceWithEmptyPage(Page* page); + void RemovePage(Page* page); + void PrependPage(Page* page); // Age mark accessors.
Address age_mark() { return age_mark_; }
@@ -2513,6 +2525,7 @@ class SemiSpace : public Space { void RewindPages(Page* start, int num_pages); inline Page* anchor() { return &anchor_; } + inline int max_pages() { return current_capacity_ / Page::kPageSize; } // Copies the flags into the masked positions on all pages in the space. void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2520,7 +2533,8 @@ class SemiSpace : public Space { // The currently committed space capacity. int current_capacity_; - // The maximum capacity that can be used by this space. + // The maximum capacity that can be used by this space. A space cannot grow + // beyond that size. int maximum_capacity_; // The minimum capacity for the space. A space cannot shrink below this size.
@@ -2534,9 +2548,11 @@ class SemiSpace : public Space { Page anchor_; Page* current_page_; + int pages_used_; - friend class SemiSpaceIterator; + friend class NewSpace; friend class NewSpacePageIterator; + friend class SemiSpaceIterator; };
@@ -2606,7 +2622,6 @@ class NewSpace : public Space { to_space_(heap, kToSpace), from_space_(heap, kFromSpace), reservation_(), - pages_used_(0), top_on_previous_step_(0), allocated_histogram_(nullptr), promoted_histogram_(nullptr) {}
@@ -2638,7 +2653,7 @@ class NewSpace : public Space { // Return the allocated bytes in the active semispace. intptr_t Size() override { - return pages_used_ * Page::kAllocatableMemory + + return to_space_.pages_used() * Page::kAllocatableMemory + static_cast<int>(top() - to_space_.page_low()); }
@@ -2715,12 +2730,14 @@ class NewSpace : public Space { return static_cast<size_t>(allocated); } - bool ReplaceWithEmptyPage(Page* page) { - // This method is called after flipping the semispace. + void MovePageFromSpaceToSpace(Page* page) { DCHECK(page->InFromSpace()); - return from_space_.ReplaceWithEmptyPage(page); + from_space_.RemovePage(page); + to_space_.PrependPage(page); } + bool Rebalance(); + // Return the maximum capacity of a semispace. int MaximumCapacity() { DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
@@ -2873,7 +2890,6 @@ class NewSpace : public Space { SemiSpace to_space_; SemiSpace from_space_; base::VirtualMemory reservation_; - int pages_used_; // Allocation pointer and limit for normal allocation and allocation during // mark-compact collection.
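A note on the bookkeeping above: because pages are now moved wholesale, a semispace can temporarily hold more (or fewer) pages than current_capacity_ implies. That is why AdvancePage() additionally stops at max_pages() and why NewSpace::Rebalance() runs after evacuation. A hypothetical debug helper (not in the patch) makes the invariant that EnsureCurrentCapacity() restores explicit:

    // Hypothetical check, for illustration only: after Rebalance(), each
    // committed semispace again holds exactly
    // current_capacity_ / Page::kPageSize pages.
    void SemiSpace::CheckPageCountInvariant() {
      const int expected_pages = current_capacity_ / Page::kPageSize;
      int actual_pages = 0;
      for (Page* p = anchor()->next_page(); p != anchor(); p = p->next_page()) {
        actual_pages++;
      }
      DCHECK_EQ(expected_pages, actual_pages);
    }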
diff --git a/test/cctest/BUILD.gn b/test/cctest/BUILD.gn
index 40ddba9452..b232bc7113 100644
--- a/test/cctest/BUILD.gn
+++ b/test/cctest/BUILD.gn
@@ -70,6 +70,7 @@ v8_executable("cctest") { "heap/test-incremental-marking.cc", "heap/test-lab.cc", "heap/test-mark-compact.cc", + "heap/test-page-promotion.cc", "heap/test-spaces.cc", "interpreter/bytecode-expectations-printer.cc", "interpreter/bytecode-expectations-printer.h",
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index 0982fc2e1e..9c9afdf955 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -114,6 +114,7 @@ 'heap/test-incremental-marking.cc', 'heap/test-lab.cc', 'heap/test-mark-compact.cc', + 'heap/test-page-promotion.cc', 'heap/test-spaces.cc', 'libsampler/test-sampler.cc', 'print-extension.cc',
diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc
index 8fbe9963b1..725a8ddfdd 100644
--- a/test/cctest/heap/test-heap.cc
+++ b/test/cctest/heap/test-heap.cc
@@ -6561,56 +6561,6 @@ HEAP_TEST(Regress589413) { heap->CollectGarbage(OLD_SPACE); } -UNINITIALIZED_TEST(PagePromotion) { - FLAG_page_promotion = true; - FLAG_page_promotion_threshold = 0; // % - i::FLAG_min_semi_space_size = 8 * (Page::kPageSize / MB); - // We cannot optimize for size as we require a new space with more than one - // page. - i::FLAG_optimize_for_size = false; - // Set max_semi_space_size because it could've been initialized by an - // implication of optimize_for_size. - i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size; - v8::Isolate::CreateParams create_params; - create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); - v8::Isolate* isolate = v8::Isolate::New(create_params); - i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); - { - v8::Isolate::Scope isolate_scope(isolate); - v8::HandleScope handle_scope(isolate); - v8::Context::New(isolate)->Enter(); - Heap* heap = i_isolate->heap(); - - // Clean up any left over objects from cctest initialization. - heap->CollectAllGarbage(); - heap->CollectAllGarbage(); - - std::vector<Handle<FixedArray>> handles; - heap::SimulateFullSpace(heap->new_space(), &handles); - heap->CollectGarbage(NEW_SPACE); - CHECK_GT(handles.size(), 0u); - // First object in handles should be on the first page. - Handle<FixedArray> first_object = handles.front(); - Page* first_page = Page::FromAddress(first_object->address()); - // The age mark should not be on the first page. - CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark())); - // To perform a sanity check on live bytes we need to mark the heap. - heap::SimulateIncrementalMarking(heap, true); - // Sanity check that the page meets the requirements for promotion. - const int threshold_bytes = - FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; - CHECK_GE(first_page->LiveBytes(), threshold_bytes); - - // Actual checks: The page is in new space first, but is moved to old space - // during a full GC. - CHECK(heap->new_space()->ContainsSlow(first_page->address())); - CHECK(!heap->old_space()->ContainsSlow(first_page->address())); - heap->CollectGarbage(OLD_SPACE); - CHECK(!heap->new_space()->ContainsSlow(first_page->address())); - CHECK(heap->old_space()->ContainsSlow(first_page->address())); - } -} - TEST(Regress598319) { // This test ensures that no white objects can cross the progress bar of large // objects during incremental marking.
It checks this by using Shift() during
diff --git a/test/cctest/heap/test-page-promotion.cc b/test/cctest/heap/test-page-promotion.cc
new file mode 100644
index 0000000000..4ec2e2a416
--- /dev/null
+++ b/test/cctest/heap/test-page-promotion.cc
@@ -0,0 +1,129 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/heap/array-buffer-tracker.h" +#include "test/cctest/cctest.h" +#include "test/cctest/heap/heap-utils.h" + +namespace { + +v8::Isolate* NewIsolateForPagePromotion() { + i::FLAG_page_promotion = true; + i::FLAG_page_promotion_threshold = 0; // % + i::FLAG_min_semi_space_size = 8 * (i::Page::kPageSize / i::MB); + // We cannot optimize for size as we require a new space with more than one + // page. + i::FLAG_optimize_for_size = false; + // Set max_semi_space_size because it could've been initialized by an + // implication of optimize_for_size. + i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size; + v8::Isolate::CreateParams create_params; + create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); + v8::Isolate* isolate = v8::Isolate::New(create_params); + return isolate; +} + +} // namespace + +namespace v8 { +namespace internal { + +UNINITIALIZED_TEST(PagePromotion_NewToOld) { + v8::Isolate* isolate = NewIsolateForPagePromotion(); + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); + { + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope handle_scope(isolate); + v8::Context::New(isolate)->Enter(); + Heap* heap = i_isolate->heap(); + + std::vector<Handle<FixedArray>> handles; + heap::SimulateFullSpace(heap->new_space(), &handles); + heap->CollectGarbage(NEW_SPACE); + CHECK_GT(handles.size(), 0u); + // First object in handles should be on the first page. + Handle<FixedArray> first_object = handles.front(); + Page* first_page = Page::FromAddress(first_object->address()); + // To perform a sanity check on live bytes we need to mark the heap. + heap::SimulateIncrementalMarking(heap, true); + // Sanity check that the page meets the requirements for promotion. + const int threshold_bytes = + FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; + CHECK_GE(first_page->LiveBytes(), threshold_bytes); + + // Actual checks: The page is in new space first, but is moved to old space + // during a full GC. + CHECK(heap->new_space()->ContainsSlow(first_page->address())); + CHECK(!heap->old_space()->ContainsSlow(first_page->address())); + heap::GcAndSweep(heap, OLD_SPACE); + CHECK(!heap->new_space()->ContainsSlow(first_page->address())); + CHECK(heap->old_space()->ContainsSlow(first_page->address())); + } +} + +UNINITIALIZED_TEST(PagePromotion_NewToNew) { + v8::Isolate* isolate = NewIsolateForPagePromotion(); + Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate); + { + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope handle_scope(isolate); + v8::Context::New(isolate)->Enter(); + Heap* heap = i_isolate->heap(); + + std::vector<Handle<FixedArray>> handles; + heap::SimulateFullSpace(heap->new_space(), &handles); + CHECK_GT(handles.size(), 0u); + // Last object in handles should definitely be on the last page which does + // not contain the age mark.
+ Handle<FixedArray> last_object = handles.back(); + Page* to_be_promoted_page = Page::FromAddress(last_object->address()); + CHECK(to_be_promoted_page->Contains(last_object->address())); + CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address())); + heap::GcAndSweep(heap, OLD_SPACE); + CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address())); + CHECK(to_be_promoted_page->Contains(last_object->address())); + } +} + +UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) { + // Test makes sure JSArrayBuffer backing stores are still tracked after + // new-to-new promotion. + v8::Isolate* isolate = NewIsolateForPagePromotion(); + Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate); + { + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope handle_scope(isolate); + v8::Context::New(isolate)->Enter(); + Heap* heap = i_isolate->heap(); + + // Fill the current page which potentially contains the age mark. + heap::FillCurrentPage(heap->new_space()); + + // Allocate a buffer we would like to check against. + Handle<JSArrayBuffer> buffer = + i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared); + JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100); + std::vector<Handle<FixedArray>> handles; + // Simulate a full space, filling the interesting page with live objects. + heap::SimulateFullSpace(heap->new_space(), &handles); + CHECK_GT(handles.size(), 0u); + // First object in handles should be on the to-be-promoted page, which also + // holds the array buffer. + Handle<FixedArray> first_object = handles.front(); + Page* to_be_promoted_page = Page::FromAddress(first_object->address()); + CHECK(to_be_promoted_page->Contains(first_object->address())); + CHECK(to_be_promoted_page->Contains(buffer->address())); + CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address())); + CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address())); + heap::GcAndSweep(heap, OLD_SPACE); + CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address())); + CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address())); + CHECK(to_be_promoted_page->Contains(first_object->address())); + CHECK(to_be_promoted_page->Contains(buffer->address())); + CHECK(ArrayBufferTracker::IsTracked(*buffer)); + } +} + +} // namespace internal
} // namespace v8
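Taken together, the patch implements the three steps from the commit message. The following condensed walkthrough is illustrative only (a hypothetical free function; the real calls are spread across EvacuatePagesInParallel, EvacuateNewSpaceAndCandidates, and Heap::Scavenge):

    // Illustrative only: the lifecycle of a new->new promoted page.
    void SketchNewToNewPromotion(Heap* heap, Page* page) {
      // (a) Move the page from from-space to to-space instead of copying its
      //     objects; this tags the page with PAGE_NEW_NEW_PROMOTION.
      MarkCompactCollector::EvacuateNewSpacePageVisitor::MoveToToSpace(page);
      // (b) After pointers have been updated, hand the page to the sweeper so
      //     it becomes iterable again and its ArrayBuffers are processed.
      page->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
      heap->mark_compact_collector()->sweeper().AddLatePage(NEW_SPACE, page);
      // (c) Sweeping that is still pending is finished at the start of the
      //     next scavenge via Sweeper::EnsureNewSpaceCompleted().
    }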