diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 1a28415e7c..6103db2077 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -157,7 +157,8 @@ Heap::Heap() heap_iterator_depth_(0), embedder_heap_tracer_(nullptr), embedder_reference_reporter_(new TracePossibleWrapperReporter(this)), - force_oom_(false) { + force_oom_(false), + delay_sweeper_tasks_for_testing_(false) { // Allow build-time customization of the max semispace size. Building // V8 with snapshots and a non-default max semispace size is much // easier if you can define it as part of the build environment. diff --git a/src/heap/heap.h b/src/heap/heap.h index eee7d28a69..30949bb64a 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -2317,6 +2317,7 @@ class Heap { // Used for testing purposes. bool force_oom_; + bool delay_sweeper_tasks_for_testing_; // Classes in "heap" can be friends. friend class AlwaysAllocateScope; diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc index e5d1e8264a..185cb1b537 100644 --- a/src/heap/incremental-marking.cc +++ b/src/heap/incremental-marking.cc @@ -1049,7 +1049,7 @@ void IncrementalMarking::FinalizeSweeping() { DCHECK(state_ == SWEEPING); if (heap_->mark_compact_collector()->sweeping_in_progress() && (!FLAG_concurrent_sweeping || - heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) { + !heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) { heap_->mark_compact_collector()->EnsureSweepingCompleted(); } if (!heap_->mark_compact_collector()->sweeping_in_progress()) { diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 31107f06fb..8b3d455824 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -317,7 +317,7 @@ void MarkCompactCollector::CollectGarbage() { } #endif - SweepSpaces(); + StartSweepSpaces(); EvacuateNewSpaceAndCandidates(); @@ -447,20 +447,18 @@ void MarkCompactCollector::Sweeper::StartSweeping() { std::sort(sweeping_list_[space].begin(), 
sweeping_list_[space].end(), [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); }); - if (FLAG_concurrent_sweeping) { - ForAllSweepingSpaces([this](AllocationSpace space) { - if (space == NEW_SPACE) return; - StartSweepingHelper(space); - }); - } } -void MarkCompactCollector::Sweeper::StartSweepingHelper( - AllocationSpace space_to_start) { - num_sweeping_tasks_.Increment(1); - V8::GetCurrentPlatform()->CallOnBackgroundThread( - new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), - v8::Platform::kShortRunningTask); +void MarkCompactCollector::Sweeper::StartSweeperTasks() { + if (FLAG_concurrent_sweeping && sweeping_in_progress_) { + ForAllSweepingSpaces([this](AllocationSpace space) { + if (space == NEW_SPACE) return; + num_sweeping_tasks_.Increment(1); + V8::GetCurrentPlatform()->CallOnBackgroundThread( + new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space), + v8::Platform::kShortRunningTask); + }); + } } void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( @@ -477,7 +475,8 @@ void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( } void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { - if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) { + if (FLAG_concurrent_sweeping && + !sweeper().IsSweepingCompleted(space->identity())) { sweeper().ParallelSweepSpace(space->identity(), 0); space->RefillFreeList(); } @@ -497,10 +496,11 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() { // If sweeping is not completed or not running at all, we try to complete it // here. 
- if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { - ForAllSweepingSpaces( - [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); - } + ForAllSweepingSpaces([this](AllocationSpace space) { + if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) { + ParallelSweepSpace(space, 0); + } + }); if (FLAG_concurrent_sweeping) { while (num_sweeping_tasks_.Value() > 0) { @@ -515,13 +515,12 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() { } DCHECK(sweeping_list_[space].empty()); }); - late_pages_ = false; sweeping_in_progress_ = false; } void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { if (!sweeping_in_progress_) return; - if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { + if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) { for (Page* p : *heap_->new_space()) { SweepOrWaitUntilSweepingCompleted(p); } @@ -543,13 +542,20 @@ void MarkCompactCollector::EnsureSweepingCompleted() { #endif } -bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { +bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() { DCHECK(FLAG_concurrent_sweeping); while (pending_sweeper_tasks_semaphore_.WaitFor( base::TimeDelta::FromSeconds(0))) { num_sweeping_tasks_.Increment(-1); } - return num_sweeping_tasks_.Value() == 0; + return num_sweeping_tasks_.Value() != 0; +} + +bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) { + DCHECK(FLAG_concurrent_sweeping); + if (AreSweeperTasksRunning()) return false; + base::LockGuard<base::Mutex> guard(&mutex_); + return sweeping_list_[space].empty(); } const char* AllocationSpaceName(AllocationSpace space) { @@ -833,10 +839,8 @@ void MarkCompactCollector::Prepare() { void MarkCompactCollector::Finish() { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH); - if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) { - // If we added some more pages during MC, we need to start at least one - // more task as all other tasks might already be 
finished. - sweeper().StartSweepingHelper(OLD_SPACE); + if (!heap()->delay_sweeper_tasks_for_testing_) { + sweeper().StartSweeperTasks(); } // The hashing of weak_object_to_code_table is no longer valid. @@ -3032,17 +3036,15 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, // // The number of parallel compaction tasks is limited by: // - #evacuation pages - // - (#cores - 1) + // - #cores const double kTargetCompactionTimeInMs = .5; - const int kNumSweepingTasks = 3; double compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); const int available_cores = Max( 1, static_cast<int>( - V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) - - kNumSweepingTasks - 1); + V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); int tasks; if (compaction_speed > 0) { tasks = 1 + static_cast<int>(live_bytes / compaction_speed / @@ -3429,12 +3431,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { for (Page* p : newspace_evacuation_candidates_) { if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); - sweeper().AddLatePage(p->owner()->identity(), p); + sweeper().AddPage(p->owner()->identity(), p); } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); p->ForAllFreeListCategories( [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); - sweeper().AddLatePage(p->owner()->identity(), p); + sweeper().AddPage(p->owner()->identity(), p); } } newspace_evacuation_candidates_.Rewind(0); @@ -3446,7 +3448,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { SkipList* list = p->skip_list(); if (list != NULL) list->Clear(); if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { - sweeper().AddLatePage(p->owner()->identity(), p); + sweeper().AddPage(p->owner()->identity(), p); p->ClearFlag(Page::COMPACTION_WAS_ABORTED); } } @@ -3745,19 +3747,11 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, } 
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { - DCHECK(!sweeping_in_progress_); + DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning()); PrepareToBeSweptPage(space, page); sweeping_list_[space].push_back(page); } -void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, - Page* page) { - DCHECK(sweeping_in_progress_); - PrepareToBeSweptPage(space, page); - late_pages_ = true; - AddSweepingPageSafe(space, page); -} - void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) { page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); @@ -3837,7 +3831,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { } } -void MarkCompactCollector::SweepSpaces() { +void MarkCompactCollector::StartSweepSpaces() { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); #ifdef DEBUG state_ = SWEEP_SPACES; diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index a5d4b0e642..7203ad05c0 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -316,24 +316,25 @@ class MarkCompactCollector { : heap_(heap), pending_sweeper_tasks_semaphore_(0), sweeping_in_progress_(false), - late_pages_(false), num_sweeping_tasks_(0) {} bool sweeping_in_progress() { return sweeping_in_progress_; } - bool contains_late_pages() { return late_pages_; } void AddPage(AllocationSpace space, Page* page); - void AddLatePage(AllocationSpace space, Page* page); int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes, int max_pages = 0); int ParallelSweepPage(Page* page, AllocationSpace identity); + // After calling this function sweeping is considered to be in progress + // and the main thread can sweep lazily, but the background sweeper tasks + // are not running yet. 
void StartSweeping(); - void StartSweepingHelper(AllocationSpace space_to_start); + void StartSweeperTasks(); void EnsureCompleted(); void EnsureNewSpaceCompleted(); - bool IsSweepingCompleted(); + bool AreSweeperTasksRunning(); + bool IsSweepingCompleted(AllocationSpace space); void SweepOrWaitUntilSweepingCompleted(Page* page); void AddSweptPageSafe(PagedSpace* space, Page* page); @@ -362,7 +363,6 @@ class MarkCompactCollector { SweptList swept_list_[kAllocationSpaces]; SweepingList sweeping_list_[kAllocationSpaces]; bool sweeping_in_progress_; - bool late_pages_; base::AtomicNumber<intptr_t> num_sweeping_tasks_; }; @@ -637,21 +637,10 @@ class MarkCompactCollector { void AbortTransitionArrays(); - // ----------------------------------------------------------------------- - // Phase 2: Sweeping to clear mark bits and free non-live objects for - // a non-compacting collection. - // - // Before: Live objects are marked and non-live objects are unmarked. - // - // After: Live objects are unmarked, non-live regions have been added to - // their space's free list. Active eden semispace is compacted by - // evacuation. - // - - // If we are not compacting the heap, we simply sweep the spaces except - // for the large object space, clearing mark bits and adding unmarked - // regions to each space's free list. - void SweepSpaces(); + // Starts sweeping of spaces by contributing on the main thread and setting + // up other pages for sweeping. Does not start sweeper tasks. + void StartSweepSpaces(); + void StartSweepSpace(PagedSpace* space); void EvacuateNewSpacePrologue(); @@ -674,9 +663,6 @@ class MarkCompactCollector { void ReleaseEvacuationCandidates(); - // Starts sweeping of a space by contributing on the main thread and setting - // up other pages for sweeping. 
- void StartSweepSpace(PagedSpace* space); #ifdef DEBUG friend class MarkObjectVisitor; diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc index 3a794a8c6b..26b29afaeb 100644 --- a/src/heap/spaces.cc +++ b/src/heap/spaces.cc @@ -395,11 +395,12 @@ void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() { bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) { MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector(); - // We cannot free memory chunks in new space while the sweeper is running - // since a sweeper thread might be stuck right before trying to lock the - // corresponding page. - return !chunk->InNewSpace() || (mc == nullptr) || !FLAG_concurrent_sweeping || - mc->sweeper().IsSweepingCompleted(); + // We cannot free a memory chunk in new space while the sweeper is running + // because the memory chunk can be in the queue of a sweeper task. + // Chunks in old generation are unmapped if they are empty. + DCHECK(chunk->InNewSpace() || chunk->SweepingDone()); + return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping || + mc->sweeper().IsSweepingCompleted(NEW_SPACE); } bool MemoryAllocator::CommitMemory(Address base, size_t size, diff --git a/src/heap/spaces.h b/src/heap/spaces.h index 8b26ae0138..7d379490d9 100644 --- a/src/heap/spaces.h +++ b/src/heap/spaces.h @@ -411,6 +411,10 @@ class MemoryChunk { return concurrent_sweeping_; } + bool SweepingDone() { + return concurrent_sweeping_state().Value() == kSweepingDone; + } + // Manage live byte count, i.e., count of bytes in black objects. 
inline void ResetLiveBytes(); inline void IncrementLiveBytes(int by); @@ -735,10 +739,6 @@ class Page : public MemoryChunk { DCHECK(SweepingDone()); } - bool SweepingDone() { - return concurrent_sweeping_state().Value() == kSweepingDone; - } - void ResetFreeListStatistics(); size_t AvailableInFreeList(); diff --git a/test/cctest/heap/heap-tester.h b/test/cctest/heap/heap-tester.h index a01de69291..42b761723e 100644 --- a/test/cctest/heap/heap-tester.h +++ b/test/cctest/heap/heap-tester.h @@ -32,6 +32,7 @@ V(Regress587004) \ V(Regress538257) \ V(Regress589413) \ + V(Regress658718) \ V(WriteBarriersInCopyJSObject) #define HEAP_TEST(Name) \ @@ -39,6 +40,11 @@ #Name, true, true); \ void v8::internal::HeapTester::Test##Name() +#define UNINITIALIZED_HEAP_TEST(Name) \ + CcTest register_test_##Name(v8::internal::HeapTester::Test##Name, __FILE__, \ + #Name, true, false); \ + void v8::internal::HeapTester::Test##Name() + #define THREADED_HEAP_TEST(Name) \ RegisterThreadedTest register_##Name(v8::internal::HeapTester::Test##Name, \ #Name); \ diff --git a/test/cctest/heap/test-page-promotion.cc b/test/cctest/heap/test-page-promotion.cc index b3ac4960a5..4673f2edcf 100644 --- a/test/cctest/heap/test-page-promotion.cc +++ b/test/cctest/heap/test-page-promotion.cc @@ -14,20 +14,22 @@ // src/type-feedback-vector-inl.h #include "src/type-feedback-vector-inl.h" #include "test/cctest/cctest.h" +#include "test/cctest/heap/heap-tester.h" #include "test/cctest/heap/heap-utils.h" namespace { -v8::Isolate* NewIsolateForPagePromotion() { +v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8, + int max_semi_space_size = 8) { i::FLAG_page_promotion = true; i::FLAG_page_promotion_threshold = 0; // % - i::FLAG_min_semi_space_size = 8; + i::FLAG_min_semi_space_size = min_semi_space_size; // We cannot optimize for size as we require a new space with more than one // page. 
i::FLAG_optimize_for_size = false; // Set max_semi_space_size because it could've been initialized by an // implication of optimize_for_size. - i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size; + i::FLAG_max_semi_space_size = max_semi_space_size; v8::Isolate::CreateParams create_params; create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); v8::Isolate* isolate = v8::Isolate::New(create_params); @@ -134,5 +136,38 @@ UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) { } } +UNINITIALIZED_HEAP_TEST(Regress658718) { + v8::Isolate* isolate = NewIsolateForPagePromotion(4, 8); + Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate); + { + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope handle_scope(isolate); + v8::Context::New(isolate)->Enter(); + Heap* heap = i_isolate->heap(); + heap->delay_sweeper_tasks_for_testing_ = true; + heap->new_space()->Grow(); + { + v8::HandleScope inner_handle_scope(isolate); + std::vector<Handle<FixedArray>> handles; + heap::SimulateFullSpace(heap->new_space(), &handles); + CHECK_GT(handles.size(), 0u); + // Last object in handles should definitely be on the last page which does + // not contain the age mark. + Handle<FixedArray> last_object = handles.back(); + Page* to_be_promoted_page = Page::FromAddress(last_object->address()); + CHECK(to_be_promoted_page->Contains(last_object->address())); + CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address())); + heap->CollectGarbage(OLD_SPACE, i::GarbageCollectionReason::kTesting); + CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address())); + CHECK(to_be_promoted_page->Contains(last_object->address())); + } + heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting); + heap->new_space()->Shrink(); + heap->memory_allocator()->unmapper()->WaitUntilCompleted(); + heap->mark_compact_collector()->sweeper().StartSweeperTasks(); + heap->mark_compact_collector()->EnsureSweepingCompleted(); + } +} + } // namespace internal } // namespace v8