From 65c9c2a2ddd42ead9c5d286d95feb6e4a0bfd82c Mon Sep 17 00:00:00 2001 From: "hpayer@chromium.org" Date: Thu, 21 Aug 2014 14:50:18 +0000 Subject: [PATCH] Remove conservative sweeping. BUG= R=jarin@chromium.org Review URL: https://codereview.chromium.org/479113004 git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23283 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/flag-definitions.h | 1 - src/heap-snapshot-generator.cc | 9 - src/heap/heap.cc | 8 +- src/heap/heap.h | 11 +- src/heap/mark-compact-inl.h | 1 - src/heap/mark-compact.cc | 315 +++++---------------------------- src/heap/mark-compact.h | 18 +- src/heap/spaces.cc | 22 +-- src/heap/spaces.h | 32 +--- src/heap/store-buffer.cc | 29 ++- 10 files changed, 76 insertions(+), 370 deletions(-) diff --git a/src/flag-definitions.h b/src/flag-definitions.h index f795457e67..03ee2a1738 100644 --- a/src/flag-definitions.h +++ b/src/flag-definitions.h @@ -526,7 +526,6 @@ DEFINE_BOOL(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_BOOL(track_gc_object_stats, false, "track object counts and memory usage") -DEFINE_BOOL(always_precise_sweeping, true, "always sweep precisely") DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping") DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping") DEFINE_INT(sweeper_threads, 0, diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc index aaac48ae1f..50fdec5557 100644 --- a/src/heap-snapshot-generator.cc +++ b/src/heap-snapshot-generator.cc @@ -2580,15 +2580,6 @@ bool HeapSnapshotGenerator::GenerateSnapshot() { #ifdef VERIFY_HEAP Heap* debug_heap = heap_; - CHECK(debug_heap->old_data_space()->swept_precisely()); - CHECK(debug_heap->old_pointer_space()->swept_precisely()); - CHECK(debug_heap->code_space()->swept_precisely()); - CHECK(debug_heap->cell_space()->swept_precisely()); - CHECK(debug_heap->property_cell_space()->swept_precisely()); - CHECK(debug_heap->map_space()->swept_precisely()); -#endif - -#ifdef VERIFY_HEAP debug_heap->Verify(); #endif diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 76046641cd..c7e1de398e 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -1273,14 +1273,10 @@ static void VerifyNonPointerSpacePointers(Heap* heap) { object = code_it.Next()) object->Iterate(&v); - // The old data space was normally swept conservatively so that the iterator - // doesn't work, so we normally skip the next bit. - if (heap->old_data_space()->swept_precisely()) { HeapObjectIterator data_it(heap->old_data_space()); for (HeapObject* object = data_it.Next(); object != NULL; object = data_it.Next()) object->Iterate(&v); - } } #endif // VERIFY_HEAP @@ -4242,9 +4238,7 @@ AllocationResult Heap::AllocateStruct(InstanceType type) { bool Heap::IsHeapIterable() { // TODO(hpayer): This function is not correct. Allocation folding in old // space breaks the iterability. 
- return (old_pointer_space()->swept_precisely() && - old_data_space()->swept_precisely() && - new_space_top_after_last_gc_ == new_space()->top()); + return new_space_top_after_last_gc_ == new_space()->top(); } diff --git a/src/heap/heap.h b/src/heap/heap.h index 6886ddd70c..dacf916edc 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -715,14 +715,11 @@ class Heap { const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); static const int kNoGCFlags = 0; - static const int kSweepPreciselyMask = 1; - static const int kReduceMemoryFootprintMask = 2; - static const int kAbortIncrementalMarkingMask = 4; + static const int kReduceMemoryFootprintMask = 1; + static const int kAbortIncrementalMarkingMask = 2; - // Making the heap iterable requires us to sweep precisely and abort any - // incremental marking as well. - static const int kMakeHeapIterableMask = - kSweepPreciselyMask | kAbortIncrementalMarkingMask; + // Making the heap iterable requires us to abort incremental marking. + static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask; // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is // non-zero, then the slower precise sweeper is used, which leaves the heap diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h index 934fce847d..742ffeacb9 100644 --- a/src/heap/mark-compact-inl.h +++ b/src/heap/mark-compact-inl.h @@ -23,7 +23,6 @@ MarkBit Marking::MarkBitFrom(Address addr) { void MarkCompactCollector::SetFlags(int flags) { - sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0); reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0); abort_incremental_marking_ = ((flags & Heap::kAbortIncrementalMarkingMask) != 0); diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index f114ba3f22..e4057ff4b5 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -41,7 +41,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) #ifdef DEBUG state_(IDLE), #endif - sweep_precisely_(false), reduce_memory_footprint_(false), abort_incremental_marking_(false), marking_parity_(ODD_MARKING_PARITY), @@ -200,7 +199,6 @@ static void VerifyEvacuation(NewSpace* space) { static void VerifyEvacuation(Heap* heap, PagedSpace* space) { - if (!space->swept_precisely()) return; if (FLAG_use_allocation_folding && (space == heap->old_pointer_space() || space == heap->old_data_space())) { return; @@ -3126,7 +3124,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { AlwaysAllocateScope always_allocate(isolate()); PagedSpace* space = static_cast(p->owner()); DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); - p->MarkSweptPrecisely(); + p->SetWasSwept(); int offsets[16]; @@ -3290,10 +3288,7 @@ static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start, } -// Sweep a space precisely. After this has been done the space can -// be iterated precisely, hitting only the live objects. Code space -// is always swept precisely because we want to be able to iterate -// over it. Map space is swept precisely, because it is not compacted. +// Sweeps a page. After sweeping the page can be iterated. // Slots in live objects pointing into evacuation candidates are updated // if requested. // Returns the size of the biggest continuous freed memory chunk in bytes. 
@@ -3301,8 +3296,8 @@ template -static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p, - ObjectVisitor* v) { +static int Sweep(PagedSpace* space, FreeList* free_list, Page* p, + ObjectVisitor* v) { DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, space->identity() == CODE_SPACE); @@ -3384,7 +3379,7 @@ static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p, // sweeping by the main thread. p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); } else { - p->MarkSweptPrecisely(); + p->SetWasSwept(); } return FreeList::GuaranteedAllocatable(static_cast(max_freed_bytes)); } @@ -3621,22 +3616,24 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { switch (space->identity()) { case OLD_DATA_SPACE: - SweepConservatively(space, NULL, p); + Sweep(space, NULL, p, + &updating_visitor); break; case OLD_POINTER_SPACE: - SweepPrecisely( - space, NULL, p, &updating_visitor); + Sweep(space, NULL, p, + &updating_visitor); break; case CODE_SPACE: if (FLAG_zap_code_space) { - SweepPrecisely( - space, NULL, p, &updating_visitor); + Sweep(space, NULL, p, + &updating_visitor); } else { - SweepPrecisely( - space, NULL, p, &updating_visitor); + Sweep(space, NULL, p, + &updating_visitor); } break; default: @@ -4119,182 +4116,6 @@ static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { } -static inline Address DigestFreeStart(Address approximate_free_start, - uint32_t free_start_cell) { - DCHECK(free_start_cell != 0); - - // No consecutive 1 bits. - DCHECK((free_start_cell & (free_start_cell << 1)) == 0); - - int offsets[16]; - uint32_t cell = free_start_cell; - int offset_of_last_live; - if ((cell & 0x80000000u) != 0) { - // This case would overflow below. - offset_of_last_live = 31; - } else { - // Remove all but one bit, the most significant. This is an optimization - // that may or may not be worthwhile. - cell |= cell >> 16; - cell |= cell >> 8; - cell |= cell >> 4; - cell |= cell >> 2; - cell |= cell >> 1; - cell = (cell + 1) >> 1; - int live_objects = MarkWordToObjectStarts(cell, offsets); - DCHECK(live_objects == 1); - offset_of_last_live = offsets[live_objects - 1]; - } - Address last_live_start = - approximate_free_start + offset_of_last_live * kPointerSize; - HeapObject* last_live = HeapObject::FromAddress(last_live_start); - Address free_start = last_live_start + last_live->Size(); - return free_start; -} - - -static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { - DCHECK(cell != 0); - - // No consecutive 1 bits. - DCHECK((cell & (cell << 1)) == 0); - - int offsets[16]; - if (cell == 0x80000000u) { // Avoid overflow below. - return block_address + 31 * kPointerSize; - } - uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; - DCHECK((first_set_bit & cell) == first_set_bit); - int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); - DCHECK(live_objects == 1); - USE(live_objects); - return block_address + offsets[0] * kPointerSize; -} - - -// Force instantiation of templatized SweepConservatively method for -// SWEEP_ON_MAIN_THREAD mode. -template int MarkCompactCollector::SweepConservatively< - MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*); - - -// Force instantiation of templatized SweepConservatively method for -// SWEEP_IN_PARALLEL mode. -template int MarkCompactCollector::SweepConservatively< - MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*); - - -// Sweeps a space conservatively. 
After this has been done the larger free -// spaces have been put on the free list and the smaller ones have been -// ignored and left untouched. A free space is always either ignored or put -// on the free list, never split up into two parts. This is important -// because it means that any FreeSpace maps left actually describe a region of -// memory that can be ignored when scanning. Dead objects other than free -// spaces will not contain the free space map. -template -int MarkCompactCollector::SweepConservatively(PagedSpace* space, - FreeList* free_list, Page* p) { - DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); - DCHECK( - (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) || - (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD && - free_list == NULL)); - - intptr_t freed_bytes = 0; - intptr_t max_freed_bytes = 0; - size_t size = 0; - - // Skip over all the dead objects at the start of the page and mark them free. - Address cell_base = 0; - MarkBit::CellType* cell = NULL; - MarkBitCellIterator it(p); - for (; !it.Done(); it.Advance()) { - cell_base = it.CurrentCellBase(); - cell = it.CurrentCell(); - if (*cell != 0) break; - } - - if (it.Done()) { - size = p->area_end() - p->area_start(); - freed_bytes = - Free(space, free_list, p->area_start(), static_cast(size)); - max_freed_bytes = Max(freed_bytes, max_freed_bytes); - DCHECK_EQ(0, p->LiveBytes()); - if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { - // When concurrent sweeping is active, the page will be marked after - // sweeping by the main thread. - p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); - } else { - p->MarkSweptConservatively(); - } - return FreeList::GuaranteedAllocatable(static_cast(max_freed_bytes)); - } - - // Grow the size of the start-of-page free space a little to get up to the - // first live object. - Address free_end = StartOfLiveObject(cell_base, *cell); - // Free the first free space. - size = free_end - p->area_start(); - freed_bytes = - Free(space, free_list, p->area_start(), static_cast(size)); - max_freed_bytes = Max(freed_bytes, max_freed_bytes); - - // The start of the current free area is represented in undigested form by - // the address of the last 32-word section that contained a live object and - // the marking bitmap for that cell, which describes where the live object - // started. Unless we find a large free space in the bitmap we will not - // digest this pair into a real address. We start the iteration here at the - // first word in the marking bit map that indicates a live object. - Address free_start = cell_base; - MarkBit::CellType free_start_cell = *cell; - - for (; !it.Done(); it.Advance()) { - cell_base = it.CurrentCellBase(); - cell = it.CurrentCell(); - if (*cell != 0) { - // We have a live object. Check approximately whether it is more than 32 - // words since the last live object. - if (cell_base - free_start > 32 * kPointerSize) { - free_start = DigestFreeStart(free_start, free_start_cell); - if (cell_base - free_start > 32 * kPointerSize) { - // Now that we know the exact start of the free space it still looks - // like we have a large enough free space to be worth bothering with. - // so now we need to find the start of the first live object at the - // end of the free space. 
- free_end = StartOfLiveObject(cell_base, *cell); - freed_bytes = Free(space, free_list, free_start, - static_cast(free_end - free_start)); - max_freed_bytes = Max(freed_bytes, max_freed_bytes); - } - } - // Update our undigested record of where the current free area started. - free_start = cell_base; - free_start_cell = *cell; - // Clear marking bits for current cell. - *cell = 0; - } - } - - // Handle the free space at the end of the page. - if (cell_base - free_start > 32 * kPointerSize) { - free_start = DigestFreeStart(free_start, free_start_cell); - freed_bytes = Free(space, free_list, free_start, - static_cast(p->area_end() - free_start)); - max_freed_bytes = Max(freed_bytes, max_freed_bytes); - } - - p->ResetLiveBytes(); - if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { - // When concurrent sweeping is active, the page will be marked after - // sweeping by the main thread. - p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); - } else { - p->MarkSweptConservatively(); - } - return FreeList::GuaranteedAllocatable(static_cast(max_freed_bytes)); -} - - int MarkCompactCollector::SweepInParallel(PagedSpace* space, int required_freed_bytes) { int max_freed = 0; @@ -4321,14 +4142,8 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { ? free_list_old_pointer_space_.get() : free_list_old_data_space_.get(); FreeList private_free_list(space); - if (space->swept_precisely()) { - max_freed = SweepPrecisely( - space, &private_free_list, page, NULL); - } else { - max_freed = SweepConservatively( - space, &private_free_list, page); - } + max_freed = Sweep(space, &private_free_list, page, NULL); free_list->Concatenate(&private_free_list); } return max_freed; @@ -4336,9 +4151,6 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { - space->set_swept_precisely(sweeper == PRECISE || - sweeper == CONCURRENT_PRECISE || - sweeper == PARALLEL_PRECISE); space->ClearStats(); // We defensively initialize end_of_unswept_pages_ here with the first page @@ -4356,8 +4168,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); // Clear sweeping flags indicating that marking bits are still intact. 
- p->ClearSweptPrecisely(); - p->ClearSweptConservatively(); + p->ClearWasSwept(); if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || p->IsEvacuationCandidate()) { @@ -4383,19 +4194,20 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { } switch (sweeper) { - case CONCURRENT_CONSERVATIVE: - case PARALLEL_CONSERVATIVE: { + case CONCURRENT_SWEEPING: + case PARALLEL_SWEEPING: if (!parallel_sweeping_active) { if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", + PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast(p)); } - SweepConservatively(space, NULL, p); + Sweep(space, NULL, p, NULL); pages_swept++; parallel_sweeping_active = true; } else { if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", + PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n", reinterpret_cast(p)); } p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); @@ -4403,42 +4215,19 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { } space->set_end_of_unswept_pages(p); break; - } - case CONCURRENT_PRECISE: - case PARALLEL_PRECISE: - if (!parallel_sweeping_active) { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", - reinterpret_cast(p)); - } - SweepPrecisely(space, NULL, p, NULL); - pages_swept++; - parallel_sweeping_active = true; - } else { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", - reinterpret_cast(p)); - } - p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); - space->IncreaseUnsweptFreeBytes(p); - } - space->set_end_of_unswept_pages(p); - break; - case PRECISE: { + case SEQUENTIAL_SWEEPING: { if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", - reinterpret_cast(p)); + PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast(p)); } if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { - SweepPrecisely(space, NULL, p, NULL); + Sweep(space, NULL, p, NULL); } else if (space->identity() == CODE_SPACE) { - SweepPrecisely(space, NULL, p, NULL); + Sweep(space, NULL, p, NULL); } else { - SweepPrecisely(space, NULL, p, NULL); + Sweep(space, NULL, p, NULL); } pages_swept++; break; @@ -4458,17 +4247,14 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) { - return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || - type == MarkCompactCollector::CONCURRENT_CONSERVATIVE || - type == MarkCompactCollector::PARALLEL_PRECISE || - type == MarkCompactCollector::CONCURRENT_PRECISE; + return type == MarkCompactCollector::PARALLEL_SWEEPING || + type == MarkCompactCollector::CONCURRENT_SWEEPING; } static bool ShouldWaitForSweeperThreads( MarkCompactCollector::SweeperType type) { - return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || - type == MarkCompactCollector::PARALLEL_PRECISE; + return type == MarkCompactCollector::PARALLEL_SWEEPING; } @@ -4482,16 +4268,9 @@ void MarkCompactCollector::SweepSpaces() { #ifdef DEBUG state_ = SWEEP_SPACES; #endif - SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; - if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; - if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; - if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) { - how_to_sweep = PARALLEL_PRECISE; - } - if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) { - how_to_sweep = CONCURRENT_PRECISE; - } - if (sweep_precisely_) how_to_sweep = 
PRECISE; + SweeperType how_to_sweep = CONCURRENT_SWEEPING; + if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING; + if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING; MoveEvacuationCandidatesToEndOfPagesList(); @@ -4522,14 +4301,14 @@ void MarkCompactCollector::SweepSpaces() { { GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE); - SweepSpace(heap()->code_space(), PRECISE); + SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING); } { GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_CELL); - SweepSpace(heap()->cell_space(), PRECISE); - SweepSpace(heap()->property_cell_space(), PRECISE); + SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING); + SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING); } EvacuateNewSpaceAndCandidates(); @@ -4540,7 +4319,7 @@ void MarkCompactCollector::SweepSpaces() { { GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP); - SweepSpace(heap()->map_space(), PRECISE); + SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING); } // Deallocate unmarked objects and clear marked bits for marked objects. @@ -4562,11 +4341,7 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { Page* p = it.next(); if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); - if (space->swept_precisely()) { - p->MarkSweptPrecisely(); - } else { - p->MarkSweptConservatively(); - } + p->SetWasSwept(); } DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); } diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index 7a8b1063a9..f7075d6e69 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -544,11 +544,9 @@ class MarkCompactCollector { void EnableCodeFlushing(bool enable); enum SweeperType { - PARALLEL_CONSERVATIVE, - CONCURRENT_CONSERVATIVE, - PARALLEL_PRECISE, - CONCURRENT_PRECISE, - PRECISE + PARALLEL_SWEEPING, + CONCURRENT_SWEEPING, + SEQUENTIAL_SWEEPING }; enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL }; @@ -561,12 +559,6 @@ class MarkCompactCollector { void VerifyOmittedMapChecks(); #endif - // Sweep a single page from the given space conservatively. - // Returns the size of the biggest continuous freed memory chunk in bytes. - template - static int SweepConservatively(PagedSpace* space, FreeList* free_list, - Page* p); - INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) { return Page::FromAddress(reinterpret_cast
(anchor)) ->ShouldSkipEvacuationSlotRecording(); @@ -693,10 +685,6 @@ class MarkCompactCollector { CollectorState state_; #endif - // Global flag that forces sweeping to be precise, so we can traverse the - // heap. - bool sweep_precisely_; - bool reduce_memory_footprint_; bool abort_incremental_marking_; diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc index 5dd24d3949..ff55c89e5d 100644 --- a/src/heap/spaces.cc +++ b/src/heap/spaces.cc @@ -47,18 +47,13 @@ HeapObjectIterator::HeapObjectIterator(Page* page, owner == page->heap()->code_space()); Initialize(reinterpret_cast(owner), page->area_start(), page->area_end(), kOnePageOnly, size_func); - DCHECK(page->WasSweptPrecisely() || - (static_cast(owner)->swept_precisely() && - page->SweepingCompleted())); + DCHECK(page->WasSwept() || page->SweepingCompleted()); } void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end, HeapObjectIterator::PageMode mode, HeapObjectCallback size_f) { - // Check that we actually can iterate this space. - DCHECK(space->swept_precisely()); - space_ = space; cur_addr_ = cur; cur_end_ = end; @@ -83,9 +78,7 @@ bool HeapObjectIterator::AdvanceToNextPage() { if (cur_page == space_->anchor()) return false; cur_addr_ = cur_page->area_start(); cur_end_ = cur_page->area_end(); - DCHECK(cur_page->WasSweptPrecisely() || - (static_cast(cur_page->owner())->swept_precisely() && - cur_page->SweepingCompleted())); + DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted()); return true; } @@ -459,7 +452,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, chunk->ResetLiveBytes(); Bitmap::Clear(chunk); chunk->initialize_scan_on_scavenge(false); - chunk->SetFlag(WAS_SWEPT_PRECISELY); + chunk->SetFlag(WAS_SWEPT); DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); @@ -886,7 +879,6 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, Executability executable) : Space(heap, id, executable), free_list_(this), - swept_precisely_(true), unswept_free_bytes_(0), end_of_unswept_pages_(NULL), emergency_memory_(NULL) { @@ -936,7 +928,7 @@ size_t PagedSpace::CommittedPhysicalMemory() { Object* PagedSpace::FindObject(Address addr) { - // Note: this function can only be called on precisely swept spaces. + // Note: this function can only be called on iterable spaces. DCHECK(!heap()->mark_compact_collector()->in_use()); if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found. @@ -1129,9 +1121,6 @@ void PagedSpace::Print() {} #ifdef VERIFY_HEAP void PagedSpace::Verify(ObjectVisitor* visitor) { - // We can only iterate over the pages if they were swept precisely. 
- if (!swept_precisely_) return; - bool allocation_pointer_found_in_space = (allocation_info_.top() == allocation_info_.limit()); PageIterator page_iterator(this); @@ -1141,7 +1130,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) { if (page == Page::FromAllocationTop(allocation_info_.top())) { allocation_pointer_found_in_space = true; } - CHECK(page->WasSweptPrecisely()); + CHECK(page->WasSwept()); HeapObjectIterator it(page, NULL); Address end_of_previous_object = page->area_start(); Address top = page->area_end(); @@ -2737,7 +2726,6 @@ void PagedSpace::ReportStatistics() { ", available: %" V8_PTR_PREFIX "d, %%%d\n", Capacity(), Waste(), Available(), pct); - if (!swept_precisely_) return; if (heap()->mark_compact_collector()->sweeping_in_progress()) { heap()->mark_compact_collector()->EnsureSweepingCompleted(); } diff --git a/src/heap/spaces.h b/src/heap/spaces.h index 312d75f52e..16261b3670 100644 --- a/src/heap/spaces.h +++ b/src/heap/spaces.h @@ -373,12 +373,9 @@ class MemoryChunk { EVACUATION_CANDIDATE, RESCAN_ON_EVACUATION, - // Pages swept precisely can be iterated, hitting only the live objects. - // Whereas those swept conservatively cannot be iterated over. Both flags - // indicate that marking bits have been cleared by the sweeper, otherwise - // marking bits are still intact. - WAS_SWEPT_PRECISELY, - WAS_SWEPT_CONSERVATIVELY, + // WAS_SWEPT indicates that marking bits have been cleared by the sweeper, + // otherwise marking bits are still intact. + WAS_SWEPT, // Large objects can have a progress bar in their page header. These object // are scanned in increments and will be kept black while being scanned. @@ -765,15 +762,9 @@ class Page : public MemoryChunk { void InitializeAsAnchor(PagedSpace* owner); - bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } - bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } - bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } - - void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } - void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } - - void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } - void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } + bool WasSwept() { return IsFlagSet(WAS_SWEPT); } + void SetWasSwept() { SetFlag(WAS_SWEPT); } + void ClearWasSwept() { ClearFlag(WAS_SWEPT); } void ResetFreeListStatistics(); @@ -1830,14 +1821,11 @@ class PagedSpace : public Space { static void ResetCodeStatistics(Isolate* isolate); #endif - bool swept_precisely() { return swept_precisely_; } - void set_swept_precisely(bool b) { swept_precisely_ = b; } - // Evacuation candidates are swept by evacuator. Needs to return a valid // result before _and_ after evacuation has finished. static bool ShouldBeSweptBySweeperThreads(Page* p) { return !p->IsEvacuationCandidate() && - !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely(); + !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept(); } void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; } @@ -1907,12 +1895,8 @@ class PagedSpace : public Space { // Normal allocation information. AllocationInfo allocation_info_; - // This space was swept precisely, hence it is iterable. - bool swept_precisely_; - // The number of free bytes which could be reclaimed by advancing the - // concurrent sweeper threads. This is only an estimation because concurrent - // sweeping is done conservatively. + // concurrent sweeper threads. 
intptr_t unswept_free_bytes_; // The sweeper threads iterate over the list of pointer and data space pages diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc index 48e98a3dec..ab642e371c 100644 --- a/src/heap/store-buffer.cc +++ b/src/heap/store-buffer.cc @@ -477,10 +477,8 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback, } else { Page* page = reinterpret_cast(chunk); PagedSpace* owner = reinterpret_cast(page->owner()); - Address start = page->area_start(); - Address end = page->area_end(); if (owner == heap_->map_space()) { - DCHECK(page->WasSweptPrecisely()); + DCHECK(page->WasSwept()); HeapObjectIterator iterator(page, NULL); for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; heap_object = iterator.Next()) { @@ -504,24 +502,17 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback, heap_->mark_compact_collector()->EnsureSweepingCompleted(); } } - // TODO(hpayer): remove the special casing and merge map and pointer - // space handling as soon as we removed conservative sweeping. CHECK(page->owner() == heap_->old_pointer_space()); - if (heap_->old_pointer_space()->swept_precisely()) { - HeapObjectIterator iterator(page, NULL); - for (HeapObject* heap_object = iterator.Next(); - heap_object != NULL; heap_object = iterator.Next()) { - // We iterate over objects that contain new space pointers only. - if (heap_object->MayContainNewSpacePointers()) { - FindPointersToNewSpaceInRegion( - heap_object->address() + HeapObject::kHeaderSize, - heap_object->address() + heap_object->Size(), - slot_callback, clear_maps); - } + HeapObjectIterator iterator(page, NULL); + for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; + heap_object = iterator.Next()) { + // We iterate over objects that contain new space pointers only. + if (heap_object->MayContainNewSpacePointers()) { + FindPointersToNewSpaceInRegion( + heap_object->address() + HeapObject::kHeaderSize, + heap_object->address() + heap_object->Size(), slot_callback, + clear_maps); } - } else { - FindPointersToNewSpaceInRegion(start, end, slot_callback, - clear_maps); } } }
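
The patch collapses the page-level sweeping state from two flags (WAS_SWEPT_PRECISELY, WAS_SWEPT_CONSERVATIVELY) into a single WAS_SWEPT flag, and pages swept by sweeper threads are finalized on the main thread once they reach SWEEPING_FINALIZE. The following standalone C++ sketch mirrors that simplified state machine; PageSketch, FinalizeParallelSweep and the enum names are illustrative stand-ins, not V8 identifiers.

#include <cassert>
#include <cstdint>

enum class ParallelSweepingState { kDone, kPending, kInProgress, kFinalize };

class PageSketch {
 public:
  // Single sweep flag: set once marking bits have been cleared by the sweeper.
  bool WasSwept() const { return (flags_ & kWasSwept) != 0; }
  void SetWasSwept() { flags_ |= kWasSwept; }
  void ClearWasSwept() { flags_ &= ~kWasSwept; }

  ParallelSweepingState parallel_sweeping() const { return parallel_sweeping_; }
  void set_parallel_sweeping(ParallelSweepingState s) { parallel_sweeping_ = s; }

 private:
  static const uint32_t kWasSwept = 1u << 0;
  uint32_t flags_ = 0;
  ParallelSweepingState parallel_sweeping_ = ParallelSweepingState::kDone;
};

// Mirrors ParallelSweepSpaceComplete(): a page finished by a sweeper thread is
// marked swept by the main thread, with no precise/conservative distinction.
void FinalizeParallelSweep(PageSketch* page) {
  if (page->parallel_sweeping() == ParallelSweepingState::kFinalize) {
    page->set_parallel_sweeping(ParallelSweepingState::kDone);
    page->SetWasSwept();
  }
  assert(page->parallel_sweeping() == ParallelSweepingState::kDone);
}

int main() {
  PageSketch page;
  page.ClearWasSwept();  // start of a GC cycle: marking bits still intact
  page.set_parallel_sweeping(ParallelSweepingState::kFinalize);
  FinalizeParallelSweep(&page);
  assert(page.WasSwept());
  return 0;
}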
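
With conservative sweeping gone, SweeperType shrinks to PARALLEL_SWEEPING, CONCURRENT_SWEEPING and SEQUENTIAL_SWEEPING, and SweepSpaces() chooses a mode purely from the --parallel-sweeping / --concurrent-sweeping flags. A minimal sketch of that selection and of the ShouldStartSweeperThreads / ShouldWaitForSweeperThreads predicates; the Flags struct and ChooseSweeper are stand-in names for illustration only.

#include <cstdio>

enum SweeperType { PARALLEL_SWEEPING, CONCURRENT_SWEEPING, SEQUENTIAL_SWEEPING };

// Stand-ins for FLAG_parallel_sweeping / FLAG_concurrent_sweeping.
struct Flags {
  bool parallel_sweeping = false;
  bool concurrent_sweeping = true;
};

SweeperType ChooseSweeper(const Flags& flags) {
  SweeperType how_to_sweep = CONCURRENT_SWEEPING;
  if (flags.parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
  if (flags.concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;  // wins if both are set
  return how_to_sweep;
}

// Mirrors the two predicates in the patch: both parallel and concurrent modes
// start sweeper threads, but only the parallel mode blocks the GC on them.
bool ShouldStartSweeperThreads(SweeperType type) {
  return type == PARALLEL_SWEEPING || type == CONCURRENT_SWEEPING;
}
bool ShouldWaitForSweeperThreads(SweeperType type) {
  return type == PARALLEL_SWEEPING;
}

int main() {
  Flags flags;
  SweeperType type = ChooseSweeper(flags);
  std::printf("start threads: %d, wait for them: %d\n",
              ShouldStartSweeperThreads(type), ShouldWaitForSweeperThreads(type));
  return 0;
}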
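
On the heap API side, dropping kSweepPreciselyMask renumbers the remaining GC flag bits and reduces kMakeHeapIterableMask to kAbortIncrementalMarkingMask alone, so SetFlags() no longer derives a sweep_precisely_ bit. A small self-contained sketch of the resulting bit handling; CollectorFlagsSketch is an illustrative stand-in for MarkCompactCollector::SetFlags(), not V8 code.

#include <cassert>

static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
static const int kAbortIncrementalMarkingMask = 2;
// Making the heap iterable now only requires aborting incremental marking.
static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

struct CollectorFlagsSketch {
  bool reduce_memory_footprint = false;
  bool abort_incremental_marking = false;

  // Mirrors SetFlags() after the sweep_precisely_ bit was removed.
  void SetFlags(int flags) {
    reduce_memory_footprint = (flags & kReduceMemoryFootprintMask) != 0;
    abort_incremental_marking = (flags & kAbortIncrementalMarkingMask) != 0;
  }
};

int main() {
  CollectorFlagsSketch collector;
  collector.SetFlags(kMakeHeapIterableMask);
  assert(collector.abort_incremental_marking);
  assert(!collector.reduce_memory_footprint);
  return 0;
}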