Remove sequential sweeping mode and perform lazy sweeping when no sweeper threads are active.
BUG=
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/396443004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22394 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Commit: 9180b1a319
Parent: 319e5226e9
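The core of the patch is a rename of the sweeping-progress API plus lazy completion of sweeping on the main thread. Callers that previously checked IsConcurrentSweepingInProgress() and then called WaitUntilSweepingCompleted() now follow the pattern below (this is only a summary of the hunks that follow, using the identifiers from the patch):

    if (collector->sweeping_in_progress()) {
      collector->EnsureSweepingCompleted();
    }

EnsureSweepingCompleted() finishes any remaining sweeping work on the calling thread when no sweeper threads are active, which is what makes the dedicated sequential sweeping mode unnecessary.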
@@ -3312,9 +3312,8 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
   // pages is set after sweeping all pages.
   return (!is_in_old_pointer_space && !is_in_old_data_space) ||
          page->WasSwept() ||
-         (mark_compact_collector()->AreSweeperThreadsActivated() &&
-          page->parallel_sweeping() <=
-              MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+         (page->parallel_sweeping() <=
+          MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
 }

@@ -4339,8 +4338,8 @@ bool Heap::IdleNotification(int hint) {
   // If the IdleNotifcation is called with a large hint we will wait for
   // the sweepter threads here.
   if (hint >= kMinHintForFullGC &&
-      mark_compact_collector()->IsConcurrentSweepingInProgress()) {
-    mark_compact_collector()->WaitUntilSweepingCompleted();
+      mark_compact_collector()->sweeping_in_progress()) {
+    mark_compact_collector()->EnsureSweepingCompleted();
   }

   return false;
@@ -536,7 +536,7 @@ void IncrementalMarking::Start(CompactionFlag flag) {

   ResetStepCounters();

-  if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
     StartMarking(flag);
   } else {
     if (FLAG_trace_incremental_marking) {
@@ -883,11 +883,11 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
   }

   if (state_ == SWEEPING) {
-    if (heap_->mark_compact_collector()->IsConcurrentSweepingInProgress() &&
+    if (heap_->mark_compact_collector()->sweeping_in_progress() &&
         heap_->mark_compact_collector()->IsSweepingCompleted()) {
-      heap_->mark_compact_collector()->WaitUntilSweepingCompleted();
+      heap_->mark_compact_collector()->EnsureSweepingCompleted();
     }
-    if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+    if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
       bytes_scanned_ = 0;
       StartMarking(PREVENT_COMPACTION);
     }
@@ -1558,8 +1558,8 @@ void Isolate::Deinit() {
   sweeper_thread_ = NULL;

   if (FLAG_job_based_sweeping &&
-      heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
-    heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
+      heap_.mark_compact_collector()->sweeping_in_progress()) {
+    heap_.mark_compact_collector()->EnsureSweepingCompleted();
   }

   if (FLAG_hydrogen_stats) GetHStatistics()->Print();
@@ -45,7 +45,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) :  // NOLINT
       marking_parity_(ODD_MARKING_PARITY),
       compacting_(false),
       was_marked_incrementally_(false),
-      sweeping_pending_(false),
+      sweeping_in_progress_(false),
       pending_sweeper_jobs_semaphore_(0),
       sequential_sweeping_(false),
       tracer_(NULL),
@@ -573,7 +573,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
 void MarkCompactCollector::StartSweeperThreads() {
   ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
   ASSERT(free_list_old_data_space_.get()->IsEmpty());
-  sweeping_pending_ = true;
+  sweeping_in_progress_ = true;
   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
     isolate()->sweeper_threads()[i]->StartSweeping();
   }
@@ -588,8 +588,17 @@ void MarkCompactCollector::StartSweeperThreads() {
 }


-void MarkCompactCollector::WaitUntilSweepingCompleted() {
-  ASSERT(sweeping_pending_ == true);
+void MarkCompactCollector::EnsureSweepingCompleted() {
+  ASSERT(sweeping_in_progress_ == true);
+
+  // If sweeping is not completed, we try to complete it here. If we do not
+  // have sweeper threads we have to complete since we do not have a good
+  // indicator for a swept space in that case.
+  if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
+    SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
+    SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
+  }
+
   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
     isolate()->sweeper_threads()[i]->WaitForSweeperThread();
   }
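As a standalone illustration of the pattern the new EnsureSweepingCompleted() implements above, here is a minimal plain-C++ sketch. It is not V8 code; Page, num_sweeper_threads, and unswept_pages are hypothetical stand-ins. If no sweeper threads exist, or they have not finished, the calling thread completes the remaining sweeping itself.

    // Minimal sketch of lazy main-thread sweeping; hypothetical types, not V8.
    #include <vector>

    struct Page {};

    struct LazySweeperSketch {
      bool sweeping_in_progress = false;
      int num_sweeper_threads = 0;        // 0 => only lazy main-thread sweeping
      std::vector<Page*> unswept_pages;

      bool IsSweepingCompleted() const { return unswept_pages.empty(); }

      void SweepPage(Page* page) { (void)page; /* free dead objects on the page */ }

      void EnsureSweepingCompleted() {
        // Complete the remaining work on the calling thread if nobody else will.
        if (num_sweeper_threads == 0 || !IsSweepingCompleted()) {
          while (!unswept_pages.empty()) {
            SweepPage(unswept_pages.back());
            unswept_pages.pop_back();
          }
        }
        sweeping_in_progress = false;
      }
    };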
@@ -599,7 +608,7 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
     pending_sweeper_jobs_semaphore_.Wait();
   }
   ParallelSweepSpacesComplete();
-  sweeping_pending_ = false;
+  sweeping_in_progress_ = false;
   RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
   RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
   heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
@@ -613,6 +622,7 @@ bool MarkCompactCollector::IsSweepingCompleted() {
       return false;
     }
   }

+  if (FLAG_job_based_sweeping) {
     if (!pending_sweeper_jobs_semaphore_.WaitFor(
             base::TimeDelta::FromSeconds(0))) {
@@ -620,6 +630,7 @@ bool MarkCompactCollector::IsSweepingCompleted() {
     }
     pending_sweeper_jobs_semaphore_.Signal();
+  }

   return true;
 }

@@ -648,12 +659,6 @@ bool MarkCompactCollector::AreSweeperThreadsActivated() {
 }


-bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) {
-  return (space == NULL || space->is_swept_concurrently()) &&
-         sweeping_pending_;
-}
-
-
 void Marking::TransferMark(Address old_start, Address new_start) {
   // This is only used when resizing an object.
   ASSERT(MemoryChunk::FromAddress(old_start) ==
@@ -959,9 +964,9 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {

   ASSERT(!FLAG_never_compact || !FLAG_always_compact);

-  if (IsConcurrentSweepingInProgress()) {
+  if (sweeping_in_progress()) {
     // Instead of waiting we could also abort the sweeper threads here.
-    WaitUntilSweepingCompleted();
+    EnsureSweepingCompleted();
   }

   // Clear marking bits if incremental marking is aborted.
@@ -4006,7 +4011,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
                                                 static_cast<int>(size));
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     ASSERT_EQ(0, p->LiveBytes());
-    return freed_bytes;
+    return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
   }

   // Grow the size of the start-of-page free space a little to get up to the
@@ -4063,7 +4068,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   }

   p->ResetLiveBytes();
-  return max_freed_bytes;
+  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }

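Both exits of SweepConservatively now return FreeList::GuaranteedAllocatable(max_freed_bytes) instead of the raw number of freed bytes. This lets a caller compare the result directly against the size it needs, as the allocation slow path does further below (summary only, identifiers as in the patch):

    int free_chunk = collector->SweepInParallel(this, size_in_bytes);
    if (free_chunk >= size_in_bytes) {
      // The free list can now satisfy an allocation of this size.
      HeapObject* object = free_list_.Allocate(size_in_bytes);
      if (object != NULL) return object;
    }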
@@ -4097,7 +4102,6 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,

 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_is_iterable(sweeper == PRECISE);
-  space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE);
   space->ClearStats();

   // We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4142,15 +4146,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
     }

     switch (sweeper) {
-      case CONSERVATIVE: {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
-        pages_swept++;
-        break;
-      }
       case CONCURRENT_CONSERVATIVE:
       case PARALLEL_CONSERVATIVE: {
         if (!parallel_sweeping_active) {
@@ -4212,11 +4207,10 @@ void MarkCompactCollector::SweepSpaces() {
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
-  SweeperType how_to_sweep = CONSERVATIVE;
-  if (AreSweeperThreadsActivated()) {
-    if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
-    if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
-  }
+  SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
+  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;

   if (sweep_precisely_) how_to_sweep = PRECISE;

   MoveEvacuationCandidatesToEndOfPagesList();
@@ -4238,7 +4232,7 @@ void MarkCompactCollector::SweepSpaces() {
     }

     if (how_to_sweep == PARALLEL_CONSERVATIVE) {
-      WaitUntilSweepingCompleted();
+      EnsureSweepingCompleted();
     }
   }
   RemoveDeadInvalidatedCode();
@@ -570,7 +570,6 @@ class MarkCompactCollector {
   void EnableCodeFlushing(bool enable);

   enum SweeperType {
-    CONSERVATIVE,
     PARALLEL_CONSERVATIVE,
     CONCURRENT_CONSERVATIVE,
     PRECISE
@@ -665,18 +664,19 @@ class MarkCompactCollector {
   // then the whole given space is swept.
   int SweepInParallel(PagedSpace* space, int required_freed_bytes);

-  void WaitUntilSweepingCompleted();
+  void EnsureSweepingCompleted();

   // If sweeper threads are not active this method will return true. If
   // this is a latency issue we should be smarter here. Otherwise, it will
   // return true if the sweeper threads are done processing the pages.
   bool IsSweepingCompleted();

   void RefillFreeList(PagedSpace* space);

   bool AreSweeperThreadsActivated();

-  // If a paged space is passed in, this method checks if the given space is
-  // swept concurrently. Otherwise, this method checks if concurrent sweeping
-  // is in progress right now on any space.
-  bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL);
+  // Checks if sweeping is in progress right now on any space.
+  bool sweeping_in_progress() { return sweeping_in_progress_; }

   void set_sequential_sweeping(bool sequential_sweeping) {
     sequential_sweeping_ = sequential_sweeping;
@@ -739,7 +739,7 @@ class MarkCompactCollector {
   bool was_marked_incrementally_;

   // True if concurrent or parallel sweeping is currently in progress.
-  bool sweeping_pending_;
+  bool sweeping_in_progress_;

   base::Semaphore pending_sweeper_jobs_semaphore_;

@@ -936,7 +936,6 @@ PagedSpace::PagedSpace(Heap* heap,
     : Space(heap, id, executable),
       free_list_(this),
       is_iterable_(true),
-      is_swept_concurrently_(false),
       unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL) {
   if (id == CODE_SPACE) {
@@ -2547,8 +2546,8 @@ void PagedSpace::PrepareForMarkCompact() {


 intptr_t PagedSpace::SizeOfObjects() {
-  ASSERT(heap()->mark_compact_collector()->
-      IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0));
+  ASSERT(heap()->mark_compact_collector()->sweeping_in_progress() ||
+         (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
 }

@@ -2578,24 +2577,12 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }


-HeapObject* PagedSpace::EnsureSweepingProgress(
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
     int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();

-  if (collector->IsConcurrentSweepingInProgress(this)) {
-    // If sweeping is still in progress try to sweep pages on the main thread.
-    int free_chunk =
-        collector->SweepInParallel(this, size_in_bytes);
-    if (free_chunk >= size_in_bytes) {
-      HeapObject* object = free_list_.Allocate(size_in_bytes);
-      // We should be able to allocate an object here since we just freed that
-      // much memory.
-      ASSERT(object != NULL);
-      if (object != NULL) return object;
-    }
-
+  if (collector->sweeping_in_progress()) {
     // Wait for the sweeper threads here and complete the sweeping phase.
-    collector->WaitUntilSweepingCompleted();
+    collector->EnsureSweepingCompleted();

     // After waiting for the sweeper threads, there may be new free-list
     // entries.
@@ -2608,14 +2595,28 @@ HeapObject* PagedSpace::EnsureSweepingProgress(
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.

-  // If sweeper threads are active, try to re-fill the free-lists.
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  if (collector->IsConcurrentSweepingInProgress(this)) {
+  // Sweeping is still in progress.
+  if (collector->sweeping_in_progress()) {
+    // First try to refill the free-list, concurrent sweeper threads
+    // may have freed some objects in the meantime.
     collector->RefillFreeList(this);

     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
+
+    // If sweeping is still in progress try to sweep pages on the main thread.
+    int free_chunk =
+        collector->SweepInParallel(this, size_in_bytes);
+    collector->RefillFreeList(this);
+    if (free_chunk >= size_in_bytes) {
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      // We should be able to allocate an object here since we just freed that
+      // much memory.
+      ASSERT(object != NULL);
+      if (object != NULL) return object;
+    }
   }

   // Free list allocation failed and there is no next page. Fail if we have
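The new slow path tries progressively more expensive ways to make memory available before giving up. A condensed standalone sketch of that order follows; the types and member names here are hypothetical stand-ins, not V8 code, and only the ordering of the steps is taken from the hunk above.

    // Sketch of the allocation slow path's retry order; hypothetical types.
    #include <cstddef>

    struct AllocatorSketch {
      bool sweeping_in_progress = false;

      // Stubs standing in for the real operations.
      void* TryFreeListAllocate(std::size_t size) { (void)size; return nullptr; }
      void RefillFreeList() {}
      std::size_t SweepOnMainThread(std::size_t needed) { (void)needed; return 0; }

      void* SlowAllocate(std::size_t size) {
        if (sweeping_in_progress) {
          // 1. Concurrent sweepers may already have produced free memory.
          RefillFreeList();
          if (void* result = TryFreeListAllocate(size)) return result;

          // 2. Otherwise sweep unswept pages lazily on this thread until the
          //    guaranteed-allocatable amount covers the request, then retry.
          std::size_t free_chunk = SweepOnMainThread(size);
          RefillFreeList();
          if (free_chunk >= size) {
            if (void* result = TryFreeListAllocate(size)) return result;
          }
        }
        // 3. Fall through: grow the space or wait for the sweepers (not shown).
        return nullptr;
      }
    };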
@@ -2625,7 +2626,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       && heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements form their free-lists.
-    HeapObject* object = EnsureSweepingProgress(size_in_bytes);
+    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
     if (object != NULL) return object;
   }

@@ -2638,7 +2639,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // If sweeper threads are active, wait for them at that point and steal
   // elements form their free-lists. Allocation may still fail their which
   // would indicate that there is not enough memory for the given allocation.
-  return EnsureSweepingProgress(size_in_bytes);
+  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
 }

src/spaces.h
@@ -1619,6 +1619,21 @@ class FreeList {
   // aligned, and the size should be a non-zero multiple of the word size.
   int Free(Address start, int size_in_bytes);

+  // This method returns how much memory can be allocated after freeing
+  // maximum_freed memory.
+  static inline int GuaranteedAllocatable(int maximum_freed) {
+    if (maximum_freed < kSmallListMin) {
+      return 0;
+    } else if (maximum_freed <= kSmallListMax) {
+      return kSmallAllocationMax;
+    } else if (maximum_freed <= kMediumListMax) {
+      return kMediumAllocationMax;
+    } else if (maximum_freed <= kLargeListMax) {
+      return kLargeAllocationMax;
+    }
+    return maximum_freed;
+  }
+
   // Allocate a block of size 'size_in_bytes' from the free list. The block
   // is unitialized. A failure is returned if no block is available. The
   // number of bytes lost to fragmentation is returned in the output parameter
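GuaranteedAllocatable() rounds the largest contiguous freed block down to the amount the free list can actually promise to an allocation: anything below the small-list minimum counts as nothing, a block that only fits a given size class guarantees no more than that class's allocation maximum, and only very large blocks are passed through unchanged. A hedged illustration with made-up bounds follows; the real kSmallListMin, kSmallListMax, and related constants are defined elsewhere in spaces.h and are not shown in this diff.

    // Hypothetical bounds for illustration only; V8's actual constants differ.
    static int GuaranteedAllocatableSketch(int maximum_freed) {
      const int kSmallListMin = 32, kSmallListMax = 255, kSmallAllocationMax = 32;
      const int kMediumListMax = 2047, kMediumAllocationMax = 256;
      const int kLargeListMax = 16383, kLargeAllocationMax = 2048;
      if (maximum_freed < kSmallListMin) return 0;
      if (maximum_freed <= kSmallListMax) return kSmallAllocationMax;
      if (maximum_freed <= kMediumListMax) return kMediumAllocationMax;
      if (maximum_freed <= kLargeListMax) return kLargeAllocationMax;
      return maximum_freed;  // very large blocks are usable as-is
    }
    // With these made-up bounds, GuaranteedAllocatableSketch(300) == 256: a
    // 300-byte block lands in the "medium" class, so only a medium-sized
    // allocation is guaranteed to succeed from it.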
@@ -1905,9 +1920,6 @@ class PagedSpace : public Space {
   bool is_iterable() { return is_iterable_; }
   void set_is_iterable(bool b) { is_iterable_ = b; }

-  bool is_swept_concurrently() { return is_swept_concurrently_; }
-  void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; }
-
   // Evacuation candidates are swept by evacuator. Needs to return a valid
   // result before _and_ after evacuation has finished.
   static bool ShouldBeSweptBySweeperThreads(Page* p) {
@@ -1992,9 +2004,6 @@ class PagedSpace : public Space {
   // This space was swept precisely, hence it is iterable.
   bool is_iterable_;

-  // This space is currently swept by sweeper threads.
-  bool is_swept_concurrently_;
-
   // The number of free bytes which could be reclaimed by advancing the
   // concurrent sweeper threads. This is only an estimation because concurrent
   // sweeping is done conservatively.
@@ -2017,7 +2026,8 @@ class PagedSpace : public Space {
   // If sweeping is still in progress try to sweep unswept pages. If that is
   // not successful, wait for the sweeper threads and re-try free-list
   // allocation.
-  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
+  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
+      int size_in_bytes);

   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
@@ -44,8 +44,8 @@ using namespace v8::internal;
 static void SimulateIncrementalMarking() {
   MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
   IncrementalMarking* marking = CcTest::heap()->incremental_marking();
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->WaitUntilSweepingCompleted();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
   }
   CHECK(marking->IsMarking() || marking->IsStopped());
   if (marking->IsStopped()) {
@@ -1591,8 +1591,8 @@ TEST(TestSizeOfObjects) {
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->WaitUntilSweepingCompleted();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
   }
   int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());

@@ -1618,8 +1618,8 @@ TEST(TestSizeOfObjects) {
   CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));

   // Waiting for sweeper threads should not change heap size.
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->WaitUntilSweepingCompleted();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
   }
   CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
 }