Simplify old space allocation strategy.
BUG=
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/258733013

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@21017 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
commit 3055867f78
parent 1a9649ae13
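In short: the per-space EnsureSweeperProgress / EnsureSweepersProgressed machinery is removed. When a free-list allocation fails, the slow path now refills the space's free list from the concurrent sweepers once and retries, blocking on WaitUntilSweepingCompleted only as a last resort; incremental marking likewise just waits until concurrent sweeping is done before it starts marking. Below is a minimal standalone sketch of the new slow-path shape. It is not V8 code: every type and member here is a toy stand-in that only mirrors the structure of the diff that follows.

// Toy model of the patched allocation slow path (compiles standalone).
#include <cstdio>

struct Collector {
  bool sweeping_in_progress = true;
  int pending_bytes = 256;  // freed by sweeper threads, not yet usable
  // Hand over whatever the sweepers have freed so far.
  int RefillFreeList() {
    int moved = pending_bytes;
    pending_bytes = 0;
    return moved;
  }
  void WaitUntilSweepingCompleted() { sweeping_in_progress = false; }
};

struct Space {
  Collector* collector;
  int free_bytes = 0;
  // Toy free-list allocation: succeed if enough bytes are on the list.
  bool TryAllocate(int size) {
    if (free_bytes >= size) { free_bytes -= size; return true; }
    return false;
  }
  // New strategy: refill once and retry, then block once and retry.
  // The old bounded kMaxSweepingTries polling loop is gone.
  bool SlowAllocate(int size) {
    if (collector->sweeping_in_progress) {
      free_bytes += collector->RefillFreeList();  // "RefillFreeList(this)"
      if (TryAllocate(size)) return true;         // retry the allocation
    }
    if (collector->sweeping_in_progress) {
      collector->WaitUntilSweepingCompleted();    // last resort: block
      if (TryAllocate(size)) return true;         // new entries may exist now
    }
    return false;  // caller would expand the space or report failure
  }
};

int main() {
  Collector c;
  Space s{&c};
  std::printf("64-byte allocation: %s\n", s.SlowAllocate(64) ? "ok" : "fail");
}

This trades repeated incremental "stealing" from the sweepers for a single refill plus, at worst, one blocking wait.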
@@ -1517,9 +1517,6 @@ void Heap::Scavenge() {
 
   incremental_marking()->PrepareForScavenge();
 
-  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
-  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
-
   // Flip the semispaces. After flipping, to space is empty, from space has
   // live objects.
   new_space_.Flip();
@@ -1549,12 +1549,6 @@ class Heap {
     return &incremental_marking_;
   }
 
-  bool EnsureSweepersProgressed(int step_size) {
-    bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
-    sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size);
-    return sweeping_complete;
-  }
-
   ExternalStringTable* external_string_table() {
     return &external_string_table_;
   }
@@ -909,7 +909,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
   }
 
   if (state_ == SWEEPING) {
-    if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) {
+    if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
       bytes_scanned_ = 0;
       StartMarking(PREVENT_COMPACTION);
     }
@@ -627,14 +627,14 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
   }
   ParallelSweepSpacesComplete();
   sweeping_pending_ = false;
-  RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
-  RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
+  RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
+  RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
   heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
   heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
 }
 
 
-intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
+void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
   FreeList* free_list;
 
   if (space == heap()->old_pointer_space()) {
@@ -644,13 +644,12 @@ intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
   } else {
     // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
     // to only refill them for old data and pointer spaces.
-    return 0;
+    return;
   }
 
   intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
   space->AddToAccountingStats(freed_bytes);
   space->DecrementUnsweptFreeBytes(freed_bytes);
-  return freed_bytes;
 }
 
@@ -694,7 +694,7 @@ class MarkCompactCollector {
 
   void WaitUntilSweepingCompleted();
 
-  intptr_t RefillFreeLists(PagedSpace* space);
+  void RefillFreeList(PagedSpace* space);
 
   bool AreSweeperThreadsActivated();
 
@@ -713,7 +713,7 @@ class MarkCompactCollector {
   void MarkWeakObjectToCodeTable();
 
   // Special case for processing weak references in a full collection. We need
-  // to artifically keep AllocationSites alive for a time.
+  // to artificially keep AllocationSites alive for a time.
   void MarkAllocationSite(AllocationSite* site);
 
  private:
@@ -2587,33 +2587,13 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
 
 
-bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  if (collector->AreSweeperThreadsActivated()) {
-    if (collector->IsConcurrentSweepingInProgress()) {
-      if (collector->RefillFreeLists(this) < size_in_bytes) {
-        if (!collector->sequential_sweeping()) {
-          collector->WaitUntilSweepingCompleted();
-          return true;
-        }
-      }
-      return false;
-    }
-  }
-  return true;
-}
-
-
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
-  // If there are unswept pages advance sweeping a bounded number of times
-  // until we find a size_in_bytes contiguous piece of memory
-  const int kMaxSweepingTries = 5;
-  bool sweeping_complete = false;
-
-  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
-    sweeping_complete = EnsureSweeperProgress(size_in_bytes);
-
+  // If sweeper threads are active, try to re-fill the free-lists.
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->RefillFreeList(this);
+
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2634,11 +2614,12 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
 
-  // Last ditch, sweep all the remaining pages to try to find space.
-  if (heap()->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
-    heap()->mark_compact_collector()->WaitUntilSweepingCompleted();
+  // If sweeper threads are active, wait for them at that point.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
 
-    // Retry the free list allocation.
+    // After waiting for the sweeper threads, there may be new free-list
+    // entries.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
   }
@@ -1914,12 +1914,6 @@ class PagedSpace : public Space {
     unswept_free_bytes_ = 0;
   }
 
-  // This function tries to steal size_in_bytes memory from the sweeper threads
-  // free-lists. If it does not succeed stealing enough memory, it will wait
-  // for the sweeper threads to finish sweeping.
-  // It returns true when sweeping is completed and false otherwise.
-  bool EnsureSweeperProgress(intptr_t size_in_bytes);
-
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
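A closing note on the RefillFreeList hunk: the toy below (not V8's actual FreeList; the chunk bookkeeping is hypothetical) illustrates the concatenate-and-account pattern the patched function performs. The returned byte count is exactly what this patch stops propagating, since SlowAllocateRaw no longer compares refilled bytes against the requested size.

// Toy free-list concatenation with byte accounting (compiles standalone).
#include <cstdio>
#include <list>
#include <numeric>

struct FreeList {
  std::list<int> chunks;  // sizes of free chunks
  // Move every chunk from other into this list; return total bytes moved.
  int Concatenate(FreeList* other) {
    int freed_bytes =
        std::accumulate(other->chunks.begin(), other->chunks.end(), 0);
    chunks.splice(chunks.end(), other->chunks);  // O(1); other becomes empty
    return freed_bytes;
  }
};

int main() {
  FreeList space, sweeper;
  sweeper.chunks = {32, 64, 128};
  int freed = space.Concatenate(&sweeper);
  // In the diff, the space's stats (AddToAccountingStats and
  // DecrementUnsweptFreeBytes) are both updated by this amount.
  std::printf("moved %d bytes in %zu chunks\n", freed, space.chunks.size());
}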