Waiting for sweeper threads is the last resort in SlowAllocateRaw.

BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/356403002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22090 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: hpayer@chromium.org
Date:   2014-06-30 12:39:06 +00:00
Commit: fd45684878 (parent e1d80e2858)

2 changed files with 30 additions and 16 deletions
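
For the resulting control flow at a glance, here is a minimal, self-contained sketch of the new slow-path ordering. It is not V8 code: SketchSpace and its boolean flags are illustrative stand-ins for PagedSpace, free_list_, the MarkCompactCollector and Expand(); only the order of the fallbacks mirrors this patch.

    // A minimal, self-contained sketch (not actual V8 code) of the allocation
    // slow-path ordering after this change. SketchSpace and its boolean flags
    // are simplified stand-ins for PagedSpace, free_list_, the
    // MarkCompactCollector and Expand(); only the order of the fallbacks
    // mirrors the patch.
    #include <cstdio>

    struct SketchSpace {
      bool free_list_has_memory = false;   // stands in for free_list_.Allocate()
      bool sweeping_in_progress = true;    // IsConcurrentSweepingInProgress()
      bool old_gen_limit_reached = false;  // OldGenerationAllocationLimitReached()
      bool can_expand = false;             // Expand()

      // Mirrors WaitForSweeperThreadsAndRetryAllocation: only helps while
      // concurrent sweeping is still running, otherwise reports failure.
      bool WaitForSweepersAndRetry() {
        if (sweeping_in_progress) {
          sweeping_in_progress = false;  // WaitUntilSweepingCompleted()
          free_list_has_memory = true;   // pretend sweeping refilled the free list
          return free_list_has_memory;   // retry the free-list allocation
        }
        return false;  // nothing left to wait for: the allocation fails
      }

      // Mirrors the new SlowAllocateRaw ordering.
      bool SlowAllocateRaw() {
        if (free_list_has_memory) return true;  // 1. free-list allocation
        if (old_gen_limit_reached) {
          // 2. A GC is due; stealing from the sweepers' free lists is the only
          //    option short of failing the allocation.
          return WaitForSweepersAndRetry();
        }
        if (can_expand) return true;       // 3. grow the space by a new page
        return WaitForSweepersAndRetry();  // 4. last resort: wait and retry
      }
    };

    int main() {
      SketchSpace space;  // empty free list, no room to expand
      std::printf("allocation %s\n",
                  space.SlowAllocateRaw() ? "succeeded" : "failed");
      return 0;
    }

With this ordering, blocking on the sweeper threads happens only when the free list is empty and either a GC is already due or the space can no longer be expanded, which is what the commit title means by "last resort".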


@@ -2577,6 +2577,22 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
 
 
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+    int size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+
+  // If sweeper threads are still running, wait for them.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
+
+    // After waiting for the sweeper threads, there may be new free-list
+    // entries.
+    return free_list_.Allocate(size_in_bytes);
+  }
+  return NULL;
+}
+
+
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
@@ -2593,29 +2609,24 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    // If sweeper threads are active, wait for them at that point.
-    if (collector->IsConcurrentSweepingInProgress()) {
-      collector->WaitUntilSweepingCompleted();
-
-      // After waiting for the sweeper threads, there may be new free-list
-      // entries.
-      HeapObject* object = free_list_.Allocate(size_in_bytes);
-      if (object != NULL) return object;
-    }
-
+  if (!heap()->always_allocate()
+      && heap()->OldGenerationAllocationLimitReached()) {
+    // If sweeper threads are active, wait for them at that point and steal
+    // elements from their free-lists.
+    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    if (object != NULL) return object;
     return NULL;
   }
 
   // Try to expand the space and allocate in the new next page.
   if (Expand()) {
     ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
     return free_list_.Allocate(size_in_bytes);
   }
 
-  // Finally, fail.
-  return NULL;
+  // If sweeper threads are active, wait for them at that point and steal
+  // elements from their free-lists. Allocation may still fail there, which
+  // would indicate that there is not enough memory for the given allocation.
+  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
 }


@@ -2007,8 +2007,11 @@ class PagedSpace : public Space {
   // address denoted by top in allocation_info_.
   inline HeapObject* AllocateLinearly(int size_in_bytes);
 
+  MUST_USE_RESULT HeapObject*
+      WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
+
   // Slow path of AllocateRaw.  This function is space-dependent.
-  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class PageIterator;
   friend class MarkCompactCollector;