Respect old space allocation limit in PagedSpace::AllocateRaw.

BUG=v8:3976
LOG=NO

Review URL: https://codereview.chromium.org/1025643002

Cr-Commit-Position: refs/heads/master@{#27364}
Author: ulan
Date: 2015-03-23 06:24:07 -07:00
Committed by: Commit bot
Commit: fdc1745e33 (parent 9cbf6c7d7b)
3 changed files with 5 additions and 7 deletions


@@ -698,18 +698,12 @@ void Heap::CompletelyClearInstanceofCache() {
 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
     : heap_(isolate->heap()), daf_(isolate) {
-  // We shouldn't hit any nested scopes, because that requires
-  // non-handle code to call handle code. The code still works but
-  // performance will degrade, so we want to catch this situation
-  // in debug mode.
-  DCHECK(heap_->always_allocate_scope_depth_ == 0);
   heap_->always_allocate_scope_depth_++;
 }
 AlwaysAllocateScope::~AlwaysAllocateScope() {
   heap_->always_allocate_scope_depth_--;
-  DCHECK(heap_->always_allocate_scope_depth_ == 0);
 }
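The six deleted lines are the comment and DCHECKs that forbade nesting AlwaysAllocateScope. With the scope now also taken in Heap::Scavenge (next hunk), nesting has to be legal, so the scope becomes a plain depth-counted guard. Below is a self-contained sketch of that nestable RAII pattern; the Heap stand-in and its members are simplified assumptions loosely modeled on the diff, not V8's real declarations.

#include <cassert>

// Simplified stand-in for the heap state the scope manipulates (assumed names).
struct Heap {
  int always_allocate_scope_depth_ = 0;
  bool always_allocate() const { return always_allocate_scope_depth_ != 0; }
};

// Nestable RAII guard: while at least one scope is alive, allocations are
// not allowed to fail at the soft limit.
class AlwaysAllocateScope {
 public:
  explicit AlwaysAllocateScope(Heap* heap) : heap_(heap) {
    heap_->always_allocate_scope_depth_++;  // nesting just bumps the counter
  }
  ~AlwaysAllocateScope() {
    heap_->always_allocate_scope_depth_--;
    assert(heap_->always_allocate_scope_depth_ >= 0);
  }
 private:
  Heap* heap_;
};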


@@ -1516,6 +1516,10 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 void Heap::Scavenge() {
   RelocationLock relocation_lock(this);
+  // There are soft limits in the allocation code, designed to trigger a mark
+  // sweep collection by failing allocations. There is no sense in trying to
+  // trigger one during scavenge: scavenges allocation should always succeed.
+  AlwaysAllocateScope scope(isolate());
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
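On the allocator side, the soft limit described in the new comment could look roughly like the sketch below, reusing the Heap and AlwaysAllocateScope stand-ins from the previous sketch. AllocateRawSketch and its parameters are assumptions for illustration, not PagedSpace's real interface: once the old-generation limit is reached the allocation fails so the caller can trigger a mark-sweep, except while a scope such as the one Scavenge now holds is active.

#include <cstddef>
#include <cstdlib>

// Illustrative soft-limit check (not V8's code): fail the allocation once the
// old-space limit is hit, unless an AlwaysAllocateScope is currently active.
void* AllocateRawSketch(Heap* heap, std::size_t size_in_bytes,
                        std::size_t allocated_so_far, std::size_t old_space_limit) {
  if (allocated_so_far + size_in_bytes > old_space_limit &&
      !heap->always_allocate()) {
    return nullptr;  // soft failure: the caller should schedule a mark-sweep
  }
  return std::malloc(size_in_bytes);  // stand-in for the real bump allocation
}

Because Scavenge now wraps its work in an AlwaysAllocateScope, always_allocate() stays true for its duration and the failing path above cannot be taken, which is exactly what the added comment asks for.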


@@ -2618,7 +2618,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements form their free-lists.
     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
-    if (object != NULL) return object;
+    return object;
   }
   // Try to expand the space and allocate in the new next page.
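The one-line change means a failed retry after waiting for the sweeper threads no longer falls through to the expansion path below: the possibly-NULL result is returned directly, so old space stops growing past its limit and the failure can propagate up and trigger a collection. A hedged sketch of the caller-side retry this enables, built on the helpers from the sketches above (CollectGarbageSketch is a hypothetical stub, not a V8 API):

// Hypothetical stub standing in for scheduling a full mark-sweep collection.
void CollectGarbageSketch(Heap* heap) {
  (void)heap;  // a real implementation would reclaim memory here
}

// Illustrative caller pattern (not V8's code): on soft failure, collect and
// retry instead of silently expanding old space past its limit.
void* AllocateOrCollect(Heap* heap, std::size_t size_in_bytes,
                        std::size_t allocated_so_far, std::size_t old_space_limit) {
  void* result = AllocateRawSketch(heap, size_in_bytes, allocated_so_far, old_space_limit);
  if (result == nullptr) {
    CollectGarbageSketch(heap);  // after a real GC, allocated_so_far would shrink
    result = AllocateRawSketch(heap, size_in_bytes, allocated_so_far, old_space_limit);
  }
  return result;
}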