Use just one to-space page for the promotion queue.

BUG=454725
LOG=n

Review URL: https://codereview.chromium.org/919473008

Cr-Commit-Position: refs/heads/master@{#26577}
hpayer 2015-02-11 05:39:29 -08:00 committed by Commit bot
parent 31637fb396
commit c889fb4c1d
4 changed files with 59 additions and 17 deletions


@@ -27,13 +27,6 @@ void PromotionQueue::insert(HeapObject* target, int size) {
     return;
   }
 
-  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
-    NewSpacePage* rear_page =
-        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
-    DCHECK(!rear_page->prev_page()->is_anchor());
-    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
-  }
-
   if ((rear_ - 2) < limit_) {
     RelocateQueueHead();
     emergency_stack_->Add(Entry(target, size));

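With the page-hopping branch gone, the queue lives entirely in the last to-space page: entries are two words each, written downward from the page end, and anything that no longer fits above limit_ goes to the emergency stack (insert() above also calls RelocateQueueHead() first to move the already-queued entries out). Below is a minimal, self-contained sketch of that scheme, not V8 code: the fixed buffer and std::vector stand in for the page and the emergency stack, and the head-relocation step is omitted.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry { void* target; int size; };

struct QueueSketch {
  static const int kWords = 64;          // stand-in for one page of slots
  std::intptr_t buffer_[kWords];
  std::intptr_t* limit_;                 // lowest usable slot (area_start)
  std::intptr_t* rear_;                  // write end, grows downward
  std::vector<Entry> emergency_;         // stand-in for the emergency stack

  QueueSketch() : limit_(buffer_), rear_(buffer_ + kWords) {}

  void insert(void* target, int size) {
    if (!emergency_.empty()) {           // already spilled: keep spilling
      emergency_.push_back(Entry{target, size});
      return;
    }
    if (rear_ - 2 < limit_) {            // no room left for one more pair
      emergency_.push_back(Entry{target, size});
      return;
    }
    *(--rear_) = reinterpret_cast<std::intptr_t>(target);  // same order as V8
    *(--rear_) = static_cast<std::intptr_t>(size);
  }
};

int main() {
  QueueSketch q;
  int dummy = 0;
  for (int i = 0; i < 40; ++i) q.insert(&dummy, sizeof(dummy));
  std::printf("in-page pairs: %d, spilled: %d\n",
              static_cast<int>((q.buffer_ + QueueSketch::kWords - q.rear_) / 2),
              static_cast<int>(q.emergency_.size()));
  return 0;
}

With 64 words the page holds 32 pairs, so the run above keeps 32 entries in the page and spills 8, mirroring how the real queue degrades under promotion pressure instead of claiming another to-space page.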

@@ -1457,14 +1457,14 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
 
 
 void PromotionQueue::Initialize() {
-  // Assumes that a NewSpacePage exactly fits a number of promotion queue
-  // entries (where each is a pair of intptr_t). This allows us to simplify
-  // the test fpr when to switch pages.
+  // The last to-space page may be used for promotion queue. On promotion
+  // conflict, we use the emergency stack.
   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
          0);
-  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
   front_ = rear_ =
       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+  limit_ = reinterpret_cast<intptr_t*>(
+      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
   emergency_stack_ = NULL;
 }
 

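The DCHECK kept above is what makes the single-page scheme safe: if the usable page body is an exact multiple of one queue entry (two pointers), a full queue lands exactly on area_start() and the (rear_ - 2) < limit_ test can never straddle a half-written entry. A small standalone check of that divisibility argument follows, with assumed constants rather than V8's real Page::kPageSize and MemoryChunk::kBodyOffset.

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed stand-ins for Page::kPageSize and MemoryChunk::kBodyOffset.
  const std::size_t kPageSize = 512 * 1024;
  const std::size_t kBodyOffset = 256;
  const std::size_t kEntrySize = 2 * sizeof(void*);  // one (size, target) pair

  // Mirrors the DCHECK in PromotionQueue::Initialize().
  if ((kPageSize - kBodyOffset) % kEntrySize != 0) {
    std::puts("page body is not a whole number of queue entries");
    return 1;
  }
  std::printf("a full page holds exactly %zu entries\n",
              (kPageSize - kBodyOffset) / kEntrySize);
  return 0;
}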

@@ -400,6 +400,9 @@ class StoreBufferRebuilder {
 
 // A queue of objects promoted during scavenge. Each object is accompanied
 // by it's size to avoid dereferencing a map pointer for scanning.
+// The last page in to-space is used for the promotion queue. On conflict
+// during scavenge, the promotion queue is allocated externally and all
+// entries are copied to the external queue.
 class PromotionQueue {
  public:
   explicit PromotionQueue(Heap* heap)
@@ -422,6 +425,12 @@ class PromotionQueue {
   }
 
   void SetNewLimit(Address limit) {
+    // If we are already using an emergency stack, we can ignore it.
+    if (emergency_stack_) return;
+
+    // If the limit is not on the same page, we can ignore it.
+    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+
     limit_ = reinterpret_cast<intptr_t*>(limit);
 
     if (limit_ <= rear_) {
@@ -432,6 +441,10 @@ class PromotionQueue {
   }
 
   bool IsBelowPromotionQueue(Address to_space_top) {
+    // If an emergency stack is used, the to-space address cannot interfere
+    // with the promotion queue.
+    if (emergency_stack_) return true;
+
     // If the given to-space top pointer and the head of the promotion queue
     // are not on the same page, then the to-space objects are below the
     // promotion queue.
@@ -459,12 +472,6 @@ class PromotionQueue {
       return;
     }
 
-    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
-      NewSpacePage* front_page =
-          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
-      DCHECK(!front_page->prev_page()->is_anchor());
-      front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
-    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.

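Both new early returns follow the same pattern: to-space allocation grows upward while the promotion queue grows downward from the end of the same last page, so the queue only needs defending when the allocation top sits on the queue's own page. Here is a sketch of that check under simplifying assumptions; page identity is faked by address masking instead of Page::FromAllocationTop()/GetHeadPage(), and all names are illustrative.

#include <cstdint>
#include <cstdio>

const std::uintptr_t kPageSize = 1 << 19;  // assumed page size

// Fake page identity by masking; V8 uses Page::FromAllocationTop().
std::uintptr_t PageOf(std::uintptr_t addr) { return addr & ~(kPageSize - 1); }

// Mirrors the logic of PromotionQueue::IsBelowPromotionQueue() above.
bool IsBelowPromotionQueue(std::uintptr_t to_space_top, std::uintptr_t rear,
                           bool emergency_in_use) {
  // Once the emergency stack is in use, nothing lives in the page anymore.
  if (emergency_in_use) return true;
  // Different page: the allocation top cannot run into the queue.
  if (PageOf(to_space_top) != PageOf(rear)) return true;
  // Same page: safe only while the top has not crossed the queue's rear.
  return to_space_top <= rear;
}

int main() {
  std::uintptr_t page = 5 * kPageSize;     // some page start
  std::uintptr_t rear = page + kPageSize - 64;
  std::printf("low top: %d, high top: %d\n",
              IsBelowPromotionQueue(page + 128, rear, false),
              IsBelowPromotionQueue(page + kPageSize - 8, rear, false));
  return 0;
}

SetNewLimit() is the other half of the bargain: when the scavenger bumps the allocation top on the shared page, it tightens limit_ so the next insert() spills to the emergency stack rather than overwriting freshly allocated objects.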

@@ -0,0 +1,42 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-gc
+
+var __v_9 = {};
+var depth = 15;
+var current = 0;
+function __f_15(__v_3) {
+  if ((__v_3 % 50) != 0) {
+    return __v_3;
+  } else {
+    return __v_9 + 0.5;
+  }
+}
+
+function __f_13(a) {
+  a[100000 - 2] = 1;
+  for (var __v_3= 0; __v_3 < 70000; ++__v_3 ) {
+    a[__v_3] = __f_15(__v_3);
+  }
+}
+
+function __f_2(size) {
+}
+var tmp;
+function __f_18(allocator) {
+  current++;
+  if (current == depth) return;
+  var __v_7 = new allocator(100000);
+  __f_13(__v_7);
+  var __v_4 = 6;
+  for (var __v_3= 0; __v_3 < 70000; __v_3 += 501 ) {
+    tmp += __v_3;
+  }
+  __f_18(Array);
+  current--;
+}
+
+gc();
+__f_18(__f_2);