Make heap size estimation more accurate.

This improves the heap size estimation by not counting lazily swept pages
as completely allocated, but instead using their live bytes counter.

R=vegorov@chromium.org
BUG=v8:1893
TEST=cctest/test-heap/TestSizeOfObjects

Review URL: http://codereview.chromium.org/9173001

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10383 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: mstarzinger@chromium.org
Date:   2012-01-11 10:33:34 +00:00
Parent: befd149ef8
Commit: 83d912f0e4
4 changed files with 61 additions and 7 deletions
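
The change in a nutshell: a page that is merely queued for lazy sweeping still holds its dead bytes, so counting such pages as fully allocated overstates the heap size; the patch tracks those reclaimable bytes in unswept_free_bytes_ and subtracts them in SizeOfObjects(). Below is a minimal, self-contained C++ sketch of that bookkeeping; Page, PagedSpaceModel, kObjectAreaSize and the method names are simplified stand-ins for illustration, not V8's actual classes.

#include <cassert>
#include <cstdint>

// Illustrative stand-ins (not V8's real types): one "page" payload size and
// a per-page live-bytes counter, as the lazy sweeper sees them.
const std::intptr_t kObjectAreaSize = 1 << 20;

struct Page {
  std::intptr_t live_bytes;
};

class PagedSpaceModel {
 public:
  PagedSpaceModel() : size_(0), unswept_free_bytes_(0) {}

  // Every page in the space starts out accounted as fully allocated.
  void AddPage(const Page&) { size_ += kObjectAreaSize; }

  // Postponing a page for lazy sweeping: its dead bytes become "unswept free"
  // so they no longer inflate the size estimate (mirrors the idea behind
  // PagedSpace::MarkPageForLazySweeping in the patch).
  void MarkPageForLazySweeping(const Page& p) {
    unswept_free_bytes_ += kObjectAreaSize - p.live_bytes;
  }

  // When the sweeper actually processes the page, the freed bytes leave both
  // the allocated size and the unswept counter (mirrors the adjustment made
  // in PagedSpace::AdvanceSweeper).
  void SweepPage(const Page& p) {
    std::intptr_t freed = kObjectAreaSize - p.live_bytes;
    unswept_free_bytes_ -= freed;
    size_ -= freed;
  }

  // The improved estimate: allocated bytes minus what lazy sweeping is still
  // expected to reclaim (the linear allocation area is ignored here).
  std::intptr_t SizeOfObjects() const { return size_ - unswept_free_bytes_; }

 private:
  std::intptr_t size_;
  std::intptr_t unswept_free_bytes_;
};

int main() {
  PagedSpaceModel space;
  Page p = {256 * 1024};  // 256 KB of the 1 MB page payload is live
  space.AddPage(p);
  space.MarkPageForLazySweeping(p);
  assert(space.SizeOfObjects() == 256 * 1024);  // dead bytes no longer counted
  space.SweepPage(p);
  assert(space.SizeOfObjects() == 256 * 1024);  // sweeping does not change the estimate
  return 0;
}

The two asserts mirror what the new cctest checks: the estimate drops to the live bytes as soon as a page is postponed for lazy sweeping, and it does not change while the sweeper advances.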

@@ -3641,6 +3641,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
               reinterpret_cast<intptr_t>(p));
      }
      space->MarkPageForLazySweeping(p);
      continue;
    }

@@ -658,7 +658,8 @@ PagedSpace::PagedSpace(Heap* heap,
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)) {
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();
@@ -2062,6 +2063,7 @@ void PagedSpace::PrepareForMarkCompact() {
    } while (p != anchor());
  }
  first_unswept_page_ = Page::FromAddress(NULL);
  unswept_free_bytes_ = 0;
  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
@@ -2110,6 +2112,7 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
               reinterpret_cast<intptr_t>(p));
      }
      unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
    }
    p = next_page;

@@ -1469,9 +1469,12 @@ class PagedSpace : public Space {
  // linear allocation area (between top and limit) are also counted here.
  virtual intptr_t Size() { return accounting_stats_.Size(); }
  // As size, but the bytes in the current linear allocation area are not
  // included.
  virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); }
  // As size, but the bytes in lazily swept pages are estimated and the bytes
  // in the current linear allocation area are not included.
  virtual intptr_t SizeOfObjects() {
    ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
    return Size() - unswept_free_bytes_ - (limit() - top());
  }
  // Wasted bytes in this space. These are just the bytes that were thrown away
  // due to being too small to use for allocation. They do not include the
@@ -1479,9 +1482,7 @@ class PagedSpace : public Space {
  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
  // Returns the allocation pointer in this space.
  Address top() {
    return allocation_info_.top;
  }
  Address top() { return allocation_info_.top; }
  Address limit() { return allocation_info_.limit; }
  // Allocate the requested number of bytes in the space if possible, return a
@@ -1557,10 +1558,15 @@ class PagedSpace : public Space {
  }
  void SetPagesToSweep(Page* first) {
    ASSERT(unswept_free_bytes_ == 0);
    if (first == &anchor_) first = NULL;
    first_unswept_page_ = first;
  }
  void MarkPageForLazySweeping(Page* p) {
    unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
  }
  bool AdvanceSweeper(intptr_t bytes_to_sweep);
  bool IsSweepingComplete() {
@@ -1647,8 +1653,15 @@ class PagedSpace : public Space {
  bool was_swept_conservatively_;
  // The first page to be swept when the lazy sweeper advances. Is set
  // to NULL when all pages have been swept.
  Page* first_unswept_page_;
  // The number of free bytes which could be reclaimed by advancing the
  // lazy sweeper. This is only an estimation because lazy sweeping is
  // done conservatively.
  intptr_t unswept_free_bytes_;
  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate requested number of pages from OS, or if the hard heap
  // size limit has been hit.

@@ -1187,6 +1187,43 @@ TEST(TestInternalWeakListsTraverseWithGC) {
}

TEST(TestSizeOfObjects) {
  v8::V8::Initialize();
  // Get initial heap size after several full GCs, which will stabilize
  // the heap size and return with sweeping finished completely.
  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
  CHECK(HEAP->old_pointer_space()->IsSweepingComplete());
  intptr_t initial_size = HEAP->SizeOfObjects();
  {
    // Allocate objects on several different old-space pages so that
    // lazy sweeping kicks in for subsequent GC runs.
    AlwaysAllocateScope always_allocate;
    intptr_t filler_size = FixedArray::SizeFor(8192);
    for (int i = 1; i <= 100; i++) {
      HEAP->AllocateFixedArray(8192, TENURED)->ToObjectChecked();
      CHECK_EQ(initial_size + i * filler_size, HEAP->SizeOfObjects());
    }
  }
  // The heap size should go back to initial size after a full GC, even
  // though sweeping didn't finish yet.
  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
  CHECK(!HEAP->old_pointer_space()->IsSweepingComplete());
  CHECK_EQ(initial_size, HEAP->SizeOfObjects());
  // Advancing the sweeper step-wise should not change the heap size.
  while (!HEAP->old_pointer_space()->IsSweepingComplete()) {
    HEAP->old_pointer_space()->AdvanceSweeper(KB);
    CHECK_EQ(initial_size, HEAP->SizeOfObjects());
  }
}

TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
  InitializeVM();
  HEAP->EnsureHeapIsIterable();