Force relinking of paged space if the first attempt to recommit from space fails.

This should improve the chances of a successful commit: currently, if free
pages were moved out of order, we cannot shrink spaces. When we have trouble
committing from space back, we should use every resource at our disposal.

Also get rid of a currently unused parameter to DeallocateFunction.

Review URL: http://codereview.chromium.org/3260001

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5372 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
antonm@chromium.org 2010-08-30 12:34:32 +00:00
parent 79a219cf31
commit 19dc35c13f
5 changed files with 134 additions and 172 deletions
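
Before the per-file diffs, here is a minimal, self-contained C++ sketch of the fallback flow this change gives Heap::EnsureFromSpaceIsCommitted (first hunk of src/heap.cc below). The toy Heap and PagedSpace types and the Shrink() model are illustrative assumptions; only the call sequence (relink every paged space in chunk order, then shrink, then retry the commit) is taken from the diff.

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy stand-ins for V8's PagedSpace and Heap. Only the names
// RelinkPageListInChunkOrder / Shrink / CommitFromSpaceIfNeeded come
// from the diff; the bodies model the idea, not the real implementation.
struct PagedSpace {
  bool chunk_ordered;
  PagedSpace() : chunk_ordered(false) {}
  void RelinkPageListInChunkOrder(bool deallocate_blocks) {
    // Restoring chunk order (and, with deallocate_blocks, freeing unused
    // page tails) is what makes a subsequent Shrink() effective.
    chunk_ordered = true;
    (void)deallocate_blocks;
  }
};

struct Heap {
  std::vector<PagedSpace> paged_spaces;
  int committable;  // toy: memory the OS will let us commit

  Heap() : paged_spaces(4), committable(0) {}

  bool CommitFromSpaceIfNeeded() { return committable > 0; }

  void Shrink() {
    // Toy model: shrinking frees memory only from chunk-ordered spaces.
    for (std::size_t i = 0; i < paged_spaces.size(); ++i)
      if (paged_spaces[i].chunk_ordered) ++committable;
  }

  void EnsureFromSpaceIsCommitted() {
    if (CommitFromSpaceIfNeeded()) return;
    // Committing memory to from space failed. Force chunk order in every
    // paged space, shrink, and try once more before giving up.
    for (std::size_t i = 0; i < paged_spaces.size(); ++i)
      paged_spaces[i].RelinkPageListInChunkOrder(true);
    Shrink();
    if (CommitFromSpaceIfNeeded()) return;
    std::puts("Committing semispaces failed.");
  }
};

int main() {
  Heap heap;
  heap.EnsureFromSpaceIsCommitted();  // succeeds via relink + shrink
  std::printf("committable after fallback: %d\n", heap.committable);
  return 0;
}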

src/heap.cc

@@ -568,6 +568,13 @@ void Heap::EnsureFromSpaceIsCommitted() {
   // Committing memory to from space failed.
   // Try shrinking and try again.
+  PagedSpaces spaces;
+  for (PagedSpace* space = spaces.next();
+       space != NULL;
+       space = spaces.next()) {
+    space->RelinkPageListInChunkOrder(true);
+  }
   Shrink();
   if (new_space_.CommitFromSpaceIfNeeded()) return;

src/mark-compact.cc

@@ -1629,7 +1629,7 @@ static void SweepNewSpace(NewSpace* space) {
 }
 
 
-static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
+static void SweepSpace(PagedSpace* space) {
   PageIterator it(space, PageIterator::PAGES_IN_USE);
 
   // During sweeping of paged space we are trying to find longest sequences
@@ -1670,10 +1670,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
         MarkCompactCollector::tracer()->decrement_marked_count();
 
         if (!is_previous_alive) {  // Transition from free to live.
-          dealloc(free_start,
-                  static_cast<int>(current - free_start),
-                  true,
-                  false);
+          space->DeallocateBlock(free_start,
+                                 static_cast<int>(current - free_start),
+                                 true);
           is_previous_alive = true;
         }
       } else {
@@ -1703,7 +1702,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
           // without putting anything into free list.
           int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
           if (size_in_bytes > 0) {
-            dealloc(free_start, size_in_bytes, false, true);
+            space->DeallocateBlock(free_start, size_in_bytes, false);
           }
         }
       } else {
@@ -1719,7 +1718,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
       if (last_free_size > 0) {
         Page::FromAddress(last_free_start)->
             SetAllocationWatermark(last_free_start);
-        dealloc(last_free_start, last_free_size, true, true);
+        space->DeallocateBlock(last_free_start, last_free_size, true);
         last_free_start = NULL;
         last_free_size = 0;
       }
@@ -1750,7 +1749,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
       // There was a free ending area on the previous page.
       // Deallocate it without putting it into freelist and move allocation
       // top to the beginning of this free area.
-      dealloc(last_free_start, last_free_size, false, true);
+      space->DeallocateBlock(last_free_start, last_free_size, false);
       new_allocation_top = last_free_start;
     }
@@ -1771,61 +1770,6 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
 }
 
 
-void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
-                                                     int size_in_bytes,
-                                                     bool add_to_freelist,
-                                                     bool last_on_page) {
-  Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateOldDataBlock(Address start,
-                                                  int size_in_bytes,
-                                                  bool add_to_freelist,
-                                                  bool last_on_page) {
-  Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateCodeBlock(Address start,
-                                               int size_in_bytes,
-                                               bool add_to_freelist,
-                                               bool last_on_page) {
-  Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateMapBlock(Address start,
-                                              int size_in_bytes,
-                                              bool add_to_freelist,
-                                              bool last_on_page) {
-  // Objects in map space are assumed to have size Map::kSize and a
-  // valid map in their first word. Thus, we break the free block up into
-  // chunks and free them separately.
-  ASSERT(size_in_bytes % Map::kSize == 0);
-  Address end = start + size_in_bytes;
-  for (Address a = start; a < end; a += Map::kSize) {
-    Heap::map_space()->Free(a, add_to_freelist);
-  }
-}
-
-
-void MarkCompactCollector::DeallocateCellBlock(Address start,
-                                               int size_in_bytes,
-                                               bool add_to_freelist,
-                                               bool last_on_page) {
-  // Free-list elements in cell space are assumed to have a fixed size.
-  // We break the free block into chunks and add them to the free list
-  // individually.
-  int size = Heap::cell_space()->object_size_in_bytes();
-  ASSERT(size_in_bytes % size == 0);
-  Address end = start + size_in_bytes;
-  for (Address a = start; a < end; a += size) {
-    Heap::cell_space()->Free(a, add_to_freelist);
-  }
-}
-
-
 void MarkCompactCollector::EncodeForwardingAddresses() {
   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
   // Objects in the active semispace of the young generation may be
@@ -2090,14 +2034,14 @@ void MarkCompactCollector::SweepSpaces() {
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
-  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
-  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
-  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
+  SweepSpace(Heap::old_pointer_space());
+  SweepSpace(Heap::old_data_space());
+  SweepSpace(Heap::code_space());
+  SweepSpace(Heap::cell_space());
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
     SweepNewSpace(Heap::new_space());
   }
-  SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+  SweepSpace(Heap::map_space());
 
   Heap::IterateDirtyRegions(Heap::map_space(),
                             &Heap::IteratePointersInDirtyMapsRegion,
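
Taken together, this file's hunks swap a hand-threaded callback for virtual dispatch. Below is a compilable sketch of the two shapes; the class bodies are stubs standing in for the real Free() plumbing, and only the signatures mirror the diff.

#include <cstdio>

typedef unsigned char* Address;

// Old shape: SweepSpace() received a per-space function pointer, and
// every callback carried a last_on_page flag that no body ever read.
typedef void (*DeallocateFunction)(Address start, int size_in_bytes,
                                   bool add_to_freelist, bool last_on_page);

// New shape: the space frees its own blocks through a virtual method,
// so the unused flag and the five near-identical callbacks disappear.
class PagedSpace {
 public:
  virtual ~PagedSpace() {}
  virtual void DeallocateBlock(Address start, int size_in_bytes,
                               bool add_to_freelist) = 0;
};

class OldSpace : public PagedSpace {
 public:
  virtual void DeallocateBlock(Address start, int size_in_bytes,
                               bool add_to_freelist) {
    // The real implementation forwards to Free(start, size, add_to_freelist).
    std::printf("freeing %d bytes (freelist: %d)\n", size_in_bytes,
                add_to_freelist ? 1 : 0);
    (void)start;
  }
};

// The sweeper no longer needs to be told how to deallocate.
static void SweepSpace(PagedSpace* space) {
  unsigned char page[64];
  // At a free-to-live transition the code now reads:
  space->DeallocateBlock(page, static_cast<int>(sizeof(page)), true);
}

int main() {
  OldSpace old_space;
  SweepSpace(&old_space);
  return 0;
}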

src/mark-compact.h

@@ -36,15 +36,6 @@ namespace internal {
 // to the first live object in the page (only used for old and map objects).
 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
 
-// Callback function for non-live blocks in the old generation.
-// If add_to_freelist is false then just accounting stats are updated and
-// no attempt to add area to free list is made.
-typedef void (*DeallocateFunction)(Address start,
-                                   int size_in_bytes,
-                                   bool add_to_freelist,
-                                   bool last_on_page);
-
-
 // Forward declarations.
 class RootMarkingVisitor;
 class MarkingVisitor;
@@ -329,33 +320,6 @@ class MarkCompactCollector: public AllStatic {
   static int IterateLiveObjectsInRange(Address start, Address end,
                                        HeapObjectCallback size_func);
 
-  // Callback functions for deallocating non-live blocks in the old
-  // generation.
-  static void DeallocateOldPointerBlock(Address start,
-                                        int size_in_bytes,
-                                        bool add_to_freelist,
-                                        bool last_on_page);
-
-  static void DeallocateOldDataBlock(Address start,
-                                     int size_in_bytes,
-                                     bool add_to_freelist,
-                                     bool last_on_page);
-
-  static void DeallocateCodeBlock(Address start,
-                                  int size_in_bytes,
-                                  bool add_to_freelist,
-                                  bool last_on_page);
-
-  static void DeallocateMapBlock(Address start,
-                                 int size_in_bytes,
-                                 bool add_to_freelist,
-                                 bool last_on_page);
-
-  static void DeallocateCellBlock(Address start,
-                                  int size_in_bytes,
-                                  bool add_to_freelist,
-                                  bool last_on_page);
-
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
   // regions to each space's free list.

src/spaces.cc

@@ -2002,76 +2002,87 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
 }
 
 
+void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
+  const bool add_to_freelist = true;
+
+  // Mark used and unused pages to properly fill unused pages
+  // after reordering.
+  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+  Page* last_in_use = AllocationTopPage();
+  bool in_use = true;
+
+  while (all_pages_iterator.has_next()) {
+    Page* p = all_pages_iterator.next();
+    p->SetWasInUseBeforeMC(in_use);
+    if (p == last_in_use) {
+      // We passed a page containing allocation top. All consequent
+      // pages are not used.
+      in_use = false;
+    }
+  }
+
+  if (page_list_is_chunk_ordered_) return;
+
+  Page* new_last_in_use = Page::FromAddress(NULL);
+  MemoryAllocator::RelinkPageListInChunkOrder(this,
+                                              &first_page_,
+                                              &last_page_,
+                                              &new_last_in_use);
+  ASSERT(new_last_in_use->is_valid());
+
+  if (new_last_in_use != last_in_use) {
+    // Current allocation top points to a page which is now in the middle
+    // of page list. We should move allocation top forward to the new last
+    // used page so various object iterators will continue to work properly.
+    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+                                         last_in_use->AllocationTop());
+
+    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
+    if (size_in_bytes > 0) {
+      Address start = last_in_use->AllocationTop();
+      if (deallocate_blocks) {
+        accounting_stats_.AllocateBytes(size_in_bytes);
+        DeallocateBlock(start, size_in_bytes, add_to_freelist);
+      } else {
+        Heap::CreateFillerObjectAt(start, size_in_bytes);
+      }
+    }
+
+    // New last in use page was in the middle of the list before
+    // sorting so it full.
+    SetTop(new_last_in_use->AllocationTop());
+
+    ASSERT(AllocationTopPage() == new_last_in_use);
+    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+  }
+
+  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+  while (pages_in_use_iterator.has_next()) {
+    Page* p = pages_in_use_iterator.next();
+    if (!p->WasInUseBeforeMC()) {
+      // Empty page is in the middle of a sequence of used pages.
+      // Allocate it as a whole and deallocate immediately.
+      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+                                           p->ObjectAreaStart());
+
+      p->SetAllocationWatermark(p->ObjectAreaStart());
+      Address start = p->ObjectAreaStart();
+      if (deallocate_blocks) {
+        accounting_stats_.AllocateBytes(size_in_bytes);
+        DeallocateBlock(start, size_in_bytes, add_to_freelist);
+      } else {
+        Heap::CreateFillerObjectAt(start, size_in_bytes);
+      }
+    }
+  }
+
+  page_list_is_chunk_ordered_ = true;
+}
+
+
 void PagedSpace::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
-    // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
-    // to skip unused pages. Update flag value for all pages in space.
-    PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
-    Page* last_in_use = AllocationTopPage();
-    bool in_use = true;
-
-    while (all_pages_iterator.has_next()) {
-      Page* p = all_pages_iterator.next();
-      p->SetWasInUseBeforeMC(in_use);
-      if (p == last_in_use) {
-        // We passed a page containing allocation top. All consequent
-        // pages are not used.
-        in_use = false;
-      }
-    }
-
-    if (!page_list_is_chunk_ordered_) {
-      Page* new_last_in_use = Page::FromAddress(NULL);
-      MemoryAllocator::RelinkPageListInChunkOrder(this,
-                                                  &first_page_,
-                                                  &last_page_,
-                                                  &new_last_in_use);
-      ASSERT(new_last_in_use->is_valid());
-
-      if (new_last_in_use != last_in_use) {
-        // Current allocation top points to a page which is now in the middle
-        // of page list. We should move allocation top forward to the new last
-        // used page so various object iterators will continue to work properly.
-        last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-        int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
-                                             last_in_use->AllocationTop());
-
-        if (size_in_bytes > 0) {
-          // There is still some space left on this page. Create a fake
-          // object which will occupy all free space on this page.
-          // Otherwise iterators would not be able to scan this page
-          // correctly.
-          Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
-                                     size_in_bytes);
-        }
-
-        // New last in use page was in the middle of the list before
-        // sorting so it full.
-        SetTop(new_last_in_use->AllocationTop());
-
-        ASSERT(AllocationTopPage() == new_last_in_use);
-        ASSERT(AllocationTopPage()->WasInUseBeforeMC());
-      }
-
-      PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
-      while (pages_in_use_iterator.has_next()) {
-        Page* p = pages_in_use_iterator.next();
-        if (!p->WasInUseBeforeMC()) {
-          // Empty page is in the middle of a sequence of used pages.
-          // Create a fake object which will occupy all free space on this page.
-          // Otherwise iterators would not be able to scan this page correctly.
-          int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
-                                               p->ObjectAreaStart());
-
-          p->SetAllocationWatermark(p->ObjectAreaStart());
-          Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
-        }
-      }
-
-      page_list_is_chunk_ordered_ = true;
-    }
+    RelinkPageListInChunkOrder(false);
   }
 }
@@ -2207,6 +2218,13 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
 }
 
 
+void OldSpace::DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist) {
+  Free(start, size_in_bytes, add_to_freelist);
+}
+
+
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
@@ -2481,6 +2499,21 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
 }
 
 
+void FixedSpace::DeallocateBlock(Address start,
+                                 int size_in_bytes,
+                                 bool add_to_freelist) {
+  // Free-list elements in fixed space are assumed to have a fixed size.
+  // We break the free block into chunks and add them to the free list
+  // individually.
+  int size = object_size_in_bytes();
+  ASSERT(size_in_bytes % size == 0);
+  Address end = start + size_in_bytes;
+  for (Address a = start; a < end; a += size) {
+    Free(a, add_to_freelist);
+  }
+}
+
+
 #ifdef DEBUG
 void FixedSpace::ReportStatistics() {
   int pct = Available() * 100 / Capacity();
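
OldSpace::DeallocateBlock above forwards one contiguous block, while FixedSpace::DeallocateBlock loops: free-list nodes in map and cell space have a single fixed size, so a coalesced block has to be re-split. Below is a self-contained toy model of that loop; the vector free list and the 32-byte cell size are illustrative assumptions, not V8's internals.

#include <cassert>
#include <cstddef>
#include <vector>

typedef unsigned char* Address;

// Toy model of FixedSpace::DeallocateBlock: a free block covering several
// fixed-size cells is split, and every cell is pushed onto the free list
// individually. V8 threads its real free list through the heap itself.
class FixedSpaceModel {
 public:
  explicit FixedSpaceModel(int cell_size) : cell_size_(cell_size) {}

  void Free(Address a) { free_list_.push_back(a); }

  void DeallocateBlock(Address start, int size_in_bytes,
                       bool add_to_freelist) {
    // Blocks must cover a whole number of cells, as the ASSERT in the
    // diff demands.
    assert(size_in_bytes % cell_size_ == 0);
    if (!add_to_freelist) return;  // the real code would still update stats
    Address end = start + size_in_bytes;
    for (Address a = start; a < end; a += cell_size_)
      Free(a);  // one free-list entry per cell
  }

  std::size_t free_cells() const { return free_list_.size(); }

 private:
  int cell_size_;
  std::vector<Address> free_list_;
};

int main() {
  unsigned char block[96];
  FixedSpaceModel space(32);
  space.DeallocateBlock(block, static_cast<int>(sizeof(block)), true);
  assert(space.free_cells() == 3);  // 96 bytes / 32-byte cells
  return 0;
}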

src/spaces.h

@@ -1040,6 +1040,11 @@ class PagedSpace : public Space {
   // Freed pages are moved to the end of page list.
   void FreePages(Page* prev, Page* last);
 
+  // Deallocates a block.
+  virtual void DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist) = 0;
+
   // Set space allocation info.
   void SetTop(Address top) {
     allocation_info_.top = top;
@@ -1098,6 +1103,8 @@ class PagedSpace : public Space {
   // Returns the page of the allocation pointer.
   Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
 
+  void RelinkPageListInChunkOrder(bool deallocate_blocks);
+
  protected:
   // Maximum capacity of this space.
   int max_capacity_;
@@ -1815,6 +1822,10 @@ class OldSpace : public PagedSpace {
     }
   }
 
+  virtual void DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist);
+
   // Prepare for full garbage collection. Resets the relocation pointer and
   // clears the free list.
   virtual void PrepareForMarkCompact(bool will_compact);
@@ -1889,6 +1900,9 @@ class FixedSpace : public PagedSpace {
   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
 
+  virtual void DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist);
 
 #ifdef DEBUG
   // Reports statistic info of the space
   void ReportStatistics();