Reapply r1900, r1897, r1895 with a fix.

When a paged space shrinks by an even multiple of the chunk size, ensure
that the cached last page in the space is updated.

Review URL: http://codereview.chromium.org/113267

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1944 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ebbaeb3655
commit 4bc0e7cf8c
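The invariant at stake, in miniature: a paged space caches a last_page_ pointer so that expansion and the ALL_PAGES iterator never have to walk the whole page list, and Shrink must reseed that cache even when no leftover pages survive the freed chunks. A toy sketch with hypothetical names (not V8's real classes):

#include <cassert>

// Toy model of the cached-last-page invariant (hypothetical names).
struct Page {
  Page* next = nullptr;
};

class ToyPagedSpace {
 public:
  void Append(Page* p) {
    if (first_page_ == nullptr) {
      first_page_ = p;
    } else {
      last_page_->next = p;
    }
    last_page_ = p;  // Expansion keeps the cache in sync.
  }

  // Free everything after 'keep'. Because pages are only freed in
  // whole chunks, the allocator may hand back a chain of leftover
  // pages that stay linked after 'keep'. The bug fixed here: if the
  // cache is only updated inside the loop over leftovers, a shrink by
  // an exact multiple of the chunk size (empty chain) leaves
  // last_page_ pointing at a freed page.
  void ShrinkAfter(Page* keep, Page* leftover) {
    keep->next = leftover;
    last_page_ = keep;  // The fix: reseed the cache unconditionally.
    for (Page* p = leftover; p != nullptr; p = p->next) {
      last_page_ = p;   // Walk any leftover pages, as Shrink() does.
    }
    assert(last_page_->next == nullptr);
  }

 private:
  Page* first_page_ = nullptr;
  Page* last_page_ = nullptr;
};

The real reseeding is the `last_page_ = last_page_to_keep;` line added to PagedSpace::Shrink() below; the previous code only updated the cache inside the loop over leftover pages, which never runs when the space shrinks by an exact multiple of the chunk size.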
src/heap.cc | 136
@@ -538,7 +538,7 @@ class ScavengeVisitor: public ObjectVisitor {
 
 
 // Shared state read by the scavenge collector and set by ScavengeObject.
-static Address promoted_top = NULL;
+static Address promoted_rear = NULL;
 
 
 #ifdef DEBUG
@@ -554,24 +554,34 @@ class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
     }
   }
 };
+
+
+static void VerifyNonPointerSpacePointers() {
+  // Verify that there are no pointers to new space in spaces where we
+  // do not expect them.
+  VerifyNonPointerSpacePointersVisitor v;
+  HeapObjectIterator code_it(Heap::code_space());
+  while (code_it.has_next()) {
+    HeapObject* object = code_it.next();
+    if (object->IsCode()) {
+      Code::cast(object)->ConvertICTargetsFromAddressToObject();
+      object->Iterate(&v);
+      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+    } else {
+      // If we find non-code objects in code space (e.g., free list
+      // nodes) we want to verify them as well.
+      object->Iterate(&v);
+    }
+  }
+
+  HeapObjectIterator data_it(Heap::old_data_space());
+  while (data_it.has_next()) data_it.next()->Iterate(&v);
+}
 #endif
 
 void Heap::Scavenge() {
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    VerifyNonPointerSpacePointersVisitor v;
-    HeapObjectIterator it(code_space_);
-    while (it.has_next()) {
-      HeapObject* object = it.next();
-      if (object->IsCode()) {
-        Code::cast(object)->ConvertICTargetsFromAddressToObject();
-      }
-      object->Iterate(&v);
-      if (object->IsCode()) {
-        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
-      }
-    }
-  }
+  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
 #endif
 
   gc_state_ = SCAVENGE;
@@ -596,72 +606,70 @@ void Heap::Scavenge() {
   new_space_.Flip();
   new_space_.ResetAllocationInfo();
 
-  // We need to sweep newly copied objects which can be in either the to space
-  // or the old space. For to space objects, we use a mark. Newly copied
-  // objects lie between the mark and the allocation top. For objects
-  // promoted to old space, we write their addresses downward from the top of
-  // the new space. Sweeping newly promoted objects requires an allocation
-  // pointer and a mark. Note that the allocation pointer 'top' actually
-  // moves downward from the high address in the to space.
+  // We need to sweep newly copied objects which can be either in the
+  // to space or promoted to the old generation. For to-space
+  // objects, we treat the bottom of the to space as a queue. Newly
+  // copied and unswept objects lie between a 'front' mark and the
+  // allocation pointer.
   //
-  // There is guaranteed to be enough room at the top of the to space for the
-  // addresses of promoted objects: every object promoted frees up its size in
-  // bytes from the top of the new space, and objects are at least one pointer
-  // in size. Using the new space to record promoted addresses makes the
-  // scavenge collector agnostic to the allocation strategy (eg, linear or
-  // free-list) used in old space.
-  Address new_mark = new_space_.ToSpaceLow();
-  Address promoted_mark = new_space_.ToSpaceHigh();
-  promoted_top = new_space_.ToSpaceHigh();
+  // Promoted objects can go into various old-generation spaces, and
+  // can be allocated internally in the spaces (from the free list).
+  // We treat the top of the to space as a queue of addresses of
+  // promoted objects. The addresses of newly promoted and unswept
+  // objects lie between a 'front' mark and a 'rear' mark that is
+  // updated as a side effect of promoting an object.
+  //
+  // There is guaranteed to be enough room at the top of the to space
+  // for the addresses of promoted objects: every object promoted
+  // frees up its size in bytes from the top of the new space, and
+  // objects are at least one pointer in size.
+  Address new_space_front = new_space_.ToSpaceLow();
+  Address promoted_front = new_space_.ToSpaceHigh();
+  promoted_rear = new_space_.ToSpaceHigh();
 
   ScavengeVisitor scavenge_visitor;
   // Copy roots.
   IterateRoots(&scavenge_visitor);
 
-  // Copy objects reachable from the old generation. By definition, there
-  // are no intergenerational pointers in code or data spaces.
+  // Copy objects reachable from weak pointers.
+  GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+
+  // Copy objects reachable from the old generation. By definition,
+  // there are no intergenerational pointers in code or data spaces.
   IterateRSet(old_pointer_space_, &ScavengePointer);
   IterateRSet(map_space_, &ScavengePointer);
   lo_space_->IterateRSet(&ScavengePointer);
 
-  bool has_processed_weak_pointers = false;
-
-  while (true) {
-    ASSERT(new_mark <= new_space_.top());
-    ASSERT(promoted_mark >= promoted_top);
+  do {
+    ASSERT(new_space_front <= new_space_.top());
+    ASSERT(promoted_front >= promoted_rear);
 
-    // Copy objects reachable from newly copied objects.
-    while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
-      // Sweep newly copied objects in the to space. The allocation pointer
-      // can change during sweeping.
-      Address previous_top = new_space_.top();
-      SemiSpaceIterator new_it(new_space(), new_mark);
-      while (new_it.has_next()) {
-        new_it.next()->Iterate(&scavenge_visitor);
-      }
-      new_mark = previous_top;
+    // The addresses new_space_front and new_space_.top() define a
+    // queue of unprocessed copied objects. Process them until the
+    // queue is empty.
+    while (new_space_front < new_space_.top()) {
+      HeapObject* object = HeapObject::FromAddress(new_space_front);
+      object->Iterate(&scavenge_visitor);
+      new_space_front += object->Size();
+    }
 
-      // Sweep newly copied objects in the old space. The promotion 'top'
-      // pointer could change during sweeping.
-      previous_top = promoted_top;
-      for (Address current = promoted_mark - kPointerSize;
-           current >= previous_top;
-           current -= kPointerSize) {
-        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
-        object->Iterate(&scavenge_visitor);
-        UpdateRSet(object);
-      }
-      promoted_mark = previous_top;
+    // The addresses promoted_front and promoted_rear define a queue
+    // of unprocessed addresses of promoted objects. Process them
+    // until the queue is empty.
+    while (promoted_front > promoted_rear) {
+      promoted_front -= kPointerSize;
+      HeapObject* object =
+          HeapObject::cast(Memory::Object_at(promoted_front));
+      object->Iterate(&scavenge_visitor);
+      UpdateRSet(object);
     }
 
-    if (has_processed_weak_pointers) break;  // We are done.
-    // Copy objects reachable from weak pointers.
-    GlobalHandles::IterateWeakRoots(&scavenge_visitor);
-    has_processed_weak_pointers = true;
-  }
+    // Take another spin if there are now unswept objects in new space
+    // (there are currently no more unswept promoted objects).
+  } while (new_space_front < new_space_.top());
 
   // Set age mark.
-  new_space_.set_age_mark(new_mark);
+  new_space_.set_age_mark(new_space_.top());
 
   LOG(ResourceEvent("scavenge", "end"));
@@ -882,8 +890,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
   if (target_space == Heap::old_pointer_space_) {
     // Record the object's address at the top of the to space, to allow
     // it to be swept by the scavenger.
-    promoted_top -= kPointerSize;
-    Memory::Object_at(promoted_top) = *p;
+    promoted_rear -= kPointerSize;
+    Memory::Object_at(promoted_rear) = *p;
   } else {
 #ifdef DEBUG
     // Objects promoted to the data space should not have pointers to
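Taken together, the Scavenge() and ScavengeObjectSlow() hunks above replace the mark-based sweep with two queues that share the to space: copied objects queue up from the bottom between new_space_front and the allocation top, while addresses of promoted objects queue down from the top between promoted_front and promoted_rear. A flat-array model of that layout (a hypothetical stand-in for the semispace, one word per entry):

#include <stdint.h>

// Flat-array model of the to space (hypothetical stand-in; one word
// per entry). Copied objects fill it from the bottom; addresses of
// promoted objects are pushed down from the top.
const int kWords = 1024;
uintptr_t to_space[kWords];

int alloc_top = 0;            // Bump-allocation pointer (grows upward).
int new_space_front = 0;      // Front of the copied-object queue.
int promoted_front = kWords;  // Front of the promoted-address queue.
int promoted_rear = kWords;   // Rear; pushed downward on promotion.

// Called when an object is promoted: record its address at the top of
// the to space. Every promotion frees at least one word of to-space
// budget, so the two ends cannot collide.
void RecordPromotion(uintptr_t object_address) {
  promoted_rear -= 1;
  to_space[promoted_rear] = object_address;
}

// Drain both queues until nothing is unprocessed. Visiting an entry
// may push onto either queue (copy or promote more objects), hence
// the outer loop.
void ProcessQueues() {
  do {
    while (new_space_front < alloc_top) {
      // Visit the copied object at to_space[new_space_front], then
      // advance by its size (one word in this toy model).
      new_space_front += 1;
    }
    while (promoted_front > promoted_rear) {
      promoted_front -= 1;
      uintptr_t promoted = to_space[promoted_front];
      (void)promoted;  // Visit the promoted object; update its RSet.
    }
  } while (new_space_front < alloc_top);
}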
@@ -64,15 +64,16 @@ HeapObject* HeapObjectIterator::next() {
 // PageIterator
 
 bool PageIterator::has_next() {
-  return cur_page_ != stop_page_;
+  return prev_page_ != stop_page_;
 }
 
 
 Page* PageIterator::next() {
   ASSERT(has_next());
-  Page* result = cur_page_;
-  cur_page_ = cur_page_->next_page();
-  return result;
+  prev_page_ = (prev_page_ == NULL)
+      ? space_->first_page_
+      : prev_page_->next_page();
+  return prev_page_;
 }
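The rework above changes the iteration protocol: instead of cur_page_ plus a one-past-the-end sentinel, the iterator tracks prev_page_, the page it returned last, and stops after returning stop_page_ itself, so the stop page can simply be the cached last page. A self-contained sketch of the same protocol (toy types, not the V8 classes):

// Toy version of the reworked protocol (hypothetical types; the real
// code is PageIterator). stop_ is the last page to return, inclusive,
// and prev_ lags one step behind the iteration.
struct ToyPage {
  ToyPage* next;
};

class ToyPageIterator {
 public:
  ToyPageIterator(ToyPage* first, ToyPage* stop)
      : first_(first), prev_(nullptr), stop_(stop) {}

  // True until the stop page itself has been returned.
  bool has_next() const { return prev_ != stop_; }

  // The first call returns the first page; later calls return the
  // successor of the page returned last time.
  ToyPage* next() {
    prev_ = (prev_ == nullptr) ? first_ : prev_->next;
    return prev_;
  }

 private:
  ToyPage* first_;
  ToyPage* prev_;  // Previous page returned.
  ToyPage* stop_;  // Last page to return (inclusive).
};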
@@ -111,17 +111,26 @@ void HeapObjectIterator::Verify() {
 // -----------------------------------------------------------------------------
 // PageIterator
 
-PageIterator::PageIterator(PagedSpace* space, Mode mode) {
-  cur_page_ = space->first_page_;
+PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
+  prev_page_ = NULL;
   switch (mode) {
     case PAGES_IN_USE:
-      stop_page_ = space->AllocationTopPage()->next_page();
+      stop_page_ = space->AllocationTopPage();
       break;
     case PAGES_USED_BY_MC:
-      stop_page_ = space->MCRelocationTopPage()->next_page();
+      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
-      stop_page_ = Page::FromAddress(NULL);
+#ifdef DEBUG
+      // Verify that the cached last page in the space is actually the
+      // last page.
+      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
+        if (!p->next_page()->is_valid()) {
+          ASSERT(space->last_page_ == p);
+        }
+      }
+#endif
+      stop_page_ = space->last_page_;
       break;
     default:
       UNREACHABLE();
@@ -496,8 +505,11 @@ bool PagedSpace::Setup(Address start, size_t size) {
   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
 
+  // Sequentially initialize remembered sets in the newly allocated
+  // pages and cache the current last page in the space.
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
     p->ClearRSet();
+    last_page_ = p;
   }
 
   // Use first_page_ for allocation.
@@ -676,9 +688,11 @@ bool PagedSpace::Expand(Page* last_page) {
 
   MemoryAllocator::SetNextPage(last_page, p);
 
-  // Clear remembered set of new pages.
+  // Sequentially clear remembered set of new pages and cache the
+  // new last page in the space.
   while (p->is_valid()) {
     p->ClearRSet();
+    last_page_ = p;
     p = p->next_page();
   }
 
@@ -723,10 +737,13 @@ void PagedSpace::Shrink() {
   Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
   MemoryAllocator::SetNextPage(last_page_to_keep, p);
 
-  // Since pages are only freed in whole chunks, we may have kept more than
-  // pages_to_keep.
+  // Since pages are only freed in whole chunks, we may have kept more
+  // than pages_to_keep. Count the extra pages and cache the new last
+  // page in the space.
+  last_page_ = last_page_to_keep;
   while (p->is_valid()) {
     pages_to_keep++;
+    last_page_ = p;
     p = p->next_page();
   }
 
src/spaces.h | 56
@@ -511,11 +511,22 @@ class ObjectIterator : public Malloced {
 //
 // A HeapObjectIterator iterates objects from a given address to the
 // top of a space. The given address must be below the current
-// allocation pointer (space top). If the space top changes during
-// iteration (because of allocating new objects), the iterator does
-// not iterate new objects. The caller function must create a new
-// iterator starting from the old top in order to visit these new
-// objects. Heap::Scavenage() is such an example.
+// allocation pointer (space top). There are some caveats.
+//
+// (1) If the space top changes upward during iteration (because of
+//     allocating new objects), the iterator does not iterate objects
+//     above the original space top. The caller must create a new
+//     iterator starting from the old top in order to visit these new
+//     objects.
+//
+// (2) If new objects are allocated below the original allocation top
+//     (e.g., free-list allocation in paged spaces), the new objects
+//     may or may not be iterated depending on their position with
+//     respect to the current point of iteration.
+//
+// (3) The space top should not change downward during iteration,
+//     otherwise the iterator will return not-necessarily-valid
+//     objects.
 
 class HeapObjectIterator: public ObjectIterator {
  public:
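Caveat (1) is the pattern the scavenger relies on: snapshot the top, iterate up to the snapshot, and pick up anything allocated in the meantime with another pass from the old top. A minimal illustration (toy arena, hypothetical names; indices stand in for addresses):

#include <vector>
#include <cstddef>

std::vector<int> arena;  // Toy space; an index stands in for an address.

// Visit every element, including ones appended ("allocated") while
// visiting. Each pass snapshots the top; elements added above the
// snapshot are picked up by the next pass, mirroring how the scavenger
// re-scans from the old top.
void VisitAll() {
  std::size_t front = 0;
  while (front < arena.size()) {
    std::size_t top_snapshot = arena.size();
    for (; front < top_snapshot; ++front) {
      // Visiting arena[front] may push_back new elements; this pass
      // never sees them because it stops at top_snapshot.
    }
  }
}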
@@ -559,17 +570,35 @@ class HeapObjectIterator: public ObjectIterator {
 
 
 // -----------------------------------------------------------------------------
-// A PageIterator iterates pages in a space.
+// A PageIterator iterates the pages in a paged space.
 //
 // The PageIterator class provides three modes for iterating pages in a space:
-//   PAGES_IN_USE iterates pages that are in use by the allocator;
-//   PAGES_USED_BY_GC iterates pages that hold relocated objects during a
-//      mark-compact collection;
+//   PAGES_IN_USE iterates pages containing allocated objects.
+//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
+//      mark-compact collection.
 //   ALL_PAGES iterates all pages in the space.
+//
+// There are some caveats.
+//
+// (1) If the space expands during iteration, new pages will not be
+//     returned by the iterator in any mode.
+//
+// (2) If new objects are allocated during iteration, they will appear
+//     in pages returned by the iterator. Allocation may cause the
+//     allocation pointer or MC allocation pointer in the last page to
+//     change between constructing the iterator and iterating the last
+//     page.
+//
+// (3) The space should not shrink during iteration, otherwise the
+//     iterator will return deallocated pages.
 
 class PageIterator BASE_EMBEDDED {
  public:
-  enum Mode {PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES};
+  enum Mode {
+    PAGES_IN_USE,
+    PAGES_USED_BY_MC,
+    ALL_PAGES
+  };
 
   PageIterator(PagedSpace* space, Mode mode);
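A usage sketch under these caveats (assuming the declarations in this header are in scope; the counts are stable only if the space neither expands nor shrinks while iterating):

void CountPages(PagedSpace* space, int* in_use, int* total) {
  *in_use = 0;
  PageIterator used_it(space, PageIterator::PAGES_IN_USE);
  while (used_it.has_next()) {
    used_it.next();
    (*in_use)++;
  }

  *total = 0;
  PageIterator all_it(space, PageIterator::ALL_PAGES);
  while (all_it.has_next()) {
    all_it.next();
    (*total)++;
  }
  // *in_use <= *total: PAGES_IN_USE stops at the page holding the
  // allocation top; ALL_PAGES stops at the cached last page.
}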
@@ -577,8 +606,9 @@ class PageIterator BASE_EMBEDDED {
   inline Page* next();
 
  private:
-  Page* cur_page_;  // next page to return
-  Page* stop_page_;  // page where to stop
+  PagedSpace* space_;
+  Page* prev_page_;  // Previous page returned.
+  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
 };
@@ -809,6 +839,10 @@ class PagedSpace : public Space {
   // The first page in this space.
   Page* first_page_;
 
+  // The last page in this space. Initially set in Setup, updated in
+  // Expand and Shrink.
+  Page* last_page_;
+
   // Normal allocation information.
   AllocationInfo allocation_info_;