Revert r1900, r1897 and r1895, which are all GC changes.  The changes
to the page iterator lead to occasional crashes in tests.

Review URL: http://codereview.chromium.org/113262

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1915 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 18f69a7171
commit 00addb277a

src/heap.cc (144 changed lines)
@@ -538,7 +538,7 @@ class ScavengeVisitor: public ObjectVisitor {
 
 
 // Shared state read by the scavenge collector and set by ScavengeObject.
-static Address promoted_rear = NULL;
+static Address promoted_top = NULL;
 
 
 #ifdef DEBUG
@@ -554,34 +554,24 @@ class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
     }
   }
 };
 
 
-static void VerifyNonPointerSpacePointers() {
-  // Verify that there are no pointers to new space in spaces where we
-  // do not expect them.
-  VerifyNonPointerSpacePointersVisitor v;
-  HeapObjectIterator code_it(Heap::code_space());
-  while (code_it.has_next()) {
-    HeapObject* object = code_it.next();
-    if (object->IsCode()) {
-      Code::cast(object)->ConvertICTargetsFromAddressToObject();
-      object->Iterate(&v);
-      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
-    } else {
-      // If we find non-code objects in code space (e.g., free list
-      // nodes) we want to verify them as well.
-      object->Iterate(&v);
-    }
-  }
-
-  HeapObjectIterator data_it(Heap::old_data_space());
-  while (data_it.has_next()) data_it.next()->Iterate(&v);
-}
 #endif
 
 void Heap::Scavenge() {
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
+  if (FLAG_enable_slow_asserts) {
+    VerifyNonPointerSpacePointersVisitor v;
+    HeapObjectIterator it(code_space_);
+    while (it.has_next()) {
+      HeapObject* object = it.next();
+      if (object->IsCode()) {
+        Code::cast(object)->ConvertICTargetsFromAddressToObject();
+      }
+      object->Iterate(&v);
+      if (object->IsCode()) {
+        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+      }
+    }
+  }
 #endif
 
   gc_state_ = SCAVENGE;
@@ -606,70 +596,72 @@ void Heap::Scavenge() {
   new_space_.Flip();
   new_space_.ResetAllocationInfo();
 
-  // We need to sweep newly copied objects which can be either in the
-  // to space or promoted to the old generation.  For to-space
-  // objects, we treat the bottom of the to space as a queue.  Newly
-  // copied and unswept objects lie between a 'front' mark and the
-  // allocation pointer.
+  // We need to sweep newly copied objects which can be in either the to space
+  // or the old space.  For to space objects, we use a mark.  Newly copied
+  // objects lie between the mark and the allocation top.  For objects
+  // promoted to old space, we write their addresses downward from the top of
+  // the new space.  Sweeping newly promoted objects requires an allocation
+  // pointer and a mark.  Note that the allocation pointer 'top' actually
+  // moves downward from the high address in the to space.
   //
-  // Promoted objects can go into various old-generation spaces, and
-  // can be allocated internally in the spaces (from the free list).
-  // We treat the top of the to space as a queue of addresses of
-  // promoted objects.  The addresses of newly promoted and unswept
-  // objects lie between a 'front' mark and a 'rear' mark that is
-  // updated as a side effect of promoting an object.
-  //
-  // There is guaranteed to be enough room at the top of the to space
-  // for the addresses of promoted objects: every object promoted
-  // frees up its size in bytes from the top of the new space, and
-  // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceLow();
-  Address promoted_front = new_space_.ToSpaceHigh();
-  promoted_rear = new_space_.ToSpaceHigh();
+  // There is guaranteed to be enough room at the top of the to space for the
+  // addresses of promoted objects: every object promoted frees up its size in
+  // bytes from the top of the new space, and objects are at least one pointer
+  // in size.  Using the new space to record promoted addresses makes the
+  // scavenge collector agnostic to the allocation strategy (eg, linear or
+  // free-list) used in old space.
+  Address new_mark = new_space_.ToSpaceLow();
+  Address promoted_mark = new_space_.ToSpaceHigh();
+  promoted_top = new_space_.ToSpaceHigh();
 
   ScavengeVisitor scavenge_visitor;
   // Copy roots.
   IterateRoots(&scavenge_visitor);
 
-  // Copy objects reachable from weak pointers.
-  GlobalHandles::IterateWeakRoots(&scavenge_visitor);
-
-  // Copy objects reachable from the old generation.  By definition,
-  // there are no intergenerational pointers in code or data spaces.
+  // Copy objects reachable from the old generation.  By definition, there
+  // are no intergenerational pointers in code or data spaces.
   IterateRSet(old_pointer_space_, &ScavengePointer);
   IterateRSet(map_space_, &ScavengePointer);
   lo_space_->IterateRSet(&ScavengePointer);
 
-  do {
-    ASSERT(new_space_front <= new_space_.top());
-    ASSERT(promoted_front >= promoted_rear);
-
-    // The addresses new_space_front and new_space_.top() define a
-    // queue of unprocessed copied objects.  Process them until the
-    // queue is empty.
-    while (new_space_front < new_space_.top()) {
-      HeapObject* object = HeapObject::FromAddress(new_space_front);
-      object->Iterate(&scavenge_visitor);
-      new_space_front += object->Size();
+  bool has_processed_weak_pointers = false;
+
+  while (true) {
+    ASSERT(new_mark <= new_space_.top());
+    ASSERT(promoted_mark >= promoted_top);
+
+    // Copy objects reachable from newly copied objects.
+    while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
+      // Sweep newly copied objects in the to space.  The allocation pointer
+      // can change during sweeping.
+      Address previous_top = new_space_.top();
+      SemiSpaceIterator new_it(new_space(), new_mark);
+      while (new_it.has_next()) {
+        new_it.next()->Iterate(&scavenge_visitor);
+      }
+      new_mark = previous_top;
+
+      // Sweep newly copied objects in the old space.  The promotion 'top'
+      // pointer could change during sweeping.
+      previous_top = promoted_top;
+      for (Address current = promoted_mark - kPointerSize;
+           current >= previous_top;
+           current -= kPointerSize) {
+        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
+        object->Iterate(&scavenge_visitor);
+        UpdateRSet(object);
+      }
+      promoted_mark = previous_top;
     }
 
-    // The addresses promoted_front and promoted_rear define a queue
-    // of unprocessed addresses of promoted objects.  Process them
-    // until the queue is empty.
-    while (promoted_front > promoted_rear) {
-      promoted_front -= kPointerSize;
-      HeapObject* object =
-          HeapObject::cast(Memory::Object_at(promoted_front));
-      object->Iterate(&scavenge_visitor);
-      UpdateRSet(object);
-    }
-
-    // Take another spin if there are now unswept objects in new space
-    // (there are currently no more unswept promoted objects).
-  } while (new_space_front < new_space_.top());
+    if (has_processed_weak_pointers) break;  // We are done.
+    // Copy objects reachable from weak pointers.
+    GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+    has_processed_weak_pointers = true;
+  }
 
   // Set age mark.
-  new_space_.set_age_mark(new_space_.top());
+  new_space_.set_age_mark(new_mark);
 
   LOG(ResourceEvent("scavenge", "end"));
 
@@ -890,8 +882,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
   if (target_space == Heap::old_pointer_space_) {
     // Record the object's address at the top of the to space, to allow
     // it to be swept by the scavenger.
-    promoted_rear -= kPointerSize;
-    Memory::Object_at(promoted_rear) = *p;
+    promoted_top -= kPointerSize;
+    Memory::Object_at(promoted_top) = *p;
   } else {
 #ifdef DEBUG
     // Objects promoted to the data space should not have pointers to
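The restored code records the address of each promoted object in otherwise-unused to-space memory, growing downward from ToSpaceHigh() while allocation grows upward; the two cannot collide because every promoted object frees at least one pointer's worth of to space.  A minimal standalone sketch of that bookkeeping, with illustrative names (RecordPromotedAddress, the slots array) and plain C++ types rather than V8's Address and Memory helpers:

#include <cstdio>
#include <stdint.h>

typedef uintptr_t Address;
static const int kPointerSize = sizeof(Address);

// Grows downward from the high end of the to space as objects are
// promoted (the role of promoted_top above).
static Address promoted_top;

// Record one promoted object's address just below the current top,
// mirroring the two lines in Heap::ScavengeObjectSlow().
static void RecordPromotedAddress(Address object) {
  promoted_top -= kPointerSize;
  *reinterpret_cast<Address*>(promoted_top) = object;
}

int main() {
  // Stand-in for the to space: 16 pointer-sized slots.
  Address slots[16];
  Address to_space_high = reinterpret_cast<Address>(slots + 16);
  Address promoted_mark = to_space_high;  // nothing swept yet
  promoted_top = to_space_high;

  // "Promote" three objects (dummy addresses).
  RecordPromotedAddress(0x1000);
  RecordPromotedAddress(0x2000);
  RecordPromotedAddress(0x3000);

  // Sweep from the mark down to the top, as the restored loop in
  // Heap::Scavenge() does; addresses come back in promotion order.
  for (Address current = promoted_mark - kPointerSize;
       current >= promoted_top;
       current -= kPointerSize) {
    std::printf("sweep 0x%lx\n",
                (unsigned long)*reinterpret_cast<Address*>(current));
  }
  return 0;
}

Promotions performed while sweeping only push promoted_top lower, so they are picked up by the next pass of the outer loop.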
@@ -64,16 +64,15 @@ HeapObject* HeapObjectIterator::next() {
 // PageIterator
 
 bool PageIterator::has_next() {
-  return prev_page_ != stop_page_;
+  return cur_page_ != stop_page_;
 }
 
 
 Page* PageIterator::next() {
   ASSERT(has_next());
-  prev_page_ = (prev_page_ == NULL)
-      ? space_->first_page_
-      : prev_page_->next_page();
-  return prev_page_;
+  Page* result = cur_page_;
+  cur_page_ = cur_page_->next_page();
+  return result;
 }
 
 
@@ -111,17 +111,17 @@ void HeapObjectIterator::Verify() {
 // -----------------------------------------------------------------------------
 // PageIterator
 
-PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
-  prev_page_ = NULL;
+PageIterator::PageIterator(PagedSpace* space, Mode mode) {
+  cur_page_ = space->first_page_;
   switch (mode) {
     case PAGES_IN_USE:
-      stop_page_ = space->AllocationTopPage();
+      stop_page_ = space->AllocationTopPage()->next_page();
       break;
     case PAGES_USED_BY_MC:
-      stop_page_ = space->MCRelocationTopPage();
+      stop_page_ = space->MCRelocationTopPage()->next_page();
      break;
     case ALL_PAGES:
-      stop_page_ = space->last_page_;
+      stop_page_ = Page::FromAddress(NULL);
       break;
     default:
       UNREACHABLE();
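The revert also changes the iteration protocol itself: instead of remembering the previously returned page and treating stop_page_ as the last page to return, the restored iterator keeps the next page to return in cur_page_ and treats stop_page_ as one past the last page (ALL_PAGES uses the chain terminator, Page::FromAddress(NULL), as its sentinel).  A simplified model of the restored protocol, using plain pointers instead of V8's Page chaining; the struct and main() here are illustrative, not V8 code:

#include <cassert>
#include <cstdio>

// Simplified stand-in for V8's Page: pages form a singly linked chain
// that ends in a null page.
struct Page {
  int id;
  Page* next;
};

// Restored convention: cur_ is the next page to return, stop_ is one
// past the last page to return.
class PageIterator {
 public:
  PageIterator(Page* first, Page* stop) : cur_(first), stop_(stop) {}
  bool has_next() const { return cur_ != stop_; }
  Page* next() {
    assert(has_next());
    Page* result = cur_;
    cur_ = cur_->next;
    return result;
  }
 private:
  Page* cur_;   // next page to return
  Page* stop_;  // page where to stop
};

int main() {
  Page c = {3, NULL};
  Page b = {2, &c};
  Page a = {1, &b};

  // ALL_PAGES: the sentinel is the chain terminator itself.
  PageIterator all(&a, NULL);
  while (all.has_next()) std::printf("page %d\n", all.next()->id);

  // Pages up to and including b: stop at b's successor, the analogue
  // of AllocationTopPage()->next_page().
  PageIterator in_use(&a, &c);
  while (in_use.has_next()) std::printf("in use %d\n", in_use.next()->id);
  return 0;
}

With the one-past-the-end convention an empty range is simply cur_ == stop_ at construction, and next() needs no special case for the first call; the reverted prev_page_ variant, the page-iterator change the commit message blames for the crashes, had to branch on prev_page_ == NULL.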
@@ -496,11 +496,8 @@ bool PagedSpace::Setup(Address start, size_t size) {
   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
 
-  // Sequentially initialize remembered sets in the newly allocated
-  // pages and cache the current last page in the space.
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
     p->ClearRSet();
-    last_page_ = p;
   }
 
   // Use first_page_ for allocation.
@@ -679,11 +676,9 @@ bool PagedSpace::Expand(Page* last_page) {
 
   MemoryAllocator::SetNextPage(last_page, p);
 
-  // Sequentially clear remembered set of new pages and and cache the
-  // new last page in the space.
+  // Clear remembered set of new pages.
   while (p->is_valid()) {
     p->ClearRSet();
-    last_page_ = p;
     p = p->next_page();
   }
 
@@ -728,12 +723,10 @@ void PagedSpace::Shrink() {
   Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
   MemoryAllocator::SetNextPage(last_page_to_keep, p);
 
-  // Since pages are only freed in whole chunks, we may have kept more
-  // than pages_to_keep.  Count the extra pages and cache the new last
-  // page in the space.
+  // Since pages are only freed in whole chunks, we may have kept more than
+  // pages_to_keep.
   while (p->is_valid()) {
     pages_to_keep++;
-    last_page_ = p;
     p = p->next_page();
   }
 
src/spaces.h (58 changed lines)
@@ -511,22 +511,11 @@ class ObjectIterator : public Malloced {
 //
 // A HeapObjectIterator iterates objects from a given address to the
 // top of a space.  The given address must be below the current
-// allocation pointer (space top).  There are some caveats.
-//
-// (1) If the space top changes upward during iteration (because of
-//     allocating new objects), the iterator does not iterate objects
-//     above the original space top.  The caller must create a new
-//     iterator starting from the old top in order to visit these new
-//     objects.
-//
-// (2) If new objects are allocated below the original allocation top
-//     (e.g., free-list allocation in paged spaces), the new objects
-//     may or may not be iterated depending on their position with
-//     respect to the current point of iteration.
-//
-// (3) The space top should not change downward during iteration,
-//     otherwise the iterator will return not-necessarily-valid
-//     objects.
+// allocation pointer (space top).  If the space top changes during
+// iteration (because of allocating new objects), the iterator does
+// not iterate new objects.  The caller function must create a new
+// iterator starting from the old top in order to visit these new
+// objects.  Heap::Scavenage() is such an example.
 
 class HeapObjectIterator: public ObjectIterator {
  public:
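The restored comment documents the contract Heap::Scavenge() relies on: an iterator walks only up to the space top sampled at construction, and objects allocated during iteration are reached by constructing a fresh iterator from the old top.  A toy model of that wave pattern, where Space and the indices standing in for addresses are illustrative, not V8 types:

#include <cstdio>
#include <vector>

struct Space { std::vector<int> objects; };

// Walks from a start index up to the top sampled at construction;
// anything "allocated" afterwards needs a fresh iterator.
class HeapObjectIterator {
 public:
  HeapObjectIterator(const Space& s, size_t start)
      : space_(s), cur_(start), end_(s.objects.size()) {}
  bool has_next() const { return cur_ < end_; }
  int next() { return space_.objects[cur_++]; }
 private:
  const Space& space_;
  size_t cur_, end_;
};

int main() {
  Space space;
  space.objects = {10, 11, 12};

  size_t mark = 0;
  while (mark < space.objects.size()) {
    size_t old_top = space.objects.size();
    HeapObjectIterator it(space, mark);
    while (it.has_next()) {
      int obj = it.next();
      std::printf("visit %d\n", obj);
      // Visiting may allocate (copy or promote), growing the space.
      if (obj < 12) space.objects.push_back(obj + 100);
    }
    mark = old_top;  // the next wave starts at the previous top
  }
  return 0;
}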
@@ -570,35 +559,17 @@ class HeapObjectIterator: public ObjectIterator {
 
 
 // -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a paged space.
+// A PageIterator iterates pages in a space.
 //
 // The PageIterator class provides three modes for iterating pages in a space:
-//   PAGES_IN_USE iterates pages containing allocated objects.
-//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
-//     mark-compact collection.
+//   PAGES_IN_USE iterates pages that are in use by the allocator;
+//   PAGES_USED_BY_GC iterates pages that hold relocated objects during a
+//     mark-compact collection;
 //   ALL_PAGES iterates all pages in the space.
-//
-// There are some caveats.
-//
-// (1) If the space expands during iteration, new pages will not be
-//     returned by the iterator in any mode.
-//
-// (2) If new objects are allocated during iteration, they will appear
-//     in pages returned by the iterator.  Allocation may cause the
-//     allocation pointer or MC allocation pointer in the last page to
-//     change between constructing the iterator and iterating the last
-//     page.
-//
-// (3) The space should not shrink during iteration, otherwise the
-//     iterator will return deallocated pages.
 
 class PageIterator BASE_EMBEDDED {
  public:
-  enum Mode {
-    PAGES_IN_USE,
-    PAGES_USED_BY_MC,
-    ALL_PAGES
-  };
+  enum Mode {PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES};
 
   PageIterator(PagedSpace* space, Mode mode);
 
@@ -606,9 +577,8 @@ class PageIterator BASE_EMBEDDED {
   inline Page* next();
 
  private:
-  PagedSpace* space_;
-  Page* prev_page_;  // Previous page returned.
-  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
+  Page* cur_page_;  // next page to return
+  Page* stop_page_;  // page where to stop
 };
 
 
@@ -839,10 +809,6 @@ class PagedSpace : public Space {
   // The first page in this space.
   Page* first_page_;
 
-  // The last page in this space.  Initially set in Setup, updated in
-  // Expand and Shrink.
-  Page* last_page_;
-
   // Normal allocation information.
   AllocationInfo allocation_info_;
 