[heap] Clean up stale store buffer entries for aborted pages.

Fix missed store buffer entries for live objects on aborted pages. Marking
the page as scan_on_scavenge takes care of rebuilding the entries. Note that
this requires an additional case in the rebuilding logic, as we cannot iterate
an aborted page using the object layout but instead have to use its mark bits
(see the sketch below).

BUG=chromium:524425, chromium:564498
LOG=N

Review URL: https://codereview.chromium.org/1497883003

Cr-Commit-Position: refs/heads/master@{#32610}
Author: mlippautz, 2015-12-04 04:10:54 -08:00 (committed by Commit bot)
parent b12db2574d
commit 26fcd8300b
3 changed files with 22 additions and 10 deletions
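
The commit message boils down to the following flow. This is a minimal,
self-contained sketch under invented names (SketchPage, SketchStoreBuffer and
HandleAbortedPage are not V8 classes or functions); the real change is in the
hunks below.

#include <algorithm>
#include <vector>

// Hypothetical stand-ins for V8's Page and StoreBuffer.
struct SketchPage {
  int id = 0;
  bool compaction_was_aborted = false;
  bool scan_on_scavenge = false;  // Request a rescan for old-to-new pointers.
};

struct SketchStoreBuffer {
  // Each recorded entry remembers which page holds the slot it points at.
  struct Entry { int page_id; };
  std::vector<Entry> entries;

  // Drop entries for the given page: after an aborted compaction the memory
  // they describe may be reused, so stale entries could become "valid" again.
  void FilterEntriesFor(const SketchPage& page) {
    entries.erase(std::remove_if(entries.begin(), entries.end(),
                                 [&](const Entry& e) {
                                   return e.page_id == page.id;
                                 }),
                  entries.end());
  }
};

// Mirrors the abort path in EvacuatePagesInParallel() below: flag the page so
// the next store buffer scan rebuilds its entries from the objects that are
// still live on it (via mark bits for aborted pages, see the last hunk).
void HandleAbortedPage(SketchPage* page, SketchStoreBuffer* buffer) {
  page->compaction_was_aborted = true;
  page->scan_on_scavenge = true;
  buffer->FilterEntriesFor(*page);
}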

src/heap/mark-compact.cc

@@ -3292,8 +3292,13 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         //   happens upon moving (which we potentially didn't do).
         // - Leave the page in the list of pages of a space since we could not
         //   fully evacuate it.
+        // - Mark them for rescanning for store buffer entries as we otherwise
+        //   might have stale store buffer entries that become "valid" again
+        //   after reusing the memory. Note that all existing store buffer
+        //   entries of such pages are filtered before rescanning.
         DCHECK(p->IsEvacuationCandidate());
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+        p->set_scan_on_scavenge(true);
         abandoned_pages++;
         break;
       case MemoryChunk::kCompactingFinalize:
@@ -3705,9 +3710,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     // First pass on aborted pages, fixing up all live objects.
     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-      // Clearing the evacuation candidate flag here has the effect of
-      // stopping recording of slots for it in the following pointer
-      // update phases.
       p->ClearEvacuationCandidate();
       VisitLiveObjects(p, &updating_visitor);
     }

src/heap/mark-compact.h

@@ -797,6 +797,7 @@ class MarkCompactCollector {
   base::Semaphore pending_compaction_tasks_semaphore_;
   friend class Heap;
+  friend class StoreBuffer;
 };

src/heap/store-buffer.cc

@@ -490,13 +490,22 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
         }
       }
     } else {
-      heap_->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
-          page);
-      HeapObjectIterator iterator(page);
-      for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
-           heap_object = iterator.Next()) {
-        // We iterate over objects that contain new space pointers only.
-        heap_object->IterateBody(&visitor);
+      if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+        // Aborted pages require iterating using mark bits because they
+        // don't have an iterable object layout before sweeping (which can
+        // only happen later). Note that we can never reach an
+        // aborted page through the scavenger.
+        DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
+        heap_->mark_compact_collector()->VisitLiveObjects(page, &visitor);
+      } else {
+        heap_->mark_compact_collector()
+            ->SweepOrWaitUntilSweepingCompleted(page);
+        HeapObjectIterator iterator(page);
+        for (HeapObject* heap_object = iterator.Next();
+             heap_object != nullptr; heap_object = iterator.Next()) {
+          // We iterate over objects that contain new space pointers only.
+          heap_object->IterateBody(&visitor);
+        }
       }
     }
   }
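
To make the mark-bit requirement concrete: below is a hedged, self-contained
sketch of visiting live objects through a mark bitmap when a page's object
layout cannot be walked linearly. SketchHeapObject, SketchMarkedPage and
VisitLiveObjectsSketch are invented names, not V8's actual implementation of
MarkCompactCollector::VisitLiveObjects().

#include <cstddef>
#include <functional>
#include <vector>

struct SketchHeapObject {
  // Pointer fields would be visited here to find new-space pointers.
};

struct SketchMarkedPage {
  std::vector<bool> mark_bits;             // Set bit == object is live.
  std::vector<SketchHeapObject*> objects;  // Object starts, parallel array.
};

// Consult mark bits instead of walking a linear (and, on an aborted page,
// not yet iterable) object layout.
void VisitLiveObjectsSketch(
    const SketchMarkedPage& page,
    const std::function<void(SketchHeapObject*)>& visit) {
  for (size_t i = 0; i < page.mark_bits.size(); ++i) {
    if (page.mark_bits[i]) visit(page.objects[i]);
  }
}

In V8 the marking bitmap has one bit per pointer-sized word and object starts
are derived from bit positions; the parallel array above only keeps the sketch
short.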