[heap] Refactor live object visitation

- Restructure the methods that are not allowed to fail to avoid a branch.
- Undo compaction changes for the reland, as they require further investigation.

This reverts commit 7dea0f7b38.

Bug: chromium:651354
Change-Id: I93e8601bcdec534f41f8e27fd83848f8ef0f1244
Reviewed-on: https://chromium-review.googlesource.com/549462
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46259}
Author: Michael Lippautz
Date:   2017-06-27 15:46:24 +02:00 (committed by Commit Bot)
Commit: ab028038db
Parent: 3c41aba2ab

2 changed files with 127 additions and 84 deletions
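
The first bullet describes a simple API split: previously every call site received a bool from the visitation helper and immediately asserted on it, so even infallible paths carried a success branch. The NoFail variants assert internally and return void. A minimal standalone sketch of the pattern, using simplified Object/Visitor stand-ins of my own rather than the V8 classes:

// Sketch only: simplified stand-ins, not the V8 types.
#include <cassert>
#include <vector>

struct Object {};

struct Visitor {
  // Returns false when visiting |object| fails (e.g. an allocation failure
  // while copying it).
  bool Visit(Object* object) { (void)object; return true; }
};

// Fallible variant: the caller handles failure and learns which object failed.
inline bool VisitAll(const std::vector<Object*>& objects, Visitor* visitor,
                     Object** failed_object) {
  for (Object* object : objects) {
    if (!visitor->Visit(object)) {
      *failed_object = object;  // Report which object aborted the walk.
      return false;
    }
  }
  return true;
}

// No-fail variant: asserts internally and returns void, so call sites that
// must succeed carry no success branch at all.
inline void VisitAllNoFail(const std::vector<Object*>& objects,
                           Visitor* visitor) {
  for (Object* object : objects) {
    const bool success = visitor->Visit(object);
    assert(success);  // Visitation is not allowed to fail here.
    (void)success;    // Silence unused-variable warnings in release builds.
  }
}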

File 1 of 2:

@@ -3518,7 +3518,7 @@ class Evacuator : public Malloced {

   virtual ~Evacuator() {}

-  bool EvacuatePage(Page* page);
+  void EvacuatePage(Page* page);

   void AddObserver(MigrationObserver* observer) {
     new_space_visitor_.AddObserver(observer);
@@ -3536,7 +3536,7 @@ class Evacuator : public Malloced {
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;

   // |saved_live_bytes| returns the live bytes of the page that was processed.
-  virtual bool RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
+  virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;

   inline Heap* heap() { return heap_; }
@@ -3564,31 +3564,29 @@ class Evacuator : public Malloced {
   intptr_t bytes_compacted_;
 };

-bool Evacuator::EvacuatePage(Page* page) {
-  bool success = false;
+void Evacuator::EvacuatePage(Page* page) {
   DCHECK(page->SweepingDone());
   intptr_t saved_live_bytes = 0;
   double evacuation_time = 0.0;
   {
     AlwaysAllocateScope always_allocate(heap()->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = RawEvacuatePage(page, &saved_live_bytes);
+    RawEvacuatePage(page, &saved_live_bytes);
   }
   ReportCompactionProgress(evacuation_time, saved_live_bytes);
   if (FLAG_trace_evacuation) {
-    PrintIsolate(heap()->isolate(),
-                 "evacuation[%p]: page=%p new_space=%d "
-                 "page_evacuation=%d executable=%d contains_age_mark=%d "
-                 "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
-                 static_cast<void*>(this), static_cast<void*>(page),
-                 page->InNewSpace(),
-                 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
-                     page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
-                 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
-                 page->Contains(heap()->new_space()->age_mark()),
-                 saved_live_bytes, evacuation_time, success);
+    PrintIsolate(
+        heap()->isolate(),
+        "evacuation[%p]: page=%p new_space=%d "
+        "page_evacuation=%d executable=%d contains_age_mark=%d "
+        "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
+        static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
+        page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+            page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+        page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+        page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
+        evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
   }
-  return success;
 }

 void Evacuator::Finalize() {
@@ -3616,64 +3614,51 @@ class FullEvacuator : public Evacuator {
       : Evacuator(collector->heap(), record_visitor), collector_(collector) {}

  protected:
-  bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+  void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;

   MarkCompactCollector* collector_;
 };

-bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
-  bool success = false;
-  LiveObjectVisitor object_visitor;
+void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
   const MarkingState state = collector_->marking_state(page);
   *live_bytes = state.live_bytes();
+  HeapObject* failed_object = nullptr;
   switch (ComputeEvacuationMode(page)) {
     case kObjectsNewToOld:
-      success = object_visitor.VisitBlackObjects(
+      LiveObjectVisitor::VisitBlackObjectsNoFail(
           page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
-      DCHECK(success);
       ArrayBufferTracker::ProcessBuffers(
           page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
       break;
     case kPageNewToOld:
-      success = object_visitor.VisitBlackObjects(
+      LiveObjectVisitor::VisitBlackObjectsNoFail(
           page, state, &new_to_old_page_visitor_,
           LiveObjectVisitor::kKeepMarking);
-      DCHECK(success);
       new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
       // ArrayBufferTracker will be updated during sweeping.
       break;
     case kPageNewToNew:
-      success = object_visitor.VisitBlackObjects(
+      LiveObjectVisitor::VisitBlackObjectsNoFail(
           page, state, &new_to_new_page_visitor_,
           LiveObjectVisitor::kKeepMarking);
-      DCHECK(success);
       new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
       // ArrayBufferTracker will be updated during sweeping.
       break;
-    case kObjectsOldToOld:
-      success = object_visitor.VisitBlackObjects(
-          page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+    case kObjectsOldToOld: {
+      const bool success = LiveObjectVisitor::VisitBlackObjects(
+          page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits,
+          &failed_object);
       if (!success) {
-        // Aborted compaction page. We have to record slots here, since we
-        // might not have recorded them in first place.
-        // Note: We mark the page as aborted here to be able to record slots
-        // for code objects in |RecordMigratedSlotVisitor| and to be able
-        // to identify the page later on for post processing.
-        page->SetFlag(Page::COMPACTION_WAS_ABORTED);
-        EvacuateRecordOnlyVisitor record_visitor(heap());
-        success = object_visitor.VisitBlackObjects(
-            page, state, &record_visitor, LiveObjectVisitor::kKeepMarking);
-        ArrayBufferTracker::ProcessBuffers(
-            page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
-        DCHECK(success);
-        success = false;
+        // Aborted compaction page. Actual processing happens on the main
+        // thread for simplicity reasons.
+        collector_->ReportAbortedEvacuationCandidate(failed_object, page);
       } else {
         ArrayBufferTracker::ProcessBuffers(
             page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
       }
       break;
+    }
   }
-  return success;
 }

 class YoungGenerationEvacuator : public Evacuator {
@@ -3683,30 +3668,26 @@ class YoungGenerationEvacuator : public Evacuator {
       : Evacuator(collector->heap(), record_visitor), collector_(collector) {}

  protected:
-  bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+  void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;

   MinorMarkCompactCollector* collector_;
 };

-bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
                                                intptr_t* live_bytes) {
-  bool success = false;
-  LiveObjectVisitor object_visitor;
   const MarkingState state = collector_->marking_state(page);
   *live_bytes = state.live_bytes();
   switch (ComputeEvacuationMode(page)) {
     case kObjectsNewToOld:
-      success = object_visitor.VisitGreyObjectsNoFail(
+      LiveObjectVisitor::VisitGreyObjectsNoFail(
           page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
-      DCHECK(success);
       ArrayBufferTracker::ProcessBuffers(
           page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
       break;
     case kPageNewToOld:
-      success = object_visitor.VisitGreyObjectsNoFail(
+      LiveObjectVisitor::VisitGreyObjectsNoFail(
           page, state, &new_to_old_page_visitor_,
           LiveObjectVisitor::kKeepMarking);
-      DCHECK(success);
       new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
       // TODO(mlippautz): If cleaning array buffers is too slow here we can
       // delay it until the next GC.
@@ -3723,10 +3704,9 @@ bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
       }
       break;
     case kPageNewToNew:
-      success = object_visitor.VisitGreyObjectsNoFail(
+      LiveObjectVisitor::VisitGreyObjectsNoFail(
          page, state, &new_to_new_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
-      DCHECK(success);
       new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
       // TODO(mlippautz): If cleaning array buffers is too slow here we can
       // delay it until the next GC.
@@ -3746,7 +3726,6 @@ bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
       UNREACHABLE();
       break;
   }
-  return success;
 }

 class PageEvacuationItem : public ItemParallelJob::Item {
@@ -4076,8 +4055,8 @@ bool MarkCompactCollector::WillBeDeoptimized(Code* code) {

 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
   EvacuateRecordOnlyVisitor visitor(heap());
-  LiveObjectVisitor object_visitor;
-  object_visitor.VisitBlackObjects(page, MarkingState::Internal(page), &visitor,
-                                   LiveObjectVisitor::kKeepMarking);
+  LiveObjectVisitor::VisitBlackObjectsNoFail(page, MarkingState::Internal(page),
+                                             &visitor,
+                                             LiveObjectVisitor::kKeepMarking);
 }
@@ -4085,7 +4064,8 @@ template <class Visitor>
 bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
                                           const MarkingState& state,
                                           Visitor* visitor,
-                                          IterationMode iteration_mode) {
+                                          IterationMode iteration_mode,
+                                          HeapObject** failed_object) {
   for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, state)) {
     HeapObject* const object = object_and_size.first;
     if (!visitor->Visit(object, object_and_size.second)) {
@@ -4093,12 +4073,7 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
         state.bitmap()->ClearRange(
             chunk->AddressToMarkbitIndex(chunk->area_start()),
             chunk->AddressToMarkbitIndex(object->address()));
-        RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, chunk->address(),
                                                object->address(),
-                                               SlotSet::PREFREE_EMPTY_BUCKETS);
-        RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(),
-                                                    object->address());
-        RecomputeLiveBytes(chunk, state);
+        *failed_object = object;
       }
       return false;
     }
@@ -4110,21 +4085,37 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
 }

 template <class Visitor>
-bool LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
+void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
+                                                const MarkingState& state,
+                                                Visitor* visitor,
+                                                IterationMode iteration_mode) {
+  for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, state)) {
+    HeapObject* const object = object_and_size.first;
+    DCHECK(ObjectMarking::IsBlack(object, state));
+    const bool success = visitor->Visit(object, object_and_size.second);
+    USE(success);
+    DCHECK(success);
+  }
+  if (iteration_mode == kClearMarkbits) {
+    state.ClearLiveness();
+  }
+}
+
+template <class Visitor>
+void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
                                                const MarkingState& state,
                                                Visitor* visitor,
                                                IterationMode iteration_mode) {
   for (auto object_and_size : LiveObjectRange<kGreyObjects>(chunk, state)) {
     HeapObject* const object = object_and_size.first;
     DCHECK(ObjectMarking::IsGrey(object, state));
-    if (!visitor->Visit(object, object_and_size.second)) {
-      UNREACHABLE();
-    }
+    const bool success = visitor->Visit(object, object_and_size.second);
+    USE(success);
+    DCHECK(success);
   }
   if (iteration_mode == kClearMarkbits) {
     state.ClearLiveness();
   }
-  return true;
 }

 void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
@@ -4560,18 +4551,56 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
   }
 }

+void MarkCompactCollector::ReportAbortedEvacuationCandidate(
+    HeapObject* failed_object, Page* page) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+  aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
+}
+
 void MarkCompactCollector::PostProcessEvacuationCandidates() {
-  int aborted_pages = 0;
+  for (auto object_and_page : aborted_evacuation_candidates_) {
+    HeapObject* failed_object = object_and_page.first;
+    Page* page = object_and_page.second;
+    DCHECK(page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+    // Aborted compaction page. We have to record slots here, since we
+    // might not have recorded them in first place.
+    // Remove outdated slots.
+    RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
+                                           failed_object->address(),
+                                           SlotSet::PREFREE_EMPTY_BUCKETS);
+    RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
+                                                failed_object->address());
+    const MarkingState state = marking_state(page);
+    // Recompute live bytes.
+    LiveObjectVisitor::RecomputeLiveBytes(page, state);
+    // Re-record slots.
+    EvacuateRecordOnlyVisitor record_visitor(heap());
+    LiveObjectVisitor::VisitBlackObjectsNoFail(page, state, &record_visitor,
+                                               LiveObjectVisitor::kKeepMarking);
+    // Fix up array buffers.
+    ArrayBufferTracker::ProcessBuffers(
+        page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+  }
+  const int aborted_pages =
+      static_cast<int>(aborted_evacuation_candidates_.size());
+  aborted_evacuation_candidates_.clear();
+  int aborted_pages_verified = 0;
   for (Page* p : old_space_evacuation_pages_) {
     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+      // After clearing the evacuation candidate flag the page is again in a
+      // regular state.
       p->ClearEvacuationCandidate();
-      aborted_pages++;
+      aborted_pages_verified++;
     } else {
       DCHECK(p->IsEvacuationCandidate());
       DCHECK(p->SweepingDone());
       p->Unlink();
     }
   }
+  DCHECK_EQ(aborted_pages_verified, aborted_pages);
   if (FLAG_trace_evacuation && (aborted_pages > 0)) {
     PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
                  isolate()->time_millis_since_init(), aborted_pages);
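
Taken together, the changes above move aborted-compaction handling to a gather-then-process scheme: parallel evacuators only record the failing object and page under the new mutex_, and the main thread later removes stale slots, recomputes live bytes, and re-records slots in PostProcessEvacuationCandidates. A hedged sketch of that shape, with simplified standalone types rather than the V8 API:

// Sketch only: gather failures under a lock, process them single-threaded.
#include <mutex>
#include <utility>
#include <vector>

struct HeapObject {};
struct Page {};

class Collector {
 public:
  // Called from parallel evacuator threads: record the failure, nothing more.
  void ReportAborted(HeapObject* failed_object, Page* page) {
    std::lock_guard<std::mutex> guard(mutex_);
    aborted_.push_back(std::make_pair(failed_object, page));
  }

  // Called on the main thread after all evacuation tasks have joined, so the
  // expensive fix-up needs no further synchronization.
  void PostProcess() {
    for (const auto& object_and_page : aborted_) {
      (void)object_and_page;
      // Remove stale slots up to object_and_page.first, recompute live
      // bytes, and re-record slots on object_and_page.second here.
    }
    aborted_.clear();
  }

 private:
  std::mutex mutex_;
  std::vector<std::pair<HeapObject*, Page*>> aborted_;
};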

File 2 of 2:

@@ -230,28 +230,38 @@ class LiveObjectRange {
   Address end_;
 };

-class LiveObjectVisitor BASE_EMBEDDED {
+class LiveObjectVisitor : AllStatic {
  public:
   enum IterationMode {
     kKeepMarking,
     kClearMarkbits,
   };

-  // Visits black objects on a MemoryChunk until the Visitor returns for an
-  // object. If IterationMode::kClearMarkbits is passed the markbits and slots
-  // for visited objects are cleared for each successfully visited object.
+  // Visits black objects on a MemoryChunk until the Visitor returns |false| for
+  // an object. If IterationMode::kClearMarkbits is passed the markbits and
+  // slots for visited objects are cleared for each successfully visited object.
   template <class Visitor>
-  bool VisitBlackObjects(MemoryChunk* chunk, const MarkingState& state,
-                         Visitor* visitor, IterationMode iteration_mode);
+  static bool VisitBlackObjects(MemoryChunk* chunk, const MarkingState& state,
+                                Visitor* visitor, IterationMode iteration_mode,
+                                HeapObject** failed_object);

-  // Visits grey objects on a Memorychunk. Is not allowed to fail visitation
-  // for an object.
+  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
+  // visitation for an object.
   template <class Visitor>
-  bool VisitGreyObjectsNoFail(MemoryChunk* chunk, const MarkingState& state,
-                              Visitor* visitor, IterationMode iteration_mode);
+  static void VisitBlackObjectsNoFail(MemoryChunk* chunk,
+                                      const MarkingState& state,
+                                      Visitor* visitor,
+                                      IterationMode iteration_mode);

- private:
-  void RecomputeLiveBytes(MemoryChunk* chunk, const MarkingState& state);
+  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
+  // visitation for an object.
+  template <class Visitor>
+  static void VisitGreyObjectsNoFail(MemoryChunk* chunk,
+                                     const MarkingState& state,
+                                     Visitor* visitor,
+                                     IterationMode iteration_mode);
+
+  static void RecomputeLiveBytes(MemoryChunk* chunk, const MarkingState& state);
 };

 enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
@@ -744,7 +754,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   void ReleaseEvacuationCandidates();
   void PostProcessEvacuationCandidates();
+  void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);

+  base::Mutex mutex_;
   base::Semaphore page_parallel_job_semaphore_;
 #ifdef DEBUG

@@ -781,10 +793,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Pages that are actually processed during evacuation.
   List<Page*> old_space_evacuation_pages_;
   List<Page*> new_space_evacuation_pages_;
+  std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;

   Sweeper sweeper_;

   friend class CodeMarkingVisitor;
+  friend class FullEvacuator;
   friend class Heap;
   friend class IncrementalMarkingMarkingVisitor;
   friend class MarkCompactMarkingVisitor;
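
A note on the BASE_EMBEDDED to AllStatic change above: AllStatic marks classes that are pure bundles of static functions and are never meant to be instantiated. A rough equivalent of the idiom follows; the exact V8 definition may differ, and the helper class below is a hypothetical example rather than V8 code:

// Rough equivalent of the AllStatic idiom (V8's actual definition may differ).
class AllStatic {
 public:
  AllStatic() = delete;  // Never meant to be instantiated.
};

// A class of this shape is effectively a namespace with access control:
// every member is static, and instantiation fails to compile because the
// derived class's default constructor is implicitly deleted.
class StaticHelpers : AllStatic {
 public:
  static int TimesTwo(int x) { return 2 * x; }
};

// StaticHelpers h;                       // error: deleted constructor
// int y = StaticHelpers::TimesTwo(21);   // intended usage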