Use just one marking deque.

BUG=

Review URL: https://codereview.chromium.org/770453003

Cr-Commit-Position: refs/heads/master@{#25588}
commit d452d8fe8d (parent ba04cddf43)
hpayer, 2014-12-01 09:02:54 -08:00, committed by Commit bot
6 changed files with 91 additions and 98 deletions
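The patch below moves the marking deque, and the virtual memory backing it, from IncrementalMarking into MarkCompactCollector, so incremental and non-incremental marking share a single work list. For orientation, here is a minimal sketch (not V8's code; names follow the patch, details simplified) of the ring-buffer deque interface the hunks manipulate: a power-of-two array indexed by top/bottom with a mask, grey objects pushed at the top or re-inserted at the bottom, and an overflow flag instead of growth.

#include <cassert>
#include <cstddef>

struct HeapObject;  // opaque placeholder for the sketch

class MarkingDeque {
 public:
  // The backing array length must be a power of two so that index
  // arithmetic can use "& mask_" instead of a modulo.
  void Initialize(HeapObject** start, HeapObject** end) {
    array_ = start;
    size_t size = static_cast<size_t>(end - start);
    assert(size != 0 && (size & (size - 1)) == 0);
    mask_ = static_cast<int>(size) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

  // Grey objects normally enter at the top. On overflow the object stays
  // grey in the heap and the flag tells the collector to rescan for it.
  void PushGrey(HeapObject* obj) {
    if (IsFull()) { overflowed_ = true; return; }
    array_[top_] = obj;
    top_ = (top_ + 1) & mask_;
  }

  // Re-insert at the bottom, behind all queued work, so the object is
  // revisited only after the rest of the grey set is drained.
  void UnshiftGrey(HeapObject* obj) {
    if (IsFull()) { overflowed_ = true; return; }
    bottom_ = (bottom_ - 1) & mask_;
    array_[bottom_] = obj;
  }

  HeapObject* Pop() {  // LIFO: takes the most recently pushed object
    assert(!IsEmpty());
    top_ = (top_ - 1) & mask_;
    return array_[top_];
  }

  int top() const { return top_; }
  void set_top(int t) { top_ = t; }
  int bottom() const { return bottom_; }
  int mask() const { return mask_; }
  HeapObject** array() const { return array_; }
  bool overflowed() const { return overflowed_; }

 private:
  HeapObject** array_ = nullptr;
  int top_ = 0;
  int bottom_ = 0;
  int mask_ = 0;
  bool overflowed_ = false;
};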

src/heap/heap.cc

@@ -768,7 +768,6 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   mark_compact_collector()->SetFlags(kNoGCFlags);
   new_space_.Shrink();
   UncommitFromSpace();
-  incremental_marking()->UncommitMarkingDeque();
 }
@@ -1133,6 +1132,9 @@ bool Heap::PerformGarbageCollection(
         amount_of_external_allocated_memory_;
     old_generation_allocation_limit_ = OldGenerationAllocationLimit(
         PromotedSpaceSizeOfObjects(), freed_global_handles);
+    // We finished a marking cycle. We can uncommit the marking deque until
+    // we start marking again.
+    mark_compact_collector_.UncommitMarkingDeque();
   }

   {
@@ -4410,7 +4412,7 @@ void Heap::TryFinalizeIdleIncrementalMarking(
     double idle_time_in_ms, size_t size_of_objects,
     size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
   if (incremental_marking()->IsComplete() ||
-      (incremental_marking()->IsMarkingDequeEmpty() &&
+      (mark_compact_collector_.marking_deque()->IsEmpty() &&
        gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
            static_cast<size_t>(idle_time_in_ms), size_of_objects,
            final_incremental_mark_compact_speed_in_bytes_per_ms))) {
@@ -5507,7 +5509,6 @@ void Heap::TearDown() {
   }

   store_buffer()->TearDown();

-  incremental_marking()->TearDown();

   isolate_->memory_allocator()->TearDown();
 }

src/heap/incremental-marking-inl.h

@@ -103,13 +103,13 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
     }
   }
-  marking_deque_.UnshiftGrey(obj);
+  heap_->mark_compact_collector()->marking_deque()->UnshiftGrey(obj);
 }

 void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
   Marking::WhiteToGrey(mark_bit);
-  marking_deque_.PushGrey(obj);
+  heap_->mark_compact_collector()->marking_deque()->PushGrey(obj);
 }
 }
 }  // namespace v8::internal
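Both helpers now funnel into the collector's deque. A toy usage of the sketch above shows why the two entry points differ: PushGrey lands at the top and is popped first, while UnshiftGrey parks an object at the bottom so the marker returns to it only after the rest of the grey set is drained, which is what BlackToGreyAndUnshift wants for objects it re-greys.

#include <cstdio>

int main() {
  // Reuses the toy MarkingDeque sketched after the commit header.
  HeapObject* backing[8] = {};  // power-of-two backing store
  HeapObject* a = reinterpret_cast<HeapObject*>(0x10);
  HeapObject* b = reinterpret_cast<HeapObject*>(0x20);
  MarkingDeque deque;
  deque.Initialize(backing, backing + 8);
  deque.PushGrey(a);     // normal grey push: processed next
  deque.UnshiftGrey(b);  // re-queued at the bottom: processed last
  std::printf("%s\n", deque.Pop() == a ? "a first" : "?");
  std::printf("%s\n", deque.Pop() == b ? "b last" : "?");
  return 0;
}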

src/heap/incremental-marking.cc

@@ -19,8 +19,6 @@ namespace internal {
 IncrementalMarking::IncrementalMarking(Heap* heap)
     : heap_(heap),
       state_(STOPPED),
-      marking_deque_memory_(NULL),
-      marking_deque_memory_committed_(false),
       steps_count_(0),
       old_generation_space_available_at_start_of_incremental_(0),
       old_generation_space_used_at_start_of_incremental_(0),
@@ -32,9 +30,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       unscanned_bytes_of_large_object_(0) {}

-void IncrementalMarking::TearDown() { delete marking_deque_memory_; }
-
 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                          Object* value) {
   if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
@@ -195,11 +190,12 @@ class IncrementalMarkingMarkingVisitor
                       HeapObject::RawField(object, end_offset));
         start_offset = end_offset;
         end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
-        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
+        scan_until_end =
+            heap->mark_compact_collector()->marking_deque()->IsFull();
       } while (scan_until_end && start_offset < object_size);
       chunk->set_progress_bar(start_offset);
       if (start_offset < object_size) {
-        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
+        heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
         heap->incremental_marking()->NotifyIncompleteScanOfObject(
             object_size - (start_offset - already_scanned_offset));
       }
@@ -482,32 +478,6 @@ static void PatchIncrementalMarkingRecordWriteStubs(
 }

-void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
-  if (marking_deque_memory_ == NULL) {
-    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
-  }
-  if (!marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Commit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size(),
-        false);  // Not executable.
-    CHECK(success);
-    marking_deque_memory_committed_ = true;
-  }
-}
-
-void IncrementalMarking::UncommitMarkingDeque() {
-  if (state_ == STOPPED && marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Uncommit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size());
-    CHECK(success);
-    marking_deque_memory_committed_ = false;
-  }
-}
-
 void IncrementalMarking::Start(CompactionFlag flag) {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start\n");
@@ -550,13 +520,7 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

-  EnsureMarkingDequeIsCommitted();
-
-  // Initialize marking stack.
-  Address addr = static_cast<Address>(marking_deque_memory_->address());
-  size_t size = marking_deque_memory_->size();
-  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-  marking_deque_.Initialize(addr, addr + size);
+  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();

   ActivateIncrementalWriteBarrier();
@@ -602,10 +566,12 @@ void IncrementalMarking::PrepareForScavenge() {
 void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
   if (!IsMarking()) return;

-  int current = marking_deque_.bottom();
-  int mask = marking_deque_.mask();
-  int limit = marking_deque_.top();
-  HeapObject** array = marking_deque_.array();
+  MarkingDeque* marking_deque =
+      heap_->mark_compact_collector()->marking_deque();
+  int current = marking_deque->bottom();
+  int mask = marking_deque->mask();
+  int limit = marking_deque->top();
+  HeapObject** array = marking_deque->array();
   int new_top = current;

   Map* filler_map = heap_->one_pointer_filler_map();
@@ -620,7 +586,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
       HeapObject* dest = map_word.ToForwardingAddress();
       array[new_top] = dest;
       new_top = ((new_top + 1) & mask);
-      DCHECK(new_top != marking_deque_.bottom());
+      DCHECK(new_top != marking_deque->bottom());
 #ifdef DEBUG
       MarkBit mark_bit = Marking::MarkBitFrom(obj);
       DCHECK(Marking::IsGrey(mark_bit) ||
@@ -632,7 +598,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
       // stack when we perform in place array shift.
       array[new_top] = obj;
       new_top = ((new_top + 1) & mask);
-      DCHECK(new_top != marking_deque_.bottom());
+      DCHECK(new_top != marking_deque->bottom());
 #ifdef DEBUG
       MarkBit mark_bit = Marking::MarkBitFrom(obj);
       MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
@@ -643,7 +609,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
 #endif
     }
   }
-  marking_deque_.set_top(new_top);
+  marking_deque->set_top(new_top);
 }
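UpdateMarkingDequeAfterScavenge compacts the deque in place: it walks the live window from bottom to top, drops entries that died in the scavenge, and rewrites relocated entries with their forwarding addresses, reusing the same ring storage. A condensed sketch of that loop against the toy deque above; is_dead and forwarded are hypothetical stand-ins for the map-word checks in the real code.

// In-place filter of the ring-buffer window [bottom, top).
void CompactDequeAfterScavenge(MarkingDeque* deque,
                               bool (*is_dead)(HeapObject*),
                               HeapObject* (*forwarded)(HeapObject*)) {
  int mask = deque->mask();
  int limit = deque->top();
  HeapObject** array = deque->array();
  int new_top = deque->bottom();
  for (int current = deque->bottom(); current != limit;
       current = (current + 1) & mask) {
    HeapObject* obj = array[current];
    if (is_dead(obj)) continue;       // e.g. replaced by a one-word filler
    array[new_top] = forwarded(obj);  // identity if the object did not move
    new_top = (new_top + 1) & mask;   // new_top can never outrun 'current',
  }                                   // so writes never clobber unread slots
  deque->set_top(new_top);
}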
@@ -670,8 +636,10 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
 intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
   intptr_t bytes_processed = 0;
   Map* filler_map = heap_->one_pointer_filler_map();
-  while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
-    HeapObject* obj = marking_deque_.Pop();
+  MarkingDeque* marking_deque =
+      heap_->mark_compact_collector()->marking_deque();
+  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
+    HeapObject* obj = marking_deque->Pop();

     // Explicitly skip one word fillers. Incremental markbit patterns are
     // correct only for objects that occupy at least two words.
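The byte budget is what keeps an incremental step bounded: the deque is drained only until bytes_to_process worth of objects has been scanned, then control returns to the mutator. A minimal sketch of the pattern, with size_of and visit as placeholders for V8's object-size and visitor dispatch:

// Budgeted draining: stop once enough bytes have been scanned, even if
// the deque still holds grey objects; the next step resumes from there.
intptr_t DrainWithBudget(MarkingDeque* deque, intptr_t bytes_to_process,
                         intptr_t (*size_of)(HeapObject*),
                         void (*visit)(HeapObject*)) {
  intptr_t bytes_processed = 0;
  while (!deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = deque->Pop();
    visit(obj);                       // blackens obj, pushes its grey children
    bytes_processed += size_of(obj);  // charge obj against this step's budget
  }
  return bytes_processed;
}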
@@ -692,8 +660,10 @@ intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
 void IncrementalMarking::ProcessMarkingDeque() {
   Map* filler_map = heap_->one_pointer_filler_map();
-  while (!marking_deque_.IsEmpty()) {
-    HeapObject* obj = marking_deque_.Pop();
+  MarkingDeque* marking_deque =
+      heap_->mark_compact_collector()->marking_deque();
+  while (!marking_deque->IsEmpty()) {
+    HeapObject* obj = marking_deque->Pop();

     // Explicitly skip one word fillers. Incremental markbit patterns are
     // correct only for objects that occupy at least two words.
@@ -793,7 +763,7 @@ void IncrementalMarking::Finalize() {
   PatchIncrementalMarkingRecordWriteStubs(heap_,
                                           RecordWriteStub::STORE_BUFFER_ONLY);
   DeactivateIncrementalWriteBarrier();
-  DCHECK(marking_deque_.IsEmpty());
+  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
   heap_->isolate()->stack_guard()->ClearGC();
 }
@@ -946,7 +916,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
     }
   } else if (state_ == MARKING) {
     bytes_processed = ProcessMarkingDeque(bytes_to_process);
-    if (marking_deque_.IsEmpty()) {
+    if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
       if (completion == FORCE_COMPLETION ||
           IsIdleMarkingDelayCounterLimitReached()) {
         MarkingComplete(action);

src/heap/incremental-marking.h

@@ -28,8 +28,6 @@ class IncrementalMarking {
   static void Initialize();

-  void TearDown();
-
   State state() {
     DCHECK(state_ == STOPPED || FLAG_incremental_marking);
     return state_;
@@ -144,10 +142,6 @@ class IncrementalMarking {
     SetNewSpacePageFlags(chunk, IsMarking());
   }

-  MarkingDeque* marking_deque() { return &marking_deque_; }
-
-  bool IsMarkingDequeEmpty() { return marking_deque_.IsEmpty(); }
-
   bool IsCompacting() { return IsMarking() && is_compacting_; }

   void ActivateGeneratedStub(Code* stub);
@@ -170,8 +164,6 @@ class IncrementalMarking {
   void LeaveNoMarkingScope() { no_marking_scope_depth_--; }

-  void UncommitMarkingDeque();
-
   void NotifyIncompleteScanOfObject(int unscanned_bytes) {
     unscanned_bytes_of_large_object_ = unscanned_bytes;
   }
@@ -202,8 +194,6 @@ class IncrementalMarking {
   static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);

-  void EnsureMarkingDequeIsCommitted();
-
   INLINE(void ProcessMarkingDeque());

   INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));
@@ -217,10 +207,6 @@ class IncrementalMarking {
   State state_;
   bool is_compacting_;

-  base::VirtualMemory* marking_deque_memory_;
-  bool marking_deque_memory_committed_;
-  MarkingDeque marking_deque_;
-
   int steps_count_;
   int64_t old_generation_space_available_at_start_of_incremental_;
   int64_t old_generation_space_used_at_start_of_incremental_;

src/heap/mark-compact.cc

@@ -50,6 +50,8 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       evacuation_(false),
       migration_slots_buffer_(NULL),
       heap_(heap),
+      marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
       code_flusher_(NULL),
       have_code_to_deoptimize_(false) {
 }
@@ -233,7 +235,10 @@ void MarkCompactCollector::SetUp() {
 }

-void MarkCompactCollector::TearDown() { AbortCompaction(); }
+void MarkCompactCollector::TearDown() {
+  AbortCompaction();
+  delete marking_deque_memory_;
+}

 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
@@ -2009,13 +2014,18 @@ void MarkCompactCollector::MarkWeakObjectToCodeTable() {
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
 void MarkCompactCollector::EmptyMarkingDeque() {
+  Map* filler_map = heap_->one_pointer_filler_map();
   while (!marking_deque_.IsEmpty()) {
     HeapObject* object = marking_deque_.Pop();
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = object->map();
+    if (map == filler_map) continue;
+
     DCHECK(object->IsHeapObject());
     DCHECK(heap()->Contains(object));
-    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));

-    Map* map = object->map();
     MarkBit map_mark = Marking::MarkBitFrom(map);
     MarkObject(map, map_mark);
@@ -2110,6 +2120,43 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
 }

+void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+    InitializeMarkingDeque();
+  }
+}
+
+void MarkCompactCollector::InitializeMarkingDeque() {
+  if (marking_deque_memory_committed_) {
+    Address addr = static_cast<Address>(marking_deque_memory_->address());
+    size_t size = marking_deque_memory_->size();
+    if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+    marking_deque_.Initialize(addr, addr + size);
+  }
+}
+
+void MarkCompactCollector::UncommitMarkingDeque() {
+  if (marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
+  }
+}
+
 void MarkCompactCollector::MarkLiveObjects() {
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
   double start_time = 0.0;
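The deque's 4 MB backing store is reserved once and committed only while marking is active; UncommitMarkingDeque returns the pages to the OS between cycles. Below is a rough POSIX stand-in for this reserve/commit/uncommit pattern. v8::base::VirtualMemory is V8's own platform wrapper; this sketch is illustrative, not its implementation.

#include <cassert>
#include <cstddef>
#include <sys/mman.h>

class VirtualRegion {
 public:
  explicit VirtualRegion(size_t size) : size_(size) {
    // Reserve address space only: inaccessible, no committed-memory charge.
    base_ = mmap(nullptr, size_, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    assert(base_ != MAP_FAILED);
  }
  ~VirtualRegion() { munmap(base_, size_); }

  // Commit: make the pages readable/writable (never executable here).
  bool Commit() { return mprotect(base_, size_, PROT_READ | PROT_WRITE) == 0; }

  // Uncommit: revoke access and tell the kernel the contents are disposable,
  // so physical pages can be reclaimed while the reservation stays in place.
  bool Uncommit() {
    return mprotect(base_, size_, PROT_NONE) == 0 &&
           madvise(base_, size_, MADV_DONTNEED) == 0;
  }

  void* address() const { return base_; }
  size_t size() const { return size_; }

 private:
  void* base_;
  size_t size_;
};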
@@ -2121,42 +2168,21 @@ void MarkCompactCollector::MarkLiveObjects() {
   // with the C stack limit check.
   PostponeInterruptsScope postpone(isolate());

-  bool incremental_marking_overflowed = false;
   IncrementalMarking* incremental_marking = heap_->incremental_marking();
   if (was_marked_incrementally_) {
-    // Finalize the incremental marking and check whether we had an overflow.
-    // Both markers use grey color to mark overflowed objects so
-    // non-incremental marker can deal with them as if overflow
-    // occured during normal marking.
-    // But incremental marker uses a separate marking deque
-    // so we have to explicitly copy its overflow state.
     incremental_marking->Finalize();
-    incremental_marking_overflowed =
-        incremental_marking->marking_deque()->overflowed();
-    incremental_marking->marking_deque()->ClearOverflowed();
   } else {
     // Abort any pending incremental activities e.g. incremental sweeping.
     incremental_marking->Abort();
+    InitializeMarkingDeque();
   }

 #ifdef DEBUG
   DCHECK(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
 #endif

-  // The to space contains live objects, a page in from space is used as a
-  // marking stack.
-  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
-  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
-  if (FLAG_force_marking_deque_overflows) {
-    marking_deque_end = marking_deque_start + 64 * kPointerSize;
-  }
-  marking_deque_.Initialize(marking_deque_start, marking_deque_end);
-  DCHECK(!marking_deque_.overflowed());
-
-  if (incremental_marking_overflowed) {
-    // There are overflowed objects left in the heap after incremental marking.
-    marking_deque_.SetOverflowed();
-  }
+  EnsureMarkingDequeIsCommittedAndInitialize();

   PrepareForCodeFlushing();
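Design note: the removed comments describe exactly the complexity this change deletes. The non-incremental marker used to borrow a from-space page as its marking stack and had to copy the incremental deque's overflow bit across. With one shared, dedicated deque, Finalize simply leaves any grey objects and the overflow flag in place, and MarkLiveObjects resumes from that state; both the from-space trick and the overflow hand-off disappear.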

src/heap/mark-compact.h

@@ -655,6 +655,14 @@ class MarkCompactCollector {
   // to artificially keep AllocationSites alive for a time.
   void MarkAllocationSite(AllocationSite* site);

+  MarkingDeque* marking_deque() { return &marking_deque_; }
+
+  void EnsureMarkingDequeIsCommittedAndInitialize();
+
+  void InitializeMarkingDeque();
+
+  void UncommitMarkingDeque();
+
  private:
   class SweeperTask;
@@ -875,6 +883,8 @@ class MarkCompactCollector {
 #endif

   Heap* heap_;
+  base::VirtualMemory* marking_deque_memory_;
+  bool marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   bool have_code_to_deoptimize_;