[heap] Remove bailout marking worklist.

The concurrent marker can now process all objects. The patch also
eagerly visits objects that undergo layout changes: previously such
objects were pushed onto the bailout worklist, which no longer exists.
To preserve the incremental step accounting, the patch introduces a new
GC tracer scope called MC_INCREMENTAL_LAYOUT_CHANGE.

Bug: v8:8486
Change-Id: Ic1c2f0d4e2ac0602fc945f3258af9624247bd65f
Reviewed-on: https://chromium-review.googlesource.com/c/1386486
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58712}
Ulan Degenbaev 2018-12-28 09:23:39 +01:00 committed by Commit Bot
parent e8316b64b2
commit 68a8bdd829
11 changed files with 60 additions and 152 deletions

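As context for the change: before this patch, an object about to undergo an unsafe layout change was colored black and pushed onto a dedicated bailout worklist that only the main thread drained; now the object is colored black and revisited immediately, so the bailout worklist and all of its plumbing can be deleted. The following self-contained model (not V8 code; all names are illustrative) sketches the new protocol under a simple tri-color scheme:

#include <atomic>
#include <vector>

// Illustrative tri-color model, not V8's actual types.
enum class Color { kWhite, kGrey, kBlack };

struct Object {
  std::atomic<Color> color{Color::kWhite};
  std::vector<Object*> fields;  // outgoing references
};

bool TryTransition(Object* o, Color from, Color to) {
  Color expected = from;
  return o->color.compare_exchange_strong(expected, to);
}

// Analogue of MarkBlackAndVisitObjectDueToLayoutChange: color the object
// black *before* its layout is mutated, then visit its fields eagerly so
// none are lost. Previously the object went onto a bailout worklist and
// was only visited later on the main thread.
void MarkBlackAndVisit(Object* o, std::vector<Object*>* marking_worklist) {
  TryTransition(o, Color::kWhite, Color::kGrey);
  if (TryTransition(o, Color::kGrey, Color::kBlack)) {
    for (Object* field : o->fields) {
      if (TryTransition(field, Color::kWhite, Color::kGrey)) {
        marking_worklist->push_back(field);  // grey objects await scanning
      }
    }
  }
}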
View File

@@ -352,14 +352,15 @@
 #define INCREMENTAL_SCOPES(F)                                      \
   /* MC_INCREMENTAL is the top-level incremental marking scope. */ \
   F(MC_INCREMENTAL)                                                \
-  F(MC_INCREMENTAL_START)                                          \
-  F(MC_INCREMENTAL_SWEEPING)                                       \
   F(MC_INCREMENTAL_EMBEDDER_PROLOGUE)                              \
   F(MC_INCREMENTAL_EMBEDDER_TRACING)                               \
+  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE)                              \
+  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)                              \
   F(MC_INCREMENTAL_FINALIZE)                                       \
   F(MC_INCREMENTAL_FINALIZE_BODY)                                  \
-  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE)                              \
-  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
+  F(MC_INCREMENTAL_LAYOUT_CHANGE)                                  \
+  F(MC_INCREMENTAL_START)                                          \
+  F(MC_INCREMENTAL_SWEEPING)

 #define TOP_MC_SCOPES(F) \
   F(MC_CLEAR)            \

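The hunk above edits an X-macro: each F(name) entry expands, at different expansion sites, into an enum value and a printable counter name, which is why adding MC_INCREMENTAL_LAYOUT_CHANGE to this list is all that is needed to declare the new scope. A minimal standalone illustration of the pattern (hypothetical scope names, not V8's):

#define MY_SCOPES(F) \
  F(SCOPE_A)         \
  F(SCOPE_B)         \
  F(SCOPE_C)

enum Scope {
#define DEFINE_SCOPE(name) name,
  MY_SCOPES(DEFINE_SCOPE)  // expands to: SCOPE_A, SCOPE_B, SCOPE_C,
#undef DEFINE_SCOPE
  NUMBER_OF_SCOPES
};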
View File

@@ -78,12 +78,10 @@ class ConcurrentMarkingVisitor final
   explicit ConcurrentMarkingVisitor(
       ConcurrentMarking::MarkingWorklist* shared,
-      ConcurrentMarking::MarkingWorklist* bailout,
       MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
       ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
       bool embedder_tracing_enabled, unsigned mark_compact_epoch)
       : shared_(shared, task_id),
-        bailout_(bailout, task_id),
         weak_objects_(weak_objects),
         embedder_objects_(embedder_objects, task_id),
         marking_state_(memory_chunk_data),
@@ -654,7 +652,6 @@ class ConcurrentMarkingVisitor final
   }

   ConcurrentMarking::MarkingWorklist::View shared_;
-  ConcurrentMarking::MarkingWorklist::View bailout_;
   WeakObjects* weak_objects_;
   ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
   ConcurrentMarkingState marking_state_;
@@ -722,13 +719,11 @@ class ConcurrentMarking::Task : public CancelableTask {
 };

 ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
-                                     MarkingWorklist* bailout,
                                      MarkingWorklist* on_hold,
                                      WeakObjects* weak_objects,
                                      EmbedderTracingWorklist* embedder_objects)
     : heap_(heap),
       shared_(shared),
-      bailout_(bailout),
       on_hold_(on_hold),
       weak_objects_(weak_objects),
       embedder_objects_(embedder_objects) {
@@ -743,10 +738,10 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
       GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
-  ConcurrentMarkingVisitor visitor(
-      shared_, bailout_, &task_state->memory_chunk_data, weak_objects_,
-      embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse(),
-      task_state->mark_compact_epoch);
+  ConcurrentMarkingVisitor visitor(shared_, &task_state->memory_chunk_data,
+                                   weak_objects_, embedder_objects_, task_id,
+                                   heap_->local_embedder_heap_tracer()->InUse(),
+                                   task_state->mark_compact_epoch);
   double time_ms;
   size_t marked_bytes = 0;
   if (FLAG_trace_concurrent_marking) {
@@ -812,7 +807,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   }

   shared_->FlushToGlobal(task_id);
-  bailout_->FlushToGlobal(task_id);
   on_hold_->FlushToGlobal(task_id);
   embedder_objects_->FlushToGlobal(task_id);

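The FlushToGlobal calls above are the tail of each marking task: worklist access goes through per-task views, and local segments are published to a global pool when a task stops. A simplified, self-contained model of that shape (illustrative only; V8's Worklist is segment-based and lock-free, unlike this mutex-guarded sketch):

#include <array>
#include <mutex>
#include <vector>

template <typename T, int kMaxTasks>
class MiniWorklist {
 public:
  // Each task pushes and pops through its own local buffer ("view").
  void Push(int task_id, T value) { local_[task_id].push_back(value); }
  bool Pop(int task_id, T* out) {
    auto& l = local_[task_id];
    if (l.empty() && !StealFromGlobal(&l)) return false;
    *out = l.back();
    l.pop_back();
    return true;
  }
  // Publish remaining local work so other tasks (or the main thread)
  // can pick it up; cf. FlushToGlobal(task_id) above.
  void FlushToGlobal(int task_id) {
    std::lock_guard<std::mutex> guard(mutex_);
    auto& l = local_[task_id];
    global_.insert(global_.end(), l.begin(), l.end());
    l.clear();
  }

 private:
  bool StealFromGlobal(std::vector<T>* l) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (global_.empty()) return false;
    l->push_back(global_.back());
    global_.pop_back();
    return true;
  }
  std::array<std::vector<T>, kMaxTasks> local_;
  std::vector<T> global_;
  std::mutex mutex_;
};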
View File

@@ -67,8 +67,7 @@ class ConcurrentMarking {
   using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;

   ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
-                    MarkingWorklist* bailout, MarkingWorklist* on_hold,
-                    WeakObjects* weak_objects,
+                    MarkingWorklist* on_hold, WeakObjects* weak_objects,
                     EmbedderTracingWorklist* embedder_objects);

   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
@@ -113,7 +112,6 @@ class ConcurrentMarking {
   void Run(int task_id, TaskState* task_state);
   Heap* const heap_;
   MarkingWorklist* const shared_;
-  MarkingWorklist* const bailout_;
   MarkingWorklist* const on_hold_;
   WeakObjects* const weak_objects_;
   EmbedderTracingWorklist* const embedder_objects_;

View File

@@ -710,6 +710,8 @@ void GCTracer::PrintNVP() const {
           "incremental.finalize.body=%.1f "
           "incremental.finalize.external.prologue=%.1f "
           "incremental.finalize.external.epilogue=%.1f "
+          "incremental.layout_change=%.1f "
+          "incremental.start=%.1f "
           "incremental.sweeping=%.1f "
           "incremental.embedder_prologue=%.1f "
           "incremental.embedder_tracing=%.1f "
@@ -804,6 +806,8 @@ void GCTracer::PrintNVP() const {
           current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY],
           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
+          current_.scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE],
+          current_.scopes[Scope::MC_INCREMENTAL_START],
           current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
           current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING],
@@ -1129,6 +1133,8 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
   base::MutexGuard guard(&background_counter_mutex_);
   const double overall_duration =
+      current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+          .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
           .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
@@ -1149,6 +1155,8 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
           .total_duration_ms;
   const double marking_duration =
+      current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+          .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
           .duration +
       incremental_marking_duration_ +

View File

@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE GCTracer {
     NUMBER_OF_SCOPES,

     FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
-    LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_EXTERNAL_PROLOGUE,
+    LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
     FIRST_SCOPE = MC_INCREMENTAL,
     NUMBER_OF_INCREMENTAL_SCOPES =
         LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,

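LAST_INCREMENTAL_SCOPE moves to MC_INCREMENTAL_SWEEPING because the scope list is now kept sorted and MC_INCREMENTAL_SWEEPING is its last entry; the count below it only works while the incremental scopes occupy a contiguous run of enumerator values. A standalone illustration of that arithmetic (hypothetical enumerators):

// Consecutive enumerator values let a contiguous block be counted by
// subtraction, which is what NUMBER_OF_INCREMENTAL_SCOPES relies on.
enum Scope {
  MC_INCREMENTAL,           // 0
  MC_INCREMENTAL_FINALIZE,  // 1
  MC_INCREMENTAL_SWEEPING,  // 2
  FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
  LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
  NUMBER_OF_INCREMENTAL_SCOPES =
      LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1  // == 3
};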
View File

@@ -2967,7 +2967,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
 void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
                                     const DisallowHeapAllocation&) {
   if (incremental_marking()->IsMarking()) {
-    incremental_marking()->MarkBlackAndPush(object);
+    incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
@@ -4382,12 +4382,11 @@ void Heap::SetUp() {
     MarkCompactCollector::MarkingWorklist* marking_worklist =
         mark_compact_collector_->marking_worklist();
     concurrent_marking_ = new ConcurrentMarking(
-        this, marking_worklist->shared(), marking_worklist->bailout(),
-        marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
-        marking_worklist->embedder());
+        this, marking_worklist->shared(), marking_worklist->on_hold(),
+        mark_compact_collector_->weak_objects(), marking_worklist->embedder());
   } else {
-    concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
-                                                nullptr, nullptr);
+    concurrent_marking_ =
+        new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
   }

   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {

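For callers, NotifyObjectLayoutChange must run before the layout is actually mutated, inside a DisallowHeapAllocation scope. A hedged sketch of the caller pattern; TransmuteInPlace is a hypothetical helper, and only the NotifyObjectLayoutChange call and the DisallowHeapAllocation scope reflect the real protocol:

// Hypothetical caller, for illustration only.
void TransmuteInPlace(Heap* heap, HeapObject object, int size) {
  DisallowHeapAllocation no_gc;
  // The marker visits |object| under its old layout right here, so the
  // subsequent in-place mutation cannot hide references from it.
  heap->NotifyObjectLayoutChange(object, size, no_gc);
  // ... overwrite the map/length fields ...
}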
View File

@@ -133,18 +133,13 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
   return false;
 }

-void IncrementalMarking::MarkBlackAndPush(HeapObject obj) {
-  // Marking left-trimmable fixed array black is unsafe because left-trimming
-  // re-pushes only grey arrays onto the marking worklist.
-  DCHECK(!obj->IsFixedArray() && !obj->IsFixedDoubleArray());
-  // Color the object black and push it into the bailout deque.
+void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
+    HeapObject obj) {
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
   marking_state()->WhiteToGrey(obj);
   if (marking_state()->GreyToBlack(obj)) {
-    if (FLAG_concurrent_marking) {
-      marking_worklist()->PushBailout(obj);
-    } else {
-      marking_worklist()->Push(obj);
-    }
+    RevisitObject(obj);
   }
 }
@@ -155,61 +150,26 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
                  MemoryChunk::FromAddress(to->address()));
   DCHECK_NE(from, to);

-  MarkBit old_mark_bit = marking_state()->MarkBitFrom(from);
   MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

   if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
     // Nothing to do if the object is in black area.
     return;
   }
-
-  bool marked_black_due_to_left_trimming = false;
-  if (FLAG_concurrent_marking) {
-    // We need to mark the array black before overwriting its map and length
-    // so that the concurrent marker does not observe inconsistent state.
-    Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
-    if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
-      // The concurrent marker will not mark the array. We need to push the
-      // new array start in marking deque to ensure that it will be marked.
-      marked_black_due_to_left_trimming = true;
-    }
-    DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
-  }
-
-  if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
-      !marked_black_due_to_left_trimming) {
-    // The array was black before left trimming or was marked black by the
-    // concurrent marker. Simply transfer the color.
-    if (from->address() + kTaggedSize == to->address()) {
-      // The old and the new markbits overlap. The |to| object has the
-      // grey color. To make it black, we need to set the second bit.
-      DCHECK(new_mark_bit.Get<kAtomicity>());
-      new_mark_bit.Next().Set<kAtomicity>();
-    } else {
-      bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
-      DCHECK(success);
-      USE(success);
-    }
-  } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
-             marked_black_due_to_left_trimming) {
-    // The array was already grey or was marked black by this function.
-    // Mark the new array grey and push it to marking deque.
-    if (from->address() + kTaggedSize == to->address()) {
-      // The old and the new markbits overlap. The |to| object is either white
-      // or grey. Set the first bit to make sure that it is grey.
-      new_mark_bit.Set<kAtomicity>();
-      DCHECK(!new_mark_bit.Next().Get<kAtomicity>());
-    } else {
-      bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
-      DCHECK(success);
-      USE(success);
-    }
-    // Subsequent left-trimming will re-push only grey arrays.
-    // Ensure that this array is grey.
-    DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
-    marking_worklist()->PushBailout(to);
-    RestartIfNotMarking();
+  MarkBlackAndVisitObjectDueToLayoutChange(from);
+  DCHECK(marking_state()->IsBlack(from));
+  // Mark the new address as black.
+  if (from->address() + kTaggedSize == to->address()) {
+    // The old and the new markbits overlap. The |to| object has the
+    // grey color. To make it black, we need to set the second bit.
+    DCHECK(new_mark_bit.Get<kAtomicity>());
+    new_mark_bit.Next().Set<kAtomicity>();
+  } else {
+    bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
+    DCHECK(success);
+    USE(success);
   }
+  DCHECK(marking_state()->IsBlack(to));
 }

 class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
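The overlap branch above depends on the mark-bitmap layout: one bit per tagged word, with an object's color read from its first two bits (white = 00, grey = 10, black = 11, per the comments in the hunk). When |to| starts one tagged word after |from|, |to|'s first bit aliases |from|'s second bit, so once |from| is marked black, |to| already reads as grey and only its second bit remains to be set. A self-contained model of exactly that (illustrative, not V8's bitmap code):

#include <bitset>
#include <cassert>

int main() {
  std::bitset<8> bitmap;           // one mark bit per tagged word
  const int from = 2, to = 3;      // |to| starts one tagged word after |from|

  bitmap.set(from).set(from + 1);  // mark |from| black: bits (1, 1)

  // |to|'s first bit is |from|'s second bit: |to| now reads grey (1, 0).
  assert(bitmap[to] && !bitmap[to + 1]);

  bitmap.set(to + 1);              // set the second bit: |to| reads black
  assert(bitmap[to] && bitmap[to + 1]);
}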
@@ -797,17 +757,11 @@ void IncrementalMarking::VisitDescriptors(HeapObject host,
   visitor.VisitDescriptors(descriptors, number_of_own_descriptors);
 }

-template <WorklistToProcess worklist_to_process>
 intptr_t IncrementalMarking::ProcessMarkingWorklist(
     intptr_t bytes_to_process, ForceCompletionAction completion) {
   intptr_t bytes_processed = 0;
   while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
-    HeapObject obj;
-    if (worklist_to_process == WorklistToProcess::kBailout) {
-      obj = marking_worklist()->PopBailout();
-    } else {
-      obj = marking_worklist()->Pop();
-    }
+    HeapObject obj = marking_worklist()->Pop();
     if (obj.is_null()) break;
     // Left trimming may result in white, grey, or black filler objects on the
     // marking deque. Ignore these objects.
@@ -1069,11 +1023,6 @@ void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
   bytes_to_process = Min(bytes_to_process, step_size);
   size_t bytes_processed = 0;
   if (FLAG_concurrent_marking) {
-    bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
-                           StepOrigin::kV8, WorklistToProcess::kBailout);
-    bytes_to_process = (bytes_processed >= bytes_to_process)
-                           ? 0
-                           : bytes_to_process - bytes_processed;
     size_t current_bytes_marked_concurrently =
         heap()->concurrent_marking()->TotalMarkedBytes();
     // The concurrent_marking()->TotalMarkedBytes() is not monothonic for a
@@ -1092,14 +1041,14 @@ void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
     bytes_processed += bytes_to_process;
     bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
   }
-  bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8,
-                          WorklistToProcess::kAll);
+  bytes_processed +=
+      Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8);
   bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
 }

 size_t IncrementalMarking::Step(size_t bytes_to_process,
-                                CompletionAction action, StepOrigin step_origin,
-                                WorklistToProcess worklist_to_process) {
+                                CompletionAction action,
+                                StepOrigin step_origin) {
   double start = heap_->MonotonicallyIncreasingTimeInMs();

   if (state_ == SWEEPING) {
@@ -1126,13 +1075,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
     }
 #endif

-    if (worklist_to_process == WorklistToProcess::kBailout) {
-      bytes_processed =
-          ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
-    } else {
-      bytes_processed =
-          ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
-    }
+    bytes_processed = ProcessMarkingWorklist(bytes_to_process);

     if (step_origin == StepOrigin::kTask) {
       bytes_marked_ahead_of_schedule_ += bytes_processed;

View File

@@ -20,7 +20,6 @@ class Object;
 class PagedSpace;

 enum class StepOrigin { kV8, kTask };
-enum class WorklistToProcess { kAll, kBailout };

 class V8_EXPORT_PRIVATE IncrementalMarking {
  public:
@@ -175,8 +174,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   void FinalizeSweeping();

   size_t Step(size_t bytes_to_process, CompletionAction action,
-              StepOrigin step_origin,
-              WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
+              StepOrigin step_origin);
   void StepOnAllocation(size_t bytes_to_process, double max_step_size);

   bool ShouldDoEmbedderStep();
@@ -216,7 +214,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   // This function is used to color the object black before it undergoes an
   // unsafe layout change. This is a part of synchronization protocol with
   // the concurrent marker.
-  void MarkBlackAndPush(HeapObject obj);
+  void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);

   bool IsCompacting() { return IsMarking() && is_compacting_; }
@@ -279,7 +277,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
   void DeactivateIncrementalWriteBarrier();

-  template <WorklistToProcess worklist_to_process = WorklistToProcess::kAll>
   V8_INLINE intptr_t ProcessMarkingWorklist(
       intptr_t bytes_to_process,
       ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);

View File

@@ -1691,7 +1691,6 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
     MarkObject(object, map);
     visitor.Visit(map, object);
   }
-  DCHECK(marking_worklist()->IsBailoutEmpty());
 }

 bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {

View File

@@ -468,7 +468,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   using NonAtomicMarkingState = MajorNonAtomicMarkingState;

-  // Wrapper for the shared and bailout worklists.
+  // Wrapper for the shared worklist.
   class MarkingWorklist {
    public:
     using ConcurrentMarkingWorklist = Worklist<HeapObject, 64>;
@@ -483,17 +483,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       DCHECK(success);
     }

-    void PushBailout(HeapObject object) {
-      bool success = bailout_.Push(kMainThread, object);
-      USE(success);
-      DCHECK(success);
-    }
-
     HeapObject Pop() {
       HeapObject result;
-#ifdef V8_CONCURRENT_MARKING
-      if (bailout_.Pop(kMainThread, &result)) return result;
-#endif
       if (shared_.Pop(kMainThread, &result)) return result;
 #ifdef V8_CONCURRENT_MARKING
       // The expectation is that this work list is empty almost all the time
@@ -503,29 +494,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       return HeapObject();
     }

-    HeapObject PopBailout() {
-#ifdef V8_CONCURRENT_MARKING
-      HeapObject result;
-      if (bailout_.Pop(kMainThread, &result)) return result;
-#endif
-      return HeapObject();
-    }
-
     void Clear() {
-      bailout_.Clear();
       shared_.Clear();
       on_hold_.Clear();
       embedder_.Clear();
     }

-    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
-
     bool IsEmpty() {
-      return bailout_.IsLocalEmpty(kMainThread) &&
-             shared_.IsLocalEmpty(kMainThread) &&
+      return shared_.IsLocalEmpty(kMainThread) &&
              on_hold_.IsLocalEmpty(kMainThread) &&
-             bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
-             on_hold_.IsGlobalPoolEmpty();
+             shared_.IsGlobalPoolEmpty() && on_hold_.IsGlobalPoolEmpty();
     }

     bool IsEmbedderEmpty() {
@@ -534,8 +512,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     }

     int Size() {
-      return static_cast<int>(bailout_.LocalSize(kMainThread) +
-                              shared_.LocalSize(kMainThread) +
+      return static_cast<int>(shared_.LocalSize(kMainThread) +
                               on_hold_.LocalSize(kMainThread));
     }
@@ -545,20 +522,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // The callback must accept HeapObject and return HeapObject.
     template <typename Callback>
     void Update(Callback callback) {
-      bailout_.Update(callback);
       shared_.Update(callback);
       on_hold_.Update(callback);
       embedder_.Update(callback);
     }

     ConcurrentMarkingWorklist* shared() { return &shared_; }
-    ConcurrentMarkingWorklist* bailout() { return &bailout_; }
     ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
     EmbedderTracingWorklist* embedder() { return &embedder_; }

     void Print() {
       PrintWorklist("shared", &shared_);
-      PrintWorklist("bailout", &bailout_);
       PrintWorklist("on_hold", &on_hold_);
     }
@@ -570,11 +544,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // Worklist used for most objects.
     ConcurrentMarkingWorklist shared_;

-    // Concurrent marking uses this worklist to bail out of concurrently
-    // marking certain object types. These objects are handled later in a STW
-    // pause after concurrent marking has finished.
-    ConcurrentMarkingWorklist bailout_;
-
     // Concurrent marking uses this worklist to bail out of marking objects
     // in new space's linear allocation area. Used to avoid black allocation
     // for new space. This allow the compiler to remove write barriers

View File

@@ -38,11 +38,11 @@ TEST(ConcurrentMarking) {
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -61,11 +61,11 @@ TEST(ConcurrentMarkingReschedule) {
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -88,11 +88,11 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   for (int i = 0; i < 5000; i++)
     PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();