[heap] Support fixed arrays with progress bar in the concurrent marker
Currently the concurrent marker visits any fixed array in one go, ignoring the progress bar, and does not expect to see black arrays in the worklist. For this reason, if the main thread marker starts visiting a large array with a progress bar, it has to re-push the array onto the bailout worklist, so all subsequent visits of that array happen on the main thread.

This patch makes the progress bar thread-safe and allows the concurrent marker to visit arrays in chunks.

Change-Id: I5e8867077ed2908b7f9c5d8bde34d0401f7fe446
Reviewed-on: https://chromium-review.googlesource.com/c/1385164
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58484}
parent f3cd638482
commit 7ca8acd0a5
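For context, the pattern the patch applies can be shown as a minimal standalone sketch (not V8 code; names such as LargeArray, ScanChunk, and kScanningChunk are illustrative): a marker reads the progress cursor, scans at most one chunk, publishes the new cursor, and only then re-queues the array if it is not yet fully scanned. The cursor lives in a std::atomic, mirroring the change to MemoryChunk::progress_bar_ below; the driver here is single-threaded and only illustrates the chunking and the "update progress before re-push" ordering.

// Minimal sketch of chunked scanning with an atomic progress cursor.
// Illustrative only; assumes nothing about V8 internals.
#include <algorithm>
#include <atomic>
#include <cstdio>
#include <queue>
#include <vector>

struct LargeArray {
  std::vector<int> slots;
  // Thread-safe scanning progress, analogous to MemoryChunk::progress_bar_.
  std::atomic<int> progress_bar{0};
};

constexpr int kScanningChunk = 4;  // tiny chunk size for the example

// Scans one chunk of |array| starting at the current progress bar and
// returns true if the array still has unscanned slots.
bool ScanChunk(LargeArray& array) {
  int size = static_cast<int>(array.slots.size());
  int start = array.progress_bar.load(std::memory_order_relaxed);
  int end = std::min(size, start + kScanningChunk);
  if (start >= end) return false;
  for (int i = start; i < end; ++i) {
    // "Visit" the slot; a real marker would trace outgoing pointers here.
    array.slots[i] += 1;
  }
  // Publish progress before re-queuing, mirroring the ordering constraint in
  // the patch: the object may only be pushed back after the bar is updated.
  array.progress_bar.store(end, std::memory_order_relaxed);
  return end < size;
}

int main() {
  LargeArray array;
  array.slots.assign(10, 0);
  std::queue<LargeArray*> worklist;
  worklist.push(&array);
  while (!worklist.empty()) {
    LargeArray* current = worklist.front();
    worklist.pop();
    if (ScanChunk(*current)) worklist.push(current);  // re-push unfinished work
  }
  std::printf("scanned up to %d of %zu slots\n",
              array.progress_bar.load(), array.slots.size());
  return 0;
}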
@@ -315,8 +315,38 @@ class ConcurrentMarkingVisitor final
   // Fixed array object ========================================================
   // ===========================================================================

+  int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
+                                     MemoryChunk* chunk) {
+    // The concurrent marker can process larger chunks than the main thread
+    // marker.
+    const int kProgressBarScanningChunk =
+        RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
+    DCHECK(marking_state_.IsBlackOrGrey(object));
+    marking_state_.GreyToBlack(object);
+    int size = FixedArray::BodyDescriptor::SizeOf(map, object);
+    int start =
+        Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+    int end = Min(size, start + kProgressBarScanningChunk);
+    if (start < end) {
+      VisitPointers(object, HeapObject::RawField(object, start),
+                    HeapObject::RawField(object, end));
+      chunk->set_progress_bar(end);
+      if (end < size) {
+        // The object can be pushed back onto the marking worklist only after
+        // progress bar was updated.
+        shared_.Push(object);
+      }
+    }
+    return end - start;
+  }
+
   int VisitFixedArray(Map map, FixedArray object) {
-    return VisitLeftTrimmableArray(map, object);
+    // Arrays with the progress bar are not left-trimmable because they reside
+    // in the large object space.
+    MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+    return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
+               ? VisitFixedArrayWithProgressBar(map, object, chunk)
+               : VisitLeftTrimmableArray(map, object);
   }

   int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
@@ -378,44 +378,30 @@ template <FixedArrayVisitationMode fixed_array_mode,
 int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
     VisitFixedArrayIncremental(Map map, FixedArray object) {
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
-  int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+  int size = FixedArray::BodyDescriptor::SizeOf(map, object);
   if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
-    DCHECK(!FLAG_use_marking_progress_bar || heap_->IsLargeObject(object));
-    // When using a progress bar for large fixed arrays, scan only a chunk of
-    // the array and try to push it onto the marking deque again until it is
-    // fully scanned. Fall back to scanning it through to the end in case this
-    // fails because of a full deque.
-    int start_offset =
+    DCHECK(FLAG_use_marking_progress_bar);
+    DCHECK(heap_->IsLargeObject(object));
+    int start =
         Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
-    if (start_offset < object_size) {
-      // Ensure that the object is either grey or black before pushing it
-      // into marking worklist.
-      marking_state()->WhiteToGrey(object);
-      if (FLAG_concurrent_marking || FLAG_parallel_marking) {
-        marking_worklist()->PushBailout(object);
-      } else {
+    int end = Min(size, start + kProgressBarScanningChunk);
+    if (start < end) {
+      VisitPointers(object, HeapObject::RawField(object, start),
+                    HeapObject::RawField(object, end));
+      chunk->set_progress_bar(end);
+      if (end < size) {
+        DCHECK(marking_state()->IsBlack(object));
+        // The object can be pushed back onto the marking worklist only after
+        // progress bar was updated.
         marking_worklist()->Push(object);
-      }
-      DCHECK(marking_state()->IsGrey(object) ||
-             marking_state()->IsBlack(object));
-
-      int end_offset =
-          Min(object_size, start_offset + kProgressBarScanningChunk);
-      int already_scanned_offset = start_offset;
-      VisitPointers(object, HeapObject::RawField(object, start_offset),
-                    HeapObject::RawField(object, end_offset));
-      start_offset = end_offset;
-      end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
-      chunk->set_progress_bar(start_offset);
-      if (start_offset < object_size) {
         heap_->incremental_marking()->NotifyIncompleteScanOfObject(
-            object_size - (start_offset - already_scanned_offset));
+            size - (end - start));
       }
     }
   } else {
-    FixedArray::BodyDescriptor::IterateBody(map, object, object_size, this);
+    FixedArray::BodyDescriptor::IterateBody(map, object, size, this);
   }
-  return object_size;
+  return size;
 }

 template <FixedArrayVisitationMode fixed_array_mode,
@@ -999,7 +999,7 @@ class MarkingVisitor final
  private:
   // Granularity in which FixedArrays are scanned if |fixed_array_mode|
   // is true.
-  static const int kProgressBarScanningChunk = 32 * 1024;
+  static const int kProgressBarScanningChunk = 32 * KB;

   template <typename TSlot>
   V8_INLINE void VisitPointerImpl(HeapObject host, TSlot p);
@@ -535,13 +535,13 @@ class MemoryChunk {
   Address HighWaterMark() { return address() + high_water_mark_; }

   int progress_bar() {
-    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
-    return static_cast<int>(progress_bar_);
+    DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
+    return static_cast<int>(progress_bar_.load(std::memory_order_relaxed));
   }

   void set_progress_bar(int progress_bar) {
-    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
-    progress_bar_ = progress_bar;
+    DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
+    progress_bar_.store(progress_bar, std::memory_order_relaxed);
   }

   void ResetProgressBar() {
@@ -681,7 +681,7 @@ class MemoryChunk {

   // Used by the incremental marker to keep track of the scanning progress in
   // large objects that have a progress bar and are scanned in increments.
-  intptr_t progress_bar_;
+  std::atomic<intptr_t> progress_bar_;

   // Count of bytes marked black on page.
   std::atomic<intptr_t> live_byte_count_;