[heap] Extracts parts of ConcurrentMarkingVisitor into a base class

This is the first step in unifying the concurrent and main thread
marking visitors. The new MarkingVisitorBase will become the base class
for all marking visitors and will remove the existing code duplication.

This is a refactoring without behavior change.

A subsequent CL will change the main thread marking visitor to derive
from the new base class.
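
For illustration, a minimal sketch of the CRTP layering this CL introduces:
shared marking logic in a class template that calls back into the concrete
visitor. The names below (ToyMarkingState, ToyConcurrentVisitor) are
hypothetical stand-ins, not the actual V8 classes; the real MarkingVisitorBase
additionally takes the worklists, WeakObjects, epoch, and flush-mode
parameters shown in the diff.

// Minimal sketch (not V8 code): shared logic lives in the base template and
// reaches the parts that differ between visitors via the CRTP downcast.
#include <iostream>

template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase {
 public:
  // Shared logic implemented once in the base; the marking state stays in
  // the concrete visitor.
  bool ShouldVisit(int object) {
    return concrete_visitor()->marking_state()->GreyToBlack(object);
  }

 protected:
  ConcreteVisitor* concrete_visitor() {
    return static_cast<ConcreteVisitor*>(this);
  }
};

// Stand-in for ConcurrentMarkingState (illustration only).
class ToyMarkingState {
 public:
  bool GreyToBlack(int /*object*/) { return true; }
};

// Stand-in for ConcurrentMarkingVisitor: supplies the marking_state()
// accessor that the base class expects from its derived classes.
class ToyConcurrentVisitor final
    : public MarkingVisitorBase<ToyConcurrentVisitor, ToyMarkingState> {
 public:
  ToyMarkingState* marking_state() { return &marking_state_; }

 private:
  ToyMarkingState marking_state_;
};

int main() {
  ToyConcurrentVisitor visitor;
  std::cout << visitor.ShouldVisit(42) << "\n";  // prints 1
  return 0;
}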

Bug: chromium:1019218

Change-Id: I3d47030d396e0ba6706882fbd922bbcac46181b2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1886920
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64632}
Author: Ulan Degenbaev, 2019-10-29 17:20:49 +01:00 (committed by Commit Bot)
Parent: 1dea7e42f7
Commit: 52a7ae362a
10 changed files with 766 additions and 590 deletions

BUILD.gn

@@ -2248,6 +2248,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
"src/heap/marking-visitor-inl.h",
"src/heap/marking-visitor.h",
"src/heap/marking.cc",
"src/heap/marking.h",
"src/heap/memory-measurement.cc",

src/heap/concurrent-marking.cc

@@ -14,6 +14,8 @@
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
@@ -74,137 +76,32 @@ class SlotSnapshot {
};
class ConcurrentMarkingVisitor final
: public HeapVisitor<int, ConcurrentMarkingVisitor> {
: public MarkingVisitorBase<ConcurrentMarkingVisitor,
ConcurrentMarkingState> {
public:
using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
explicit ConcurrentMarkingVisitor(
ConcurrentMarking::MarkingWorklist* shared,
MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
bool embedder_tracing_enabled, unsigned mark_compact_epoch,
bool is_forced_gc)
: shared_(shared, task_id),
weak_objects_(weak_objects),
embedder_objects_(embedder_objects, task_id),
ConcurrentMarkingVisitor(int task_id, MarkingWorklist* marking_worklist,
EmbedderTracingWorklist* embedder_worklist,
WeakObjects* weak_objects,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, marking_worklist, embedder_worklist,
weak_objects, mark_compact_epoch,
bytecode_flush_mode, embedder_tracing_enabled,
is_forced_gc),
marking_state_(memory_chunk_data),
memory_chunk_data_(memory_chunk_data),
task_id_(task_id),
embedder_tracing_enabled_(embedder_tracing_enabled),
mark_compact_epoch_(mark_compact_epoch),
is_forced_gc_(is_forced_gc) {
// It is not safe to access flags from concurrent marking visitor. So
// set the bytecode flush mode based on the flags here
bytecode_flush_mode_ = Heap::GetBytecodeFlushMode();
}
memory_chunk_data_(memory_chunk_data) {}
template <typename T>
static V8_INLINE T Cast(HeapObject object) {
return T::cast(object);
}
bool ShouldVisit(HeapObject object) {
return marking_state_.GreyToBlack(object);
}
// HeapVisitor overrides to implement the snapshotting protocol.
bool AllowDefaultJSObjectVisit() { return false; }
template <typename THeapObjectSlot>
void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object) {
MarkObject(heap_object);
MarkCompactCollector::RecordSlot(host, slot, heap_object);
}
template <typename THeapObjectSlot>
void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object) {
#ifdef THREAD_SANITIZER
MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
if (marking_state_.IsBlackOrGrey(heap_object)) {
// Weak references with live values are directly processed here to
// reduce the processing time of weak cells during the main GC
// pause.
MarkCompactCollector::RecordSlot(host, slot, heap_object);
} else {
// If we do not know about liveness of the value, we have to process
// the reference when we know the liveness of the whole transitive
// closure.
weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
}
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
VisitPointersImpl(host, start, end);
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
VisitPointersImpl(host, start, end);
}
template <typename TSlot>
V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
using THeapObjectSlot = typename TSlot::THeapObjectSlot;
for (TSlot slot = start; slot < end; ++slot) {
typename TSlot::TObject object = slot.Relaxed_Load();
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
// barrier will treat the weak reference as strong, so we won't miss the
// weak reference.
ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
} else if (TSlot::kCanBeWeak &&
object.GetHeapObjectIfWeak(&heap_object)) {
ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
}
}
}
// Weak list pointers should be ignored during marking. The lists are
// reconstructed after GC.
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object = rinfo->target_object();
RecordRelocSlot(host, rinfo, object);
if (!marking_state_.IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
weak_objects_->weak_objects_in_code.Push(task_id_,
std::make_pair(object, host));
} else {
MarkObject(object);
}
}
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
RecordRelocSlot(host, rinfo, target);
MarkObject(target);
}
void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
for (int i = 0; i < snapshot.number_of_slots(); i++) {
ObjectSlot slot = snapshot.slot(i);
Object object = snapshot.value(i);
DCHECK(!HasWeakHeapObjectTag(object));
if (!object.IsHeapObject()) continue;
HeapObject heap_object = HeapObject::cast(object);
MarkObject(heap_object);
MarkCompactCollector::RecordSlot(host, slot, heap_object);
}
}
// ===========================================================================
// JS object =================================================================
// ===========================================================================
int VisitJSObject(Map map, JSObject object) {
return VisitJSObjectSubclass(map, object);
}
@@ -217,78 +114,10 @@ class ConcurrentMarkingVisitor final
return VisitJSObjectSubclass(map, object);
}
int VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
int size = VisitJSObjectSubclass(map, weak_ref);
if (size == 0) {
return 0;
}
if (weak_ref.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_ref.target());
#ifdef THREAD_SANITIZER
MemoryChunk::FromHeapObject(target)->SynchronizedHeapLoad();
#endif
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the
// VisitJSObjectSubclass above didn't visit it.
ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
MarkCompactCollector::RecordSlot(weak_ref, slot, target);
} else {
// JSWeakRef points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
}
}
return size;
int VisitJSWeakCollection(Map map, JSWeakCollection object) {
return VisitJSObjectSubclass(map, object);
}
int VisitWeakCell(Map map, WeakCell weak_cell) {
if (!ShouldVisit(weak_cell)) return 0;
int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
VisitMapPointer(weak_cell);
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
if (weak_cell.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell.target());
#ifdef THREAD_SANITIZER
MemoryChunk::FromHeapObject(target)->SynchronizedHeapLoad();
#endif
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the WeakCell, since the IterateBody above
// didn't visit it.
ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
MarkCompactCollector::RecordSlot(weak_cell, slot, target);
} else {
// WeakCell points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
weak_objects_->weak_cells.Push(task_id_, weak_cell);
}
}
return size;
}
// Some JS objects can carry back links to embedders that contain information
// relevant to the garbage collectors.
int VisitJSApiObject(Map map, JSObject object) {
return VisitEmbedderTracingSubclass(map, object);
}
int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
return VisitEmbedderTracingSubclass(map, object);
}
int VisitJSDataView(Map map, JSDataView object) {
return VisitEmbedderTracingSubclass(map, object);
}
int VisitJSTypedArray(Map map, JSTypedArray object) {
return VisitEmbedderTracingSubclass(map, object);
}
// ===========================================================================
// Strings with pointers =====================================================
// ===========================================================================
int VisitConsString(Map map, ConsString object) {
return VisitFullyWithSnapshot(map, object);
}
@@ -301,10 +130,6 @@ class ConcurrentMarkingVisitor final
return VisitFullyWithSnapshot(map, object);
}
// ===========================================================================
// Strings without pointers ==================================================
// ===========================================================================
int VisitSeqOneByteString(Map map, SeqOneByteString object) {
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object);
@@ -317,239 +142,21 @@ class ConcurrentMarkingVisitor final
return SeqTwoByteString::SizeFor(object.synchronized_length());
}
// ===========================================================================
// Fixed array object ========================================================
// ===========================================================================
int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
MemoryChunk* chunk) {
// The concurrent marker can process larger chunks than the main thread
// marker.
const int kProgressBarScanningChunk =
RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
DCHECK(marking_state_.IsBlackOrGrey(object));
marking_state_.GreyToBlack(object);
int size = FixedArray::BodyDescriptor::SizeOf(map, object);
size_t current_progress_bar = chunk->ProgressBar();
int start = static_cast<int>(current_progress_bar);
if (start == 0) start = FixedArray::BodyDescriptor::kStartOffset;
int end = Min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, object.RawField(start), object.RawField(end));
bool success = chunk->TrySetProgressBar(current_progress_bar, end);
CHECK(success);
if (end < size) {
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
shared_.Push(object);
}
}
return end - start;
}
int VisitFixedArray(Map map, FixedArray object) {
// Arrays with the progress bar are not left-trimmable because they reside
// in the large object space.
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
? VisitFixedArrayWithProgressBar(map, object, chunk)
: VisitLeftTrimmableArray(map, object);
}
int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
return VisitLeftTrimmableArray(map, object);
}
// ===========================================================================
// Side-effectful visitation.
// ===========================================================================
int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
if (!ShouldVisit(shared_info)) return 0;
int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
VisitMapPointer(shared_info);
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
this);
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
if (shared_info.ShouldFlushBytecode(bytecode_flush_mode_)) {
weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
} else {
VisitPointer(shared_info, shared_info.RawField(
SharedFunctionInfo::kFunctionDataOffset));
}
return size;
}
int VisitBytecodeArray(Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
VisitMapPointer(object);
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
if (!is_forced_gc_) {
object.MakeOlder();
}
return size;
}
int VisitJSFunction(Map map, JSFunction object) {
int size = VisitJSObjectSubclass(map, object);
// Check if the JSFunction needs reset due to bytecode being flushed.
if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
object.NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, object);
}
return size;
}
int VisitMap(Map meta_map, Map map) {
if (!ShouldVisit(map)) return 0;
int size = Map::BodyDescriptor::SizeOf(meta_map, map);
if (map.CanTransition()) {
// Maps that can transition share their descriptor arrays and require
// special visiting logic to avoid memory leaks.
// Since descriptor arrays are potentially shared, ensure that only the
// descriptors that belong to this map are marked. The first time a
// non-empty descriptor array is marked, its header is also visited. The
// slot holding the descriptor array will be implicitly recorded when the
// pointer fields of this map are visited.
DescriptorArray descriptors = map.synchronized_instance_descriptors();
MarkDescriptorArrayBlack(descriptors);
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors) {
// It is possible that the concurrent marker observes the
// number_of_own_descriptors out of sync with the descriptors. In that
// case the marking write barrier for the descriptor array will ensure
// that all required descriptors are marked. The concurrent marker
// just should avoid crashing in that case. That's why we need the
// std::min<int>() below.
VisitDescriptors(descriptors,
std::min<int>(number_of_own_descriptors,
descriptors.number_of_descriptors()));
}
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
}
Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
return size;
}
void VisitDescriptors(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
mark_compact_epoch_, new_marked);
if (old_marked < new_marked) {
VisitPointers(
descriptor_array,
MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
MaybeObjectSlot(descriptor_array.GetDescriptorSlot(new_marked)));
}
}
int VisitDescriptorArray(Map map, DescriptorArray array) {
if (!ShouldVisit(array)) return 0;
VisitMapPointer(array);
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
VisitPointers(array, array.GetFirstPointerSlot(),
array.GetDescriptorSlot(0));
VisitDescriptors(array, array.number_of_descriptors());
return size;
}
int VisitTransitionArray(Map map, TransitionArray array) {
if (!ShouldVisit(array)) return 0;
VisitMapPointer(array);
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
weak_objects_->transition_arrays.Push(task_id_, array);
return size;
}
int VisitJSWeakCollection(Map map, JSWeakCollection object) {
return VisitJSObjectSubclass(map, object);
}
int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
if (!ShouldVisit(table)) return 0;
weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
for (InternalIndex i : table.IterateEntries()) {
ObjectSlot key_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
HeapObject key = HeapObject::cast(table.KeyAt(i));
#ifdef THREAD_SANITIZER
MemoryChunk::FromHeapObject(key)->SynchronizedHeapLoad();
#endif
MarkCompactCollector::RecordSlot(table, key_slot, key);
ObjectSlot value_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state_.IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
Object value_obj = table.ValueAt(i);
if (value_obj.IsHeapObject()) {
HeapObject value = HeapObject::cast(value_obj);
#ifdef THREAD_SANITIZER
MemoryChunk::FromHeapObject(value)->SynchronizedHeapLoad();
#endif
MarkCompactCollector::RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
if (marking_state_.IsWhite(value)) {
weak_objects_->discovered_ephemerons.Push(task_id_,
Ephemeron{key, value});
}
}
}
}
return table.SizeFromMap(map);
}
// Implements ephemeron semantics: Marks value if key is already reachable.
// Returns true if value was actually marked.
bool ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state_.IsBlackOrGrey(key)) {
if (marking_state_.WhiteToGrey(value)) {
shared_.Push(value);
marking_worklist_->Push(task_id_, value);
return true;
}
} else if (marking_state_.IsWhite(value)) {
weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
}
return false;
}
void MarkObject(HeapObject object) {
#ifdef THREAD_SANITIZER
MemoryChunk::FromHeapObject(object)->SynchronizedHeapLoad();
#endif
if (marking_state_.WhiteToGrey(object)) {
shared_.Push(object);
}
}
void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
marking_state_.WhiteToGrey(descriptors);
if (marking_state_.GreyToBlack(descriptors)) {
VisitPointers(descriptors, descriptors.GetFirstPointerSlot(),
descriptors.GetDescriptorSlot(0));
}
}
private:
// Helper class for collecting in-object slot addresses and values.
class SlotSnapshottingVisitor final : public ObjectVisitor {
@@ -612,18 +219,6 @@ class ConcurrentMarkingVisitor final
used_size, size);
}
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object) {
DCHECK(object.IsApiWrapper());
int size = VisitJSObjectSubclass(map, object);
if (size && embedder_tracing_enabled_) {
// Success: The object needs to be processed for embedder references on
// the main thread.
embedder_objects_.Push(object);
}
return size;
}
template <typename T>
int VisitLeftTrimmableArray(Map map, T object) {
// The synchronized_length() function checks that the length is a Smi.
@@ -639,6 +234,18 @@ class ConcurrentMarkingVisitor final
return size;
}
void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
for (int i = 0; i < snapshot.number_of_slots(); i++) {
ObjectSlot slot = snapshot.slot(i);
Object object = snapshot.value(i);
DCHECK(!HasWeakHeapObjectTag(object));
if (!object.IsHeapObject()) continue;
HeapObject heap_object = HeapObject::cast(object);
MarkObject(host, heap_object);
RecordSlot(host, slot, heap_object);
}
}
template <typename T>
int VisitFullyWithSnapshot(Map map, T object) {
using TBodyDescriptor = typename T::BodyDescriptor;
@@ -664,6 +271,11 @@ class ConcurrentMarkingVisitor final
return slot_snapshot_;
}
template <typename TSlot>
void RecordSlot(HeapObject object, TSlot slot, HeapObject target) {
MarkCompactCollector::RecordSlot(object, slot, target);
}
void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
MarkCompactCollector::RecordRelocSlotInfo info =
MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
@@ -676,17 +288,22 @@ class ConcurrentMarkingVisitor final
}
}
ConcurrentMarking::MarkingWorklist::View shared_;
WeakObjects* weak_objects_;
ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
void SynchronizePageAccess(HeapObject heap_object) {
#ifdef THREAD_SANITIZER
// This is needed because TSAN does not process the memory fence
// emitted after page initialization.
MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
}
ConcurrentMarkingState* marking_state() { return &marking_state_; }
ConcurrentMarkingState marking_state_;
MemoryChunkDataMap* memory_chunk_data_;
int task_id_;
SlotSnapshot slot_snapshot_;
bool embedder_tracing_enabled_;
const unsigned mark_compact_epoch_;
bool is_forced_gc_;
BytecodeFlushMode bytecode_flush_mode_;
friend class MarkingVisitorBase<ConcurrentMarkingVisitor,
ConcurrentMarkingState>;
};
// Strings can change maps due to conversion to thin string or external strings.
@@ -745,15 +362,16 @@ class ConcurrentMarking::Task : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(Task);
};
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
ConcurrentMarking::ConcurrentMarking(Heap* heap,
MarkingWorklist* marking_worklist,
MarkingWorklist* on_hold,
WeakObjects* weak_objects,
EmbedderTracingWorklist* embedder_objects)
EmbedderTracingWorklist* embedder_worklist,
WeakObjects* weak_objects)
: heap_(heap),
shared_(shared),
marking_worklist_(marking_worklist),
on_hold_(on_hold),
weak_objects_(weak_objects),
embedder_objects_(embedder_objects) {
embedder_worklist_(embedder_worklist),
weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
@@ -766,9 +384,10 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
ConcurrentMarkingVisitor visitor(
shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
task_id, heap_->local_embedder_heap_tracer()->InUse(),
task_state->mark_compact_epoch, task_state->is_forced_gc);
task_id, marking_worklist_, embedder_worklist_, weak_objects_,
task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
&task_state->memory_chunk_data);
double time_ms;
size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
@@ -797,7 +416,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterrupCheck) {
HeapObject object;
if (!shared_->Pop(task_id, &object)) {
if (!marking_worklist_->Pop(task_id, &object)) {
done = true;
break;
}
@@ -835,9 +454,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
shared_->FlushToGlobal(task_id);
marking_worklist_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
embedder_objects_->FlushToGlobal(task_id);
embedder_worklist_->FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
@@ -926,7 +545,7 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
return;
}
}
if (!shared_->IsGlobalPoolEmpty() ||
if (!marking_worklist_->IsGlobalPoolEmpty() ||
!weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
!weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
ScheduleTasks();

src/heap/concurrent-marking.h

@@ -11,6 +11,7 @@
#include "src/base/atomic-utils.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
@@ -65,12 +66,11 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
// task 0, reserved for the main thread).
static constexpr int kMaxTasks = 7;
using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* on_hold, WeakObjects* weak_objects,
EmbedderTracingWorklist* embedder_objects);
ConcurrentMarking(Heap* heap, MarkingWorklist* marking_worklist,
MarkingWorklist* on_hold,
EmbedderTracingWorklist* embedder_worklist,
WeakObjects* weak_objects);
// Schedules asynchronous tasks to perform concurrent marking. Objects in the
// heap should not be moved while these are active (can be stopped safely via
@@ -112,10 +112,10 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
class Task;
void Run(int task_id, TaskState* task_state);
Heap* const heap_;
MarkingWorklist* const shared_;
MarkingWorklist* const marking_worklist_;
MarkingWorklist* const on_hold_;
EmbedderTracingWorklist* const embedder_worklist_;
WeakObjects* const weak_objects_;
EmbedderTracingWorklist* const embedder_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
std::atomic<bool> ephemeron_marked_{false};

src/heap/heap.cc

@@ -5009,7 +5009,7 @@ void Heap::SetUp() {
mark_compact_collector_->marking_worklist();
concurrent_marking_.reset(new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->on_hold(),
mark_compact_collector_->weak_objects(), marking_worklist->embedder()));
marking_worklist->embedder(), mark_compact_collector_->weak_objects()));
} else {
concurrent_marking_.reset(
new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));

src/heap/mark-compact-inl.h

@@ -21,26 +21,6 @@
namespace v8 {
namespace internal {
template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(HeapObject obj) {
MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
MarkBit markbit = MarkBitFrom(p, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj.Size());
return true;
}
template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::WhiteToGrey(HeapObject obj) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
}
template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::WhiteToBlack(
HeapObject obj) {
return WhiteToGrey(obj) && GreyToBlack(obj);
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
MarkingVisitor<fixed_array_mode, retaining_path_mode,

src/heap/mark-compact.h

@@ -9,13 +9,10 @@
#include <vector>
#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/objects/heap-object.h" // For Worklist<HeapObject, ...>
#include "src/objects/js-weak-refs.h" // For Worklist<WeakCell, ...>
namespace v8 {
namespace internal {
@@ -29,53 +26,6 @@ class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj.ptr());
}
// {addr} may be tagged or aligned.
V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
p->AddressToMarkbitIndex(addr));
}
Marking::ObjectColor Color(HeapObject obj) {
return Marking::Color(MarkBitFrom(obj));
}
V8_INLINE bool IsImpossible(HeapObject obj) {
return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsBlack(HeapObject obj) {
return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsWhite(HeapObject obj) {
return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsGrey(HeapObject obj) {
return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsBlackOrGrey(HeapObject obj) {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool WhiteToGrey(HeapObject obj);
V8_INLINE bool WhiteToBlack(HeapObject obj);
V8_INLINE bool GreyToBlack(HeapObject obj);
void ClearLiveness(MemoryChunk* chunk) {
static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}
};
class MarkBitCellIterator {
public:
MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
@@ -405,57 +355,6 @@ class MajorNonAtomicMarkingState final
}
};
struct Ephemeron {
HeapObject key;
HeapObject value;
};
using EphemeronWorklist = Worklist<Ephemeron, 64>;
// Weak objects encountered during marking.
struct WeakObjects {
Worklist<TransitionArray, 64> transition_arrays;
// Keep track of all EphemeronHashTables in the heap to process
// them in the atomic pause.
Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
// Keep track of all ephemerons for concurrent marking tasks. Only store
// ephemerons in these Worklists if both key and value are unreachable at the
// moment.
//
// MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
// worklists.
//
// current_ephemerons is used as draining worklist in the current fixpoint
// iteration.
EphemeronWorklist current_ephemerons;
// Stores ephemerons to visit in the next fixpoint iteration.
EphemeronWorklist next_ephemerons;
// When draining the marking worklist new discovered ephemerons are pushed
// into this worklist.
EphemeronWorklist discovered_ephemerons;
// TODO(marja): For old space, we only need the slot, not the host
// object. Optimize this by adding a different storage for old space.
Worklist<std::pair<HeapObject, HeapObjectSlot>, 64> weak_references;
Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;
Worklist<JSWeakRef, 64> js_weak_refs;
Worklist<WeakCell, 64> weak_cells;
Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
Worklist<JSFunction, 64> flushed_js_functions;
};
struct EphemeronMarking {
std::vector<HeapObject> newly_discovered;
bool newly_discovered_overflowed;
size_t newly_discovered_limit;
};
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:

src/heap/marking-visitor-inl.h (new file)

@@ -0,0 +1,429 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MARKING_VISITOR_INL_H_
#define V8_HEAP_MARKING_VISITOR_INL_H_
#include "src/heap/marking-visitor.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
// ===========================================================================
// Visiting strong and weak pointers =========================================
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
concrete_visitor()->SynchronizePageAccess(object);
if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
marking_worklist_->Push(task_id_, object);
}
}
// class template arguments
template <typename ConcreteVisitor, typename MarkingState>
// method template arguments
template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessStrongHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
MarkObject(host, heap_object);
concrete_visitor()->RecordSlot(host, slot, heap_object);
}
// class template arguments
template <typename ConcreteVisitor, typename MarkingState>
// method template arguments
template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessWeakHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
concrete_visitor()->SynchronizePageAccess(heap_object);
if (concrete_visitor()->marking_state()->IsBlackOrGrey(heap_object)) {
// Weak references with live values are directly processed here to
// reduce the processing time of weak cells during the main GC
// pause.
concrete_visitor()->RecordSlot(host, slot, heap_object);
} else {
// If we do not know about liveness of the value, we have to process
// the reference when we know the liveness of the whole transitive
// closure.
weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
}
}
// class template arguments
template <typename ConcreteVisitor, typename MarkingState>
// method template arguments
template <typename TSlot>
V8_INLINE void
MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitPointersImpl(
HeapObject host, TSlot start, TSlot end) {
using THeapObjectSlot = typename TSlot::THeapObjectSlot;
for (TSlot slot = start; slot < end; ++slot) {
typename TSlot::TObject object = slot.Relaxed_Load();
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
// barrier will treat the weak reference as strong, so we won't miss the
// weak reference.
ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
} else if (TSlot::kCanBeWeak && object.GetHeapObjectIfWeak(&heap_object)) {
ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
}
}
}
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
Code host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object = rinfo->target_object();
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
weak_objects_->weak_objects_in_code.Push(task_id_,
std::make_pair(object, host));
} else {
MarkObject(host, object);
}
}
concrete_visitor()->RecordRelocSlot(host, rinfo, object);
}
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodeTarget(
Code host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
MarkObject(host, target);
concrete_visitor()->RecordRelocSlot(host, rinfo, target);
}
// ===========================================================================
// Object participating in bytecode flushing =================================
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitBytecodeArray(
Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
this->VisitMapPointer(object);
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
if (!is_forced_gc_) {
object.MakeOlder();
}
return size;
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
Map map, JSFunction object) {
int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
// Check if the JSFunction needs reset due to bytecode being flushed.
if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
object.NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, object);
}
return size;
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
Map map, SharedFunctionInfo shared_info) {
if (!ShouldVisit(shared_info)) return 0;
int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
this->VisitMapPointer(shared_info);
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size, this);
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
if (shared_info.ShouldFlushBytecode(bytecode_flush_mode_)) {
weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
} else {
VisitPointer(shared_info,
shared_info.RawField(SharedFunctionInfo::kFunctionDataOffset));
}
return size;
}
// ===========================================================================
// Fixed arrays that need incremental processing and can be left-trimmed =====
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
VisitFixedArrayWithProgressBar(Map map, FixedArray object,
MemoryChunk* chunk) {
// The concurrent marker can process larger chunks than the main thread
// marker.
const int kProgressBarScanningChunk =
RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
DCHECK(concrete_visitor()->marking_state()->IsBlackOrGrey(object));
concrete_visitor()->marking_state()->GreyToBlack(object);
int size = FixedArray::BodyDescriptor::SizeOf(map, object);
size_t current_progress_bar = chunk->ProgressBar();
int start = static_cast<int>(current_progress_bar);
if (start == 0) {
this->VisitMapPointer(object);
start = FixedArray::BodyDescriptor::kStartOffset;
}
int end = Min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, object.RawField(start), object.RawField(end));
bool success = chunk->TrySetProgressBar(current_progress_bar, end);
CHECK(success);
if (end < size) {
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
marking_worklist_->Push(task_id_, object);
}
}
return end - start;
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedArray(
Map map, FixedArray object) {
// Arrays with the progress bar are not left-trimmable because they reside
// in the large object space.
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
? VisitFixedArrayWithProgressBar(map, object, chunk)
: concrete_visitor()->VisitLeftTrimmableArray(map, object);
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedDoubleArray(
Map map, FixedDoubleArray object) {
return concrete_visitor()->VisitLeftTrimmableArray(map, object);
}
// ===========================================================================
// Objects participating in embedder tracing =================================
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
template <typename T>
int MarkingVisitorBase<ConcreteVisitor,
MarkingState>::VisitEmbedderTracingSubclass(Map map,
T object) {
DCHECK(object.IsApiWrapper());
int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
if (size && is_embedder_tracing_enabled_) {
// Success: The object needs to be processed for embedder references on
// the main thread.
embedder_worklist_->Push(task_id_, object);
}
return size;
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSApiObject(
Map map, JSObject object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSArrayBuffer(
Map map, JSArrayBuffer object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSDataView(
Map map, JSDataView object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSTypedArray(
Map map, JSTypedArray object) {
return VisitEmbedderTracingSubclass(map, object);
}
// ===========================================================================
// Weak JavaScript objects ===================================================
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
Map map, EphemeronHashTable table) {
if (!ShouldVisit(table)) return 0;
weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
for (InternalIndex i : table.IterateEntries()) {
ObjectSlot key_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
HeapObject key = HeapObject::cast(table.KeyAt(i));
concrete_visitor()->SynchronizePageAccess(key);
concrete_visitor()->RecordSlot(table, key_slot, key);
ObjectSlot value_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (concrete_visitor()->marking_state()->IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
Object value_obj = table.ValueAt(i);
if (value_obj.IsHeapObject()) {
HeapObject value = HeapObject::cast(value_obj);
concrete_visitor()->SynchronizePageAccess(value);
concrete_visitor()->RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
if (concrete_visitor()->marking_state()->IsWhite(value)) {
weak_objects_->discovered_ephemerons.Push(task_id_,
Ephemeron{key, value});
}
}
}
}
return table.SizeFromMap(map);
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSWeakRef(
Map map, JSWeakRef weak_ref) {
int size = concrete_visitor()->VisitJSObjectSubclass(map, weak_ref);
if (size == 0) return 0;
if (weak_ref.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_ref.target());
concrete_visitor()->SynchronizePageAccess(target);
if (concrete_visitor()->marking_state()->IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the
// VisitJSObjectSubclass above didn't visit it.
ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
concrete_visitor()->RecordSlot(weak_ref, slot, target);
} else {
// JSWeakRef points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
}
}
return size;
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
Map map, WeakCell weak_cell) {
if (!ShouldVisit(weak_cell)) return 0;
int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
this->VisitMapPointer(weak_cell);
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
if (weak_cell.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell.target());
concrete_visitor()->SynchronizePageAccess(target);
if (concrete_visitor()->marking_state()->IsBlackOrGrey(target)) {
// Record the slot inside the WeakCell, since the IterateBody above
// didn't visit it.
ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
concrete_visitor()->RecordSlot(weak_cell, slot, target);
} else {
// WeakCell points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
weak_objects_->weak_cells.Push(task_id_, weak_cell);
}
}
return size;
}
// ===========================================================================
// Custom weakness in descriptor arrays and transition arrays ================
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::
MarkDescriptorArrayBlack(HeapObject host, DescriptorArray descriptors) {
concrete_visitor()->marking_state()->WhiteToGrey(descriptors);
if (concrete_visitor()->marking_state()->GreyToBlack(descriptors)) {
VisitPointer(descriptors, descriptors.map_slot());
VisitPointers(descriptors, descriptors.GetFirstPointerSlot(),
descriptors.GetDescriptorSlot(0));
}
}
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptors(
DescriptorArray descriptor_array, int number_of_own_descriptors) {
int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
mark_compact_epoch_, new_marked);
if (old_marked < new_marked) {
VisitPointers(
descriptor_array,
MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
MaybeObjectSlot(descriptor_array.GetDescriptorSlot(new_marked)));
}
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorArray(
Map map, DescriptorArray array) {
if (!ShouldVisit(array)) return 0;
this->VisitMapPointer(array);
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
VisitPointers(array, array.GetFirstPointerSlot(), array.GetDescriptorSlot(0));
VisitDescriptors(array, array.number_of_descriptors());
return size;
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitMap(Map meta_map,
Map map) {
if (!ShouldVisit(map)) return 0;
int size = Map::BodyDescriptor::SizeOf(meta_map, map);
if (map.CanTransition()) {
// Maps that can transition share their descriptor arrays and require
// special visiting logic to avoid memory leaks.
// Since descriptor arrays are potentially shared, ensure that only the
// descriptors that belong to this map are marked. The first time a
// non-empty descriptor array is marked, its header is also visited. The
// slot holding the descriptor array will be implicitly recorded when the
// pointer fields of this map are visited.
DescriptorArray descriptors = map.synchronized_instance_descriptors();
MarkDescriptorArrayBlack(map, descriptors);
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors) {
// It is possible that the concurrent marker observes the
// number_of_own_descriptors out of sync with the descriptors. In that
// case the marking write barrier for the descriptor array will ensure
// that all required descriptors are marked. The concurrent marker
// just should avoid crashing in that case. That's why we need the
// std::min<int>() below.
VisitDescriptors(descriptors,
std::min<int>(number_of_own_descriptors,
descriptors.number_of_descriptors()));
}
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
}
Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
return size;
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitTransitionArray(
Map map, TransitionArray array) {
if (!ShouldVisit(array)) return 0;
this->VisitMapPointer(array);
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
weak_objects_->transition_arrays.Push(task_id_, array);
return size;
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MARKING_VISITOR_INL_H_

src/heap/marking-visitor.h (new file)

@@ -0,0 +1,248 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MARKING_VISITOR_H_
#define V8_HEAP_MARKING_VISITOR_H_
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
#include "src/objects/heap-object.h" // For Worklist<HeapObject, ...>
#include "src/objects/js-weak-refs.h" // For Worklist<WeakCell, ...>
namespace v8 {
namespace internal {
using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
// Worklist for objects that potentially require embedder tracing, i.e.,
// these objects need to be handed over to the embedder to find the full
// transitive closure.
using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;
struct Ephemeron {
HeapObject key;
HeapObject value;
};
using EphemeronWorklist = Worklist<Ephemeron, 64>;
// Weak objects encountered during marking.
struct WeakObjects {
Worklist<TransitionArray, 64> transition_arrays;
// Keep track of all EphemeronHashTables in the heap to process
// them in the atomic pause.
Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
// Keep track of all ephemerons for concurrent marking tasks. Only store
// ephemerons in these Worklists if both key and value are unreachable at the
// moment.
//
// MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
// worklists.
//
// current_ephemerons is used as draining worklist in the current fixpoint
// iteration.
EphemeronWorklist current_ephemerons;
// Stores ephemerons to visit in the next fixpoint iteration.
EphemeronWorklist next_ephemerons;
// When draining the marking worklist new discovered ephemerons are pushed
// into this worklist.
EphemeronWorklist discovered_ephemerons;
// TODO(marja): For old space, we only need the slot, not the host
// object. Optimize this by adding a different storage for old space.
Worklist<std::pair<HeapObject, HeapObjectSlot>, 64> weak_references;
Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;
Worklist<JSWeakRef, 64> js_weak_refs;
Worklist<WeakCell, 64> weak_cells;
Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
Worklist<JSFunction, 64> flushed_js_functions;
};
struct EphemeronMarking {
std::vector<HeapObject> newly_discovered;
bool newly_discovered_overflowed;
size_t newly_discovered_limit;
};
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj.ptr());
}
// {addr} may be tagged or aligned.
V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
p->AddressToMarkbitIndex(addr));
}
Marking::ObjectColor Color(HeapObject obj) {
return Marking::Color(MarkBitFrom(obj));
}
V8_INLINE bool IsImpossible(HeapObject obj) {
return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsBlack(HeapObject obj) {
return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsWhite(HeapObject obj) {
return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsGrey(HeapObject obj) {
return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsBlackOrGrey(HeapObject obj) {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool WhiteToGrey(HeapObject obj) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool WhiteToBlack(HeapObject obj) {
return WhiteToGrey(obj) && GreyToBlack(obj);
}
V8_INLINE bool GreyToBlack(HeapObject obj) {
MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
MarkBit markbit = MarkBitFrom(p, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj.Size());
return true;
}
void ClearLiveness(MemoryChunk* chunk) {
static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}
};
// The base class for all marking visitors. It implements marking logic with
// support of bytecode flushing, embedder tracing, and weak references.
//
// Derived classes are expected to provide the following:
// - ConcreteVisitor::marking_state method,
// - ConcreteVisitor::VisitJSObjectSubclass method,
// - ConcreteVisitor::VisitLeftTrimmableArray method,
// - ConcreteVisitor::RecordSlot method,
// - ConcreteVisitor::RecordRelocSlot method,
// - ConcreteVisitor::SynchronizePageAccess method.
template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
MarkingVisitorBase(int task_id, MarkingWorklist* marking_worklist,
EmbedderTracingWorklist* embedder_worklist,
WeakObjects* weak_objects, unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool is_embedder_tracing_enabled, bool is_forced_gc)
: marking_worklist_(marking_worklist),
embedder_worklist_(embedder_worklist),
weak_objects_(weak_objects),
task_id_(task_id),
mark_compact_epoch_(mark_compact_epoch),
bytecode_flush_mode_(bytecode_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
is_forced_gc_(is_forced_gc) {}
// HeapVisitor overrides for objects that require custom visitation.
V8_INLINE bool ShouldVisit(HeapObject object) {
return concrete_visitor()->marking_state()->GreyToBlack(object);
}
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
V8_INLINE int VisitFixedArray(Map map, FixedArray object);
V8_INLINE int VisitFixedDoubleArray(Map map, FixedDoubleArray object);
V8_INLINE int VisitJSApiObject(Map map, JSObject object);
V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object);
V8_INLINE int VisitJSDataView(Map map, JSDataView object);
V8_INLINE int VisitJSFunction(Map map, JSFunction object);
V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray object);
V8_INLINE int VisitJSWeakRef(Map map, JSWeakRef object);
V8_INLINE int VisitMap(Map map, Map object);
V8_INLINE int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
V8_INLINE int VisitTransitionArray(Map map, TransitionArray object);
V8_INLINE int VisitWeakCell(Map map, WeakCell object);
// ObjectVisitor overrides.
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}
V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
VisitPointersImpl(host, start, end);
}
V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
VisitPointersImpl(host, start, end);
}
V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
// Weak list pointers should be ignored during marking. The lists are
// reconstructed after GC.
}
protected:
ConcreteVisitor* concrete_visitor() {
return static_cast<ConcreteVisitor*>(this);
}
template <typename THeapObjectSlot>
void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object);
template <typename THeapObjectSlot>
void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object);
template <typename TSlot>
V8_INLINE void VisitPointerImpl(HeapObject host, TSlot p);
template <typename TSlot>
V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end);
V8_INLINE void VisitDescriptors(DescriptorArray descriptors,
int number_of_own_descriptors);
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
MemoryChunk* chunk);
// Marks the descriptor array black without pushing it on the marking work
// list and visits its header.
V8_INLINE void MarkDescriptorArrayBlack(HeapObject host,
DescriptorArray descriptors);
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
MarkingWorklist* const marking_worklist_;
EmbedderTracingWorklist* const embedder_worklist_;
WeakObjects* const weak_objects_;
const int task_id_;
const unsigned mark_compact_epoch_;
const BytecodeFlushMode bytecode_flush_mode_;
const bool is_embedder_tracing_enabled_;
const bool is_forced_gc_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MARKING_VISITOR_H_

src/objects/map.h

@@ -982,7 +982,8 @@ class Map : public HeapObject {
static const int kMaxFastProperties = 128;
friend class MapUpdater;
friend class ConcurrentMarkingVisitor;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
OBJECT_CONSTRUCTORS(Map, HeapObject);
};

test/cctest/heap/test-concurrent-marking.cc

@@ -18,10 +18,8 @@ namespace v8 {
namespace internal {
namespace heap {
void PublishSegment(ConcurrentMarking::MarkingWorklist* worklist,
HeapObject object) {
for (size_t i = 0; i <= ConcurrentMarking::MarkingWorklist::kSegmentCapacity;
i++) {
void PublishSegment(MarkingWorklist* worklist, HeapObject object) {
for (size_t i = 0; i <= MarkingWorklist::kSegmentCapacity; i++) {
worklist->Push(0, object);
}
CHECK(worklist->Pop(0, &object));
@@ -38,11 +36,11 @@ TEST(ConcurrentMarking) {
collector->EnsureSweepingCompleted();
}
ConcurrentMarking::MarkingWorklist shared, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
MarkingWorklist shared, on_hold;
EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &on_hold, &weak_objects, &embedder_objects);
heap, &shared, &on_hold, &embedder_objects, &weak_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
@@ -61,11 +59,11 @@ TEST(ConcurrentMarkingReschedule) {
collector->EnsureSweepingCompleted();
}
ConcurrentMarking::MarkingWorklist shared, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
MarkingWorklist shared, on_hold;
EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &on_hold, &weak_objects, &embedder_objects);
heap, &shared, &on_hold, &embedder_objects, &weak_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
@@ -88,11 +86,11 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
collector->EnsureSweepingCompleted();
}
ConcurrentMarking::MarkingWorklist shared, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
MarkingWorklist shared, on_hold;
EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &on_hold, &weak_objects, &embedder_objects);
heap, &shared, &on_hold, &embedder_objects, &weak_objects);
for (int i = 0; i < 5000; i++)
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();