[heap] Stop scanning full client heaps for pointers into shared heap

We used to scan the full heap of all clients for pointers into the
shared heap. Now that the OLD_TO_SHARED remembered set is considered
complete, we don't need to scan objects in the old generation anymore.
Instead we just need to check slots in the OLD_TO_SHARED remembered
set.

Since we don't have write barriers for objects guaranteed to be in
new space, we still need to scan objects in the young generation.

Bug: v8:11708
Change-Id: I1121f90ee63521c9141431f4cb31973796f1a67c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3865561
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82941}
This commit is contained in: refs/heads/main
Author: Dominik Inführ
Date: 2022-09-02 11:35:22 +02:00
Committed by: V8 LUCI CQ
parent 2d14b0bdc3
commit b0a2f508c5
2 changed files with 78 additions and 35 deletions

View File

@ -1381,13 +1381,9 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
if (!heap_object.InSharedWritableHeap()) return;
DCHECK(heap_object.InSharedWritableHeap());
MemoryChunk* host_chunk = MemoryChunk::FromHeapObject(host);
if (host_chunk->InYoungGeneration()) {
DCHECK(host_chunk->InYoungGeneration());
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
host_chunk, slot.address());
} else {
CHECK(RememberedSet<OLD_TO_SHARED>::Contains(host_chunk, slot.address()));
}
collector_->MarkRootObject(Root::kClientHeap, heap_object);
}
@ -2220,17 +2216,67 @@ void MarkCompactCollector::MarkRootsFromStack(RootVisitor* root_visitor) {
// Entry point for marking objects referenced from client heaps during a
// shared-heap GC. Only the shared isolate performs this work; it dispatches
// the per-client marking to MarkObjectsFromClientHeap for each attached
// client isolate.
// NOTE(review): this fragment comes from a rendered diff view; surrounding
// hunks interleave old and new lines, so confirm against the checked-in file.
void MarkCompactCollector::MarkObjectsFromClientHeaps() {
// Client isolates never own the shared heap, so they have nothing to do here.
if (!isolate()->is_shared()) return;
// Presumably the clients are held at a safepoint while being iterated —
// verify against GlobalSafepoint::IterateClientIsolates.
isolate()->global_safepoint()->IterateClientIsolates(
[collector = this](Isolate* client) {
collector->MarkObjectsFromClientHeap(client);
});
}
void MarkCompactCollector::MarkObjectsFromClientHeap(Isolate* client) {
// There is no OLD_TO_SHARED remembered set for the young generation. We
// therefore need to iterate each object and check whether it points into the
// shared heap. As an optimization and to avoid a second heap iteration in the
// "update pointers" phase, all pointers into the shared heap are recorded in
// the OLD_TO_SHARED remembered set as well.
SharedHeapObjectVisitor visitor(this);
isolate()->global_safepoint()->IterateClientIsolates(
[&visitor](Isolate* client) {
Heap* heap = client->heap();
HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
PtrComprCageBase cage_base(client);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
Heap* heap = client->heap();
if (heap->new_space()) {
std::unique_ptr<ObjectIterator> iterator =
heap->new_space()->GetObjectIterator(heap);
for (HeapObject obj = iterator->Next(); !obj.is_null();
obj = iterator->Next()) {
obj.IterateFast(cage_base, &visitor);
}
}
if (heap->new_lo_space()) {
std::unique_ptr<ObjectIterator> iterator =
heap->new_lo_space()->GetObjectIterator(heap);
for (HeapObject obj = iterator->Next(); !obj.is_null();
obj = iterator->Next()) {
obj.IterateFast(cage_base, &visitor);
}
}
// In the old generation we can simply use the OLD_TO_SHARED remembered set to
// find all incoming pointers into the shared heap.
OldGenerationMemoryChunkIterator chunk_iterator(heap);
for (MemoryChunk* chunk = chunk_iterator.next(); chunk;
chunk = chunk_iterator.next()) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(
chunk, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_SHARED>::Iterate(
chunk,
[collector = this, cage_base, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
MaybeObject obj = slot.Relaxed_Load(cage_base);
HeapObject heap_object;
if (obj.GetHeapObject(&heap_object) &&
heap_object.InSharedWritableHeap()) {
collector->MarkRootObject(Root::kClientHeap, heap_object);
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
}
},
SlotSet::FREE_EMPTY_BUCKETS);
chunk->ReleaseInvalidatedSlots<OLD_TO_SHARED>();
}
#ifdef V8_COMPRESS_POINTERS
DCHECK(IsSandboxedExternalPointerType(kWaiterQueueNodeTag));
@ -2247,7 +2293,6 @@ void MarkCompactCollector::MarkObjectsFromClientHeaps() {
table.Mark(handle, reinterpret_cast<Address>(handle_location));
}
#endif // V8_COMPRESS_POINTERS
});
}
void MarkCompactCollector::VisitObject(HeapObject obj) {
@ -5185,17 +5230,14 @@ void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
MemoryChunk* chunk = chunk_iterator.Next();
CodePageMemoryModificationScope unprotect_code_page(chunk);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(
chunk, InvalidatedSlotsFilter::LivenessCheck::kNo);
DCHECK_NULL(chunk->invalidated_slots<OLD_TO_SHARED>());
RememberedSet<OLD_TO_SHARED>::Iterate(
chunk,
[cage_base, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
[cage_base](MaybeObjectSlot slot) {
return UpdateOldToSharedSlot(cage_base, slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
chunk->ReleaseInvalidatedSlots<OLD_TO_SHARED>();
if (chunk->InYoungGeneration()) chunk->ReleaseSlotSet<OLD_TO_SHARED>();
RememberedSet<OLD_TO_SHARED>::IterateTyped(chunk, [this](SlotType slot_type,

View File

@ -590,6 +590,7 @@ class MarkCompactCollector final : public CollectorBase {
// Mark all objects that are directly referenced from one of the clients
// heaps.
void MarkObjectsFromClientHeaps();
void MarkObjectsFromClientHeap(Isolate* client);
// Updates pointers to shared objects from client heaps.
void UpdatePointersInClientHeaps();