[heap] Iterate map word for promoted objects in scavenger

When iterating the slots of promoted objects we now also need to visit
the map word slot, since maps might get compacted. If we do not, we risk
losing the already recorded slot for the map word when the object has
already been marked.

Bug: v8:12578, chromium:1295239
Change-Id: I34fbf7ae4b9e36eae8e7e3df354b5fd19adcb08f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3448373
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79006}
Dominik Inführ authored on 2022-02-09 10:12:45 +01:00; committed by V8 LUCI CQ
parent cc7c7528ee
commit d01a024cc8
3 changed files with 42 additions and 6 deletions
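
In essence, the change treats the map word of a promoted object as just another slot once map space compaction is possible: if the map sits on an evacuation candidate page, the map-word slot must be recorded so the pointer can be updated after the map moves. The sketch below illustrates only that idea; the names (Page, Map, HeapObject, old_to_old_slots, RecordMapWordSlot) are simplified stand-ins, not V8's real API.

#include <unordered_set>

struct Page {
  bool is_evacuation_candidate = false;
};

struct Map {
  Page* page = nullptr;
};

struct HeapObject {
  Map* map = nullptr;  // simplified stand-in for the map word
  Map** map_slot() { return &map; }
};

// Simplified OLD_TO_OLD remembered set: slots that must be updated when
// their target is relocated during compaction.
static std::unordered_set<Map**> old_to_old_slots;

// Called for every object the scavenger promotes while incremental
// (compacting) marking is active; this mirrors the VisitMapPointer added
// in the diff below.
void RecordMapWordSlot(HeapObject& host, bool record_slots) {
  if (!record_slots) return;
  Map* map = *host.map_slot();
  if (map->page->is_evacuation_candidate) {
    // Without this, the recorded slot for the map word can be lost when the
    // object is promoted, leaving a stale map pointer once map space
    // compaction moves the map.
    old_to_old_slots.insert(host.map_slot());
  }
}

In the actual patch this logic lives in IterateAndScavengePromotedObjectsVisitor::VisitMapPointer, and HandleSurvivingNewLargeObjects additionally inserts the map slot of black surviving new large objects into the OLD_TO_OLD remembered set.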

src/heap/scavenger-inl.h

@@ -182,7 +182,11 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
                  : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
     }
     HeapObjectReference::Update(slot, target);
-    if (object_fields == ObjectFields::kMaybePointers) {
+    // During incremental marking we want to push every object in order to
+    // record slots for map words. Necessary for map space compaction.
+    if (object_fields == ObjectFields::kMaybePointers ||
+        is_compacting_including_map_space_) {
       promotion_list_local_.PushRegularObject(target, object_size);
     }
     promoted_size_ += object_size;

src/heap/scavenger.cc

@@ -4,6 +4,7 @@
 #include "src/heap/scavenger.h"
 
+#include "src/common/globals.h"
 #include "src/handles/global-handles.h"
 #include "src/heap/array-buffer-sweeper.h"
 #include "src/heap/barrier.h"
@@ -12,7 +13,9 @@
 #include "src/heap/heap-inl.h"
 #include "src/heap/invalidated-slots-inl.h"
 #include "src/heap/mark-compact-inl.h"
+#include "src/heap/mark-compact.h"
 #include "src/heap/memory-chunk-inl.h"
+#include "src/heap/memory-chunk.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/remembered-set-inl.h"
 #include "src/heap/scavenger-inl.h"
@@ -33,6 +36,17 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
                                            bool record_slots)
       : scavenger_(scavenger), record_slots_(record_slots) {}
 
+  V8_INLINE void VisitMapPointer(HeapObject host) final {
+    if (!record_slots_) return;
+    MapWord map_word = host.map_word(kRelaxedLoad);
+    if (map_word.IsForwardingAddress()) {
+      // Surviving new large objects have forwarding pointers in the map word.
+      DCHECK(MemoryChunk::FromHeapObject(host)->InNewLargeObjectSpace());
+      return;
+    }
+    HandleSlot(host, HeapObjectSlot(host.map_slot()), map_word.ToMap());
+  }
+
   V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
                                ObjectSlot end) final {
     VisitPointersImpl(host, start, end);
@@ -119,10 +133,9 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
                                                                 slot.address());
         }
       }
-      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
-          HeapObject::cast(target)));
-    } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
-                                    HeapObject::cast(target))) {
+      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
+    } else if (record_slots_ &&
+               MarkCompactCollector::IsOnEvacuationCandidate(target)) {
       // We should never try to record off-heap slots.
       DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
       // Code slots never appear in new space because CodeDataContainers, the
@@ -498,6 +511,10 @@ void ScavengerCollector::SweepArrayBufferExtensions() {
 }
 
 void ScavengerCollector::HandleSurvivingNewLargeObjects() {
+  const bool is_compacting = heap_->incremental_marking()->IsCompacting();
+  MajorAtomicMarkingState* marking_state =
+      heap_->incremental_marking()->atomic_marking_state();
+
   for (SurvivingNewLargeObjectMapEntry update_info :
        surviving_new_large_objects_) {
     HeapObject object = update_info.first;
@@ -505,6 +522,12 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
     // Order is important here. We have to re-install the map to have access
     // to meta-data like size during page promotion.
     object.set_map_word(MapWord::FromMap(map), kRelaxedStore);
+
+    if (is_compacting && marking_state->IsBlack(object) &&
+        MarkCompactCollector::IsOnEvacuationCandidate(map)) {
+      RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
+          MemoryChunk::FromHeapObject(object), object.map_slot().address());
+    }
     LargePage* page = LargePage::FromHeapObject(object);
     heap_->lo_space()->PromoteNewLargeObject(page);
   }
@@ -568,6 +591,8 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
       is_logging_(is_logging),
       is_incremental_marking_(heap->incremental_marking()->IsMarking()),
       is_compacting_(heap->incremental_marking()->IsCompacting()),
+      is_compacting_including_map_space_(is_compacting_ &&
+                                         FLAG_compact_map_space),
       shared_string_table_(shared_old_allocator_.get() != nullptr) {}
 
 void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
@@ -583,7 +608,13 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
       heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
 
   IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
-  target.IterateBodyFast(map, size, &visitor);
+
+  if (is_compacting_including_map_space_) {
+    // When we compact map space, we also want to visit the map word.
+    target.IterateFast(map, size, &visitor);
+  } else {
+    target.IterateBodyFast(map, size, &visitor);
+  }
 
   if (map.IsJSArrayBufferMap()) {
     DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());

src/heap/scavenger.h

@@ -204,6 +204,7 @@ class Scavenger {
   const bool is_logging_;
   const bool is_incremental_marking_;
   const bool is_compacting_;
+  const bool is_compacting_including_map_space_;
   const bool shared_string_table_;
 
   friend class IterateAndScavengePromotedObjectsVisitor;