[heap] Also promote strings in large objects into shared heap

With --shared-string-table, all in-place internalizable strings are
promoted directly from new space into the shared heap. However, this
was not yet the case for strings in large objects. This CL fixes that
and adds tests to guide fuzzers.

Bug: v8:13267, chromium:1400048
Change-Id: I6f850d480956c63bfbe1a7060140df850e284933
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4096818
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84805}
Dominik Inführ 2022-12-13 10:30:42 +01:00 committed by V8 LUCI CQ
parent f051f773f4
commit 303facf5e1
13 changed files with 162 additions and 16 deletions
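
For orientation, the routing decision that the evacuation loops below (in mark-compact.cc and scavenger.cc) apply to each surviving large object can be summarized as follows. This is a simplified illustration; ShouldPromoteLargeStringToSharedHeap is a hypothetical helper name, not code from the CL.

// Simplified illustration of the promotion decision this CL adds for large
// objects; hypothetical helper, not part of the CL.
bool ShouldPromoteLargeStringToSharedHeap(Heap* heap, HeapObject object) {
  // Shared promotion only applies when a shared heap exists and
  // --shared-string-table is enabled.
  if (!heap->isolate()->has_shared_heap() || !v8_flags.shared_string_table) {
    return false;
  }
  // Only strings that can be internalized in place move into the shared
  // large-object space; all other large objects keep going to the regular
  // old large-object space.
  return String::IsInPlaceInternalizable(object.map().instance_type());
}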


@ -110,6 +110,10 @@ class BasicMemoryChunk {
// A Page with code objects.
IS_EXECUTABLE = 1u << 21,
// Page will be promoted directly from the new space to the shared heap in a
// minor GC.
SHARED_HEAP_PROMOTION = 1u << 22,
};
using MainThreadFlags = base::Flags<Flag, uintptr_t>;
@ -188,6 +192,11 @@ class BasicMemoryChunk {
return heap_;
}
void set_heap(Heap* heap) {
DCHECK_NOT_NULL(heap);
heap_ = heap;
}
// Gets the chunk's owner or null if the space has been detached.
BaseSpace* owner() const { return owner_; }


@ -346,6 +346,13 @@ void HeapVerification::VerifyPage(const BasicMemoryChunk* chunk) {
CHECK(!current_chunk_.has_value());
CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
CHECK(!chunk->IsFlagSet(Page::SHARED_HEAP_PROMOTION));
if (chunk->InReadOnlySpace()) {
CHECK_NULL(chunk->owner());
} else {
CHECK_EQ(chunk->heap(), heap());
CHECK_EQ(chunk->owner()->identity(), current_space_identity());
}
current_chunk_ = chunk;
}


@ -259,14 +259,27 @@ void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
}
}
void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
DCHECK(identity() == LO_SPACE || identity() == SHARED_LO_SPACE);
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
const bool promotion_into_shared_heap =
identity() == SHARED_LO_SPACE || heap()->isolate()->is_shared();
PtrComprCageBase cage_base(heap()->isolate());
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page);
page->ClearFlag(MemoryChunk::FROM_PAGE);
if (promotion_into_shared_heap) {
page->SetFlag(MemoryChunk::IN_SHARED_HEAP);
MemoryAllocator* current_allocator = page->heap()->memory_allocator();
heap()->memory_allocator()->TakeOverLargePage(page, current_allocator);
page->set_heap(heap());
}
base::RecursiveMutexGuard guard(&allocation_mutex_);
AddPage(page, static_cast<size_t>(page->GetObject().Size(cage_base)));
}


@ -140,6 +140,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
void set_objects_size(size_t objects_size) { objects_size_ = objects_size; }
void PromoteNewLargeObject(LargePage* page);
protected:
LargeObjectSpace(Heap* heap, AllocationSpace id);
@ -181,8 +183,6 @@ class OldLargeObjectSpace : public LargeObjectSpace {
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawBackground(LocalHeap* local_heap, int object_size);
void PromoteNewLargeObject(LargePage* page);
protected:
explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,


@ -4642,7 +4642,14 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
LargePage* current = *(it++);
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (marking_state->IsBlack(object)) {
if (!marking_state->IsBlack(object)) continue;
if (uses_shared_heap_ && v8_flags.shared_string_table &&
String::IsInPlaceInternalizable(object.map().instance_type())) {
DCHECK(ReadOnlyHeap::Contains(object.map()));
DCHECK(StringShape(String::cast(object), isolate()).IsDirect());
heap_->shared_lo_allocation_space()->PromoteNewLargeObject(current);
} else {
heap()->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
promoted_large_pages_.push_back(current);
@ -6694,13 +6701,22 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
}
}
const bool uses_shared_heap = heap()->isolate()->has_shared_heap();
// Promote young generation large objects.
for (auto it = heap()->new_lo_space()->begin();
it != heap()->new_lo_space()->end();) {
LargePage* current = *it;
it++;
HeapObject object = current->GetObject();
if (non_atomic_marking_state()->IsBlack(object)) {
DCHECK(!non_atomic_marking_state()->IsGrey(object));
if (!non_atomic_marking_state()->IsBlack(object)) continue;
if (uses_shared_heap && v8_flags.shared_string_table &&
String::IsInPlaceInternalizable(object.map().instance_type())) {
DCHECK(ReadOnlyHeap::Contains(object.map()));
DCHECK(StringShape(String::cast(object), isolate()).IsDirect());
heap_->shared_lo_allocation_space()->PromoteNewLargeObject(current);
} else {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
promoted_large_pages_.push_back(current);


@ -474,6 +474,17 @@ void MemoryAllocator::UnregisterReadOnlyPage(ReadOnlyPage* page) {
UnregisterBasicMemoryChunk(page, NOT_EXECUTABLE);
}
void MemoryAllocator::TakeOverLargePage(LargePage* page,
MemoryAllocator* current_owner) {
DCHECK_EQ(page->executable(), NOT_EXECUTABLE);
if (this == current_owner) return;
current_owner->size_ -= page->size();
current_owner->RecordLargePageDestroyed(*page);
size_ += page->size();
RecordLargePageCreated(*page);
}
void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
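
The TakeOverLargePage hand-off above only moves accounting between the two allocators: the client allocator's size_ shrinks by page->size() and the shared allocator's grows by the same amount, so combined committed large-object memory stays the same. A hypothetical debug helper stating that invariant (not part of the CL; assumes a Size() accessor that returns the committed-bytes counter):

// Hypothetical check illustrating that TakeOverLargePage conserves the
// combined committed size of the two allocators.
void CheckTakeOverPreservesTotalSize(MemoryAllocator* shared_allocator,
                                     MemoryAllocator* client_allocator,
                                     LargePage* page) {
  const size_t before = shared_allocator->Size() + client_allocator->Size();
  shared_allocator->TakeOverLargePage(page, client_allocator);
  const size_t after = shared_allocator->Size() + client_allocator->Size();
  // page->size() moved from one counter to the other; the sum is unchanged.
  DCHECK_EQ(before, after);
}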


@ -260,6 +260,7 @@ class MemoryAllocator {
Unmapper* unmapper() { return &unmapper_; }
void UnregisterReadOnlyPage(ReadOnlyPage* page);
void TakeOverLargePage(LargePage* page, MemoryAllocator* current_owner);
Address HandleAllocationFailure(Executability executable);


@ -357,6 +357,8 @@ void ScavengerCollector::CollectGarbage() {
memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
});
PreprocessNewLargeObjects();
RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());
{
@ -542,6 +544,19 @@ void ScavengerCollector::SweepArrayBufferExtensions() {
ArrayBufferSweeper::SweepingType::kYoung);
}
void ScavengerCollector::PreprocessNewLargeObjects() {
if (!heap_->isolate()->has_shared_heap() || !v8_flags.shared_string_table) {
return;
}
for (LargePage* page : *heap_->new_lo_space()) {
HeapObject object = page->GetObject();
if (String::IsInPlaceInternalizable(object.map().instance_type())) {
page->SetFlag(MemoryChunk::SHARED_HEAP_PROMOTION);
}
}
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
const bool is_compacting = heap_->incremental_marking()->IsCompacting();
AtomicMarkingState* marking_state = heap_->atomic_marking_state();
@ -549,18 +564,25 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
for (SurvivingNewLargeObjectMapEntry update_info :
surviving_new_large_objects_) {
HeapObject object = update_info.first;
LargePage* page = LargePage::FromHeapObject(object);
Map map = update_info.second;
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
object.set_map_word(map, kRelaxedStore);
if (is_compacting && marking_state->IsBlack(object) &&
MarkCompactCollector::IsOnEvacuationCandidate(map)) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
MemoryChunk::FromHeapObject(object), object.map_slot().address());
if (page->IsFlagSet(MemoryChunk::SHARED_HEAP_PROMOTION)) {
DCHECK(ReadOnlyHeap::Contains(map));
DCHECK(StringShape(String::cast(object), heap_->isolate()).IsDirect());
page->ClearFlag(MemoryChunk::SHARED_HEAP_PROMOTION);
heap_->shared_lo_allocation_space()->PromoteNewLargeObject(page);
} else {
if (is_compacting && marking_state->IsBlack(object) &&
MarkCompactCollector::IsOnEvacuationCandidate(map)) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
page, object.map_slot().address());
}
heap_->lo_space()->PromoteNewLargeObject(page);
}
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
surviving_new_large_objects_.clear();
heap_->new_lo_space()->set_objects_size(0);
@ -842,6 +864,14 @@ void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
ephemeron_table_list_local_.Push(table);
}
namespace {
bool RecordOldToSharedSlot(HeapObject heap_object) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(heap_object);
return chunk->InSharedHeap() ||
chunk->IsFlagSet(MemoryChunk::SHARED_HEAP_PROMOTION);
}
} // anonymous namespace
template <typename TSlot>
void Scavenger::CheckOldToNewSlotForSharedUntyped(MemoryChunk* chunk,
TSlot slot) {
@ -849,7 +879,7 @@ void Scavenger::CheckOldToNewSlotForSharedUntyped(MemoryChunk* chunk,
HeapObject heap_object;
if (object.GetHeapObject(&heap_object) &&
heap_object.InSharedWritableHeap()) {
RecordOldToSharedSlot(heap_object)) {
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(chunk,
slot.address());
}
@ -860,11 +890,9 @@ void Scavenger::CheckOldToNewSlotForSharedTyped(MemoryChunk* chunk,
Address slot_address,
MaybeObject new_target) {
HeapObject heap_object;
if (!new_target.GetHeapObject(&heap_object)) {
return;
}
if (heap_object.InSharedWritableHeap()) {
if (new_target.GetHeapObject(&heap_object) &&
RecordOldToSharedSlot(heap_object)) {
const uintptr_t offset = slot_address - chunk->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
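
The scavenger changes above split the work into two phases: PreprocessNewLargeObjects tags candidate pages with SHARED_HEAP_PROMOTION before any slots are visited, so RecordOldToSharedSlot already treats references into those pages as old-to-shared, and HandleSurvivingNewLargeObjects performs the actual move afterwards. A condensed sketch of that ordering (the free functions and the RunScavengers step are simplifications, not code from the CL):

// Condensed sketch of the scavenge ordering introduced above; marking,
// concurrency and remembered-set plumbing are omitted.
void ScavengeLargeObjectsSketch(Heap* heap) {
  // 1. Tag new-space large pages holding in-place internalizable strings.
  //    Must happen before slot visiting so old-to-shared slots into these
  //    pages are recorded while the pages are still in new space.
  PreprocessNewLargeObjects(heap);

  // 2. Scavenge young objects and record slots (existing machinery).
  RunScavengers(heap);

  // 3. Promote surviving tagged pages into the shared large-object space,
  //    and all other surviving large pages into the regular old LO space.
  HandleSurvivingNewLargeObjects(heap);
}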


@ -286,6 +286,7 @@ class ScavengerCollector {
void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
void ClearOldEphemerons();
void HandleSurvivingNewLargeObjects();
void PreprocessNewLargeObjects();
void SweepArrayBufferExtensions();


@ -1712,6 +1712,13 @@ RUNTIME_FUNCTION(Runtime_IsSharedString) {
Handle<String>::cast(obj)->IsShared());
}
RUNTIME_FUNCTION(Runtime_InSharedHeap) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<HeapObject> obj = args.at<HeapObject>(0);
return isolate->heap()->ToBoolean(obj->InSharedWritableHeap());
}
RUNTIME_FUNCTION(Runtime_IsInPlaceInternalizableString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());


@ -544,6 +544,7 @@ namespace internal {
F(HeapObjectVerify, 1, 1) \
F(ICsAreEnabled, 0, 1) \
F(InLargeObjectSpace, 1, 1) \
F(InSharedHeap, 1, 1) \
F(InYoungGeneration, 1, 1) \
F(Is64Bit, 0, 1) \
F(IsAtomicsWaitAllowed, 0, 1) \


@ -0,0 +1,26 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --expose-gc --allow-natives-syntax --verify-heap --shared-string-table
const old = {};
old.bar = 100;
gc();
assertFalse(%InYoungGeneration(old));
const foo = %FlattenString('a'.repeat(512 * 1024));
assertTrue(%InYoungGeneration(foo));
assertTrue(%IsInPlaceInternalizableString(foo));
// Create old-to-new reference.
old.foo = foo;
gc();
assertTrue(!%InYoungGeneration(foo));
assertTrue(%InSharedHeap(foo));
// An additional full GC for heap verification.
gc();


@ -0,0 +1,26 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --expose-gc --allow-natives-syntax --verify-heap --shared-string-table
const old = {};
old.bar = 100;
gc();
assertFalse(%InYoungGeneration(old));
const foo = %FlattenString('a'.repeat(512 * 1024));
assertTrue(%InYoungGeneration(foo));
assertTrue(%IsInPlaceInternalizableString(foo));
// Create old-to-new reference.
old.foo = foo;
gc({type: "minor"});
assertTrue(!%InYoungGeneration(foo));
assertTrue(%InSharedHeap(foo));
// An additional full GC for heap verification.
gc();