[heap] Fix inner pointer resolution for unused young pages

Inner pointer resolution, to be used in conservative stack scanning,
assumes that all pages registered with the memory allocator are
iterable. Until this CL, this was not the case for pages that were
owned by the young generation semispaces but were unused. Such pages
are either in the "from" semispace, or in the "to" semispace but have
not yet been used.

This CL ensures that all pages owned by the young generation are
iterable. It also adds tests to verify that inner pointer resolution
works correctly for unused young pages and for pointers above the
page area.
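
To illustrate the invariant being restored, here is a minimal,
self-contained sketch (not V8 code; Page, FindBaseOffset and the
size-header encoding are simplified assumptions): inner pointer
resolution walks a page object by object, so every word of the page
area must be covered by a parseable object, with filler objects
covering any unused space.

  // Sketch only: a "page" whose area is an array of tagged words, where
  // every object starts with a one-word header holding its size in bytes.
  // A filler is just such an object whose payload is ignored.
  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  constexpr size_t kTaggedSize = 8;

  struct Page {
    std::vector<uint64_t> words;  // the page area, one tagged word per slot
    explicit Page(size_t n_words) : words(n_words, 0) {}

    size_t area_size() const { return words.size() * kTaggedSize; }

    void WriteObject(size_t offset, size_t size_in_bytes) {
      assert(offset % kTaggedSize == 0 && size_in_bytes % kTaggedSize == 0);
      words[offset / kTaggedSize] = size_in_bytes;  // header = object size
    }

    // Make the tail of the page iterable by covering [offset, area_size())
    // with a single filler object (the role Heap::CreateFillerObjectAt
    // plays in this CL).
    void FillUnused(size_t offset) {
      WriteObject(offset, area_size() - offset);
    }

    // Conservative inner pointer resolution: walk objects linearly from the
    // page start until one contains addr. The walk is only well-defined if
    // every word of the page belongs to some object -- the iterability
    // invariant that unused young pages were violating before this CL.
    size_t FindBaseOffset(size_t addr) const {
      size_t current = 0;
      while (current < area_size()) {
        size_t size = words[current / kTaggedSize];
        assert(size > 0);  // trips on a non-iterable (uninitialized) page
        if (addr < current + size) return current;
        current += size;
      }
      return SIZE_MAX;  // addr is not in this page's area
    }
  };

  int main() {
    Page page(64);               // a 512-byte page area
    page.WriteObject(0, 80);     // one live 80-byte object at the start
    page.FillUnused(80);         // cover the unused tail with a filler
    assert(page.FindBaseOffset(24) == 0);    // inner pointer -> object base
    assert(page.FindBaseOffset(160) == 80);  // lands inside the filler
    return 0;
  }

In V8 itself the forward walk additionally skips free space and fillers
(see IsFreeSpaceOrFiller in the iterator changes below), so pointers
into unused or freed areas resolve to no object, which is exactly what
the new tests assert.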

Bug: v8:13257
Change-Id: Ieff7cc216853403e01f83220b96bf8ff4cdea596
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885893
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Nikolaos Papaspyrou <nikolaos@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83226}
Author: Nikolaos Papaspyrou
Date: 2022-09-15 12:15:00 +02:00
Committer: V8 LUCI CQ
Parent: cbcb05c713
Commit: ac7edc1fdb
8 changed files with 297 additions and 54 deletions


@@ -1485,6 +1485,10 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
if (!v8_flags.minor_mc) {
SemiSpaceNewSpace::From(new_space())->MakeAllPagesInFromSpaceIterable();
}
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
new_space()->ClearUnusedObjectStartBitmaps();
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB


@@ -2246,9 +2246,11 @@ Address MarkCompactCollector::FindBasePtrForMarking(Address maybe_inner_ptr) {
if (chunk->IsLargePage()) return chunk->area_start();
// Otherwise, we have a pointer inside a normal page.
const Page* page = static_cast<const Page*>(chunk);
// Try to find the address of a previous valid object on this page.
Address base_ptr =
FindPreviousObjectForConservativeMarking(page, maybe_inner_ptr);
// If the markbit is set, then we have an object that does not need be marked.
// If the markbit is set, then we have an object that does not need to be
// marked.
if (base_ptr == kNullAddress) return kNullAddress;
// Iterate through the objects in the page forwards, until we find the object
// containing maybe_inner_ptr.


@@ -777,7 +777,6 @@ const MemoryChunk* MemoryAllocator::LookupChunkContainingAddress(
it != normal_pages_.end()) {
// The chunk is a normal page.
DCHECK_LE(chunk->address(), addr);
DCHECK_GT(chunk->area_end(), addr);
if (chunk->Contains(addr)) return *it;
} else if (auto it = large_pages_.upper_bound(static_cast<LargePage*>(chunk));
it != large_pages_.begin()) {


@@ -116,22 +116,21 @@ V8_INLINE bool PagedSpaceForNewSpace::EnsureAllocation(
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator
SemiSpaceObjectIterator::SemiSpaceObjectIterator(const SemiSpaceNewSpace* space)
: current_(space->first_allocatable_address()) {}
HeapObject SemiSpaceObjectIterator::Next() {
while (current_ != limit_) {
while (true) {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
page = page->next_page();
DCHECK(page);
if (page == nullptr) return HeapObject();
current_ = page->area_start();
if (current_ == limit_) return HeapObject();
}
HeapObject object = HeapObject::FromAddress(current_);
current_ += object.Size();
if (!object.IsFreeSpaceOrFiller()) {
return object;
}
if (!object.IsFreeSpaceOrFiller()) return object;
}
return HeapObject();
}
} // namespace internal


@@ -133,6 +133,8 @@ bool SemiSpace::Commit() {
}
memory_chunk_list_.PushBack(new_page);
IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
heap()->CreateFillerObjectAt(new_page->area_start(),
static_cast<int>(new_page->area_size()));
}
Reset();
AccountCommitted(target_capacity_);
@@ -195,6 +197,8 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
// Duplicate the flags that were set on the old page.
new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
heap()->CreateFillerObjectAt(new_page->area_start(),
static_cast<int>(new_page->area_size()));
}
AccountCommitted(delta);
target_capacity_ = new_capacity;
@@ -426,20 +430,6 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
}
#endif
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator implementation.
SemiSpaceObjectIterator::SemiSpaceObjectIterator(
const SemiSpaceNewSpace* space) {
Initialize(space->first_allocatable_address(), space->top());
}
void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
}
// -----------------------------------------------------------------------------
// NewSpace implementation
@@ -481,8 +471,7 @@ void NewSpace::VerifyTop() const {
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
Address current_address,
Address stop_iteration_at_address) const {
Address current_address) const {
DCHECK(current_page->ContainsLimit(current_address));
size_t external_space_bytes[kNumTypes];
@@ -496,13 +485,8 @@ void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
PtrComprCageBase cage_base(isolate);
VerifyPointersVisitor visitor(heap());
const Page* page = current_page;
while (current_address != stop_iteration_at_address) {
while (true) {
if (!Page::IsAlignedToPageSize(current_address)) {
// The allocation pointer should not be in the middle of an object.
CHECK_IMPLIES(!v8_flags.minor_mc,
!Page::FromAddress(current_address)->ContainsLimit(top()) ||
current_address < top());
HeapObject object = HeapObject::FromAddress(current_address);
// The first word should be a map, and we expect all map pointers to
@@ -660,6 +644,10 @@ void SemiSpaceNewSpace::UpdateLinearAllocationArea(Address known_top) {
linear_area_original_data_.set_original_top_release(top());
}
// The linear allocation area should reach the end of the page, so no filler
// object is needed there to make the page iterable.
DCHECK_EQ(limit(), to_space_.page_high());
to_space_.AddRangeToActiveSystemPages(top(), limit());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -686,6 +674,11 @@ void SemiSpaceNewSpace::UpdateInlineAllocationLimit(size_t min_size) {
allocation_info_.SetLimit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
// Add a filler object after the linear allocation area (if there is space
// left), to ensure that the page will be iterable.
heap()->CreateFillerObjectAt(
limit(), static_cast<int>(to_space_.page_high() - limit()));
#if DEBUG
VerifyTop();
#endif
@@ -770,7 +763,7 @@ void SemiSpaceNewSpace::Verify(Isolate* isolate) const {
Address current = to_space_.first_page()->area_start();
CHECK_EQ(current, to_space_.space_start());
VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current, top());
VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current);
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
@@ -780,6 +773,37 @@ void SemiSpaceNewSpace::Verify(Isolate* isolate) const {
}
#endif // VERIFY_HEAP
void SemiSpaceNewSpace::MakeIterable() {
MakeAllPagesInFromSpaceIterable();
MakeUnusedPagesInToSpaceIterable();
}
void SemiSpaceNewSpace::MakeAllPagesInFromSpaceIterable() {
if (!IsFromSpaceCommitted()) return;
// Fix all pages in the "from" semispace.
for (Page* page : from_space()) {
heap()->CreateFillerObjectAt(page->area_start(),
static_cast<int>(page->area_size()));
}
}
void SemiSpaceNewSpace::MakeUnusedPagesInToSpaceIterable() {
PageIterator it(to_space().current_page());
// Fix the current page, above the LAB.
DCHECK_NOT_NULL(*it);
DCHECK((*it)->Contains(limit()));
heap()->CreateFillerObjectAt(limit(),
static_cast<int>((*it)->area_end() - limit()));
// Fix the remaining unused pages in the "to" semispace.
for (Page* page = *(++it); page != nullptr; page = *(++it)) {
heap()->CreateFillerObjectAt(page->area_start(),
static_cast<int>(page->area_size()));
}
}
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
void SemiSpaceNewSpace::ClearUnusedObjectStartBitmaps() {
if (!IsFromSpaceCommitted()) return;
@@ -1044,10 +1068,7 @@ PagedNewSpace::~PagedNewSpace() {
void PagedNewSpace::Verify(Isolate* isolate) const {
const Page* first_page = paged_space_.first_page();
if (first_page) {
// No bailout needed since all pages are iterable.
VerifyImpl(isolate, first_page, first_page->area_start(), kNullAddress);
}
if (first_page) VerifyImpl(isolate, first_page, first_page->area_start());
// Check paged-spaces.
VerifyPointersVisitor visitor(heap());


@@ -217,24 +217,17 @@ class SemiSpace final : public Space {
};
// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
// semispace of the heap's new space.
class SemiSpaceObjectIterator : public ObjectIterator {
public:
// Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceObjectIterator(const SemiSpaceNewSpace* space);
// Create an iterator over the objects in the given to-space.
inline explicit SemiSpaceObjectIterator(const SemiSpaceNewSpace* space);
inline HeapObject Next() final;
private:
void Initialize(Address start, Address end);
// The current iteration point.
Address current_;
// The end of iteration.
Address limit_;
};
class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
@@ -294,14 +287,15 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
#ifdef VERIFY_HEAP
virtual void Verify(Isolate* isolate) const = 0;
// VerifyImpl verifies objects on the space starting from |page| and
// |address|. |address| should be a valid limit on |page| (see
// BasicMemoryChunk::ContainsLimit).
// VerifyImpl verifies objects on the space starting from |current_page| and
// |current_address|. |current_address| should be a valid limit on
// |current_page| (see BasicMemoryChunk::ContainsLimit).
void VerifyImpl(Isolate* isolate, const Page* current_page,
Address current_address,
Address stop_iteration_at_address) const;
Address current_address) const;
#endif
virtual void MakeIterable() = 0;
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
virtual void ClearUnusedObjectStartBitmaps() = 0;
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
@@ -483,6 +477,11 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
void Print() override { to_space_.Print(); }
#endif
void MakeIterable() override;
void MakeAllPagesInFromSpaceIterable();
void MakeUnusedPagesInToSpaceIterable();
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
void ClearUnusedObjectStartBitmaps() override;
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
@@ -625,6 +624,8 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
void Verify(Isolate* isolate, ObjectVisitor* visitor) const final;
#endif
void MakeIterable() { free_list()->RepairLists(heap()); }
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
void ClearUnusedObjectStartBitmaps() {}
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
@@ -783,6 +784,8 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
PagedSpaceBase* paged_space() { return &paged_space_; }
void MakeIterable() override { paged_space_.MakeIterable(); }
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
void ClearUnusedObjectStartBitmaps() override {
paged_space_.ClearUnusedObjectStartBitmaps();


@@ -75,9 +75,11 @@ bool SetupIsolateDelegate::SetupHeapInternal(Heap* heap) {
bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
if (v8_flags.minor_mc && new_space()) {
paged_new_space()->paged_space()->free_list()->RepairLists(this);
}
// Ensure that all young generation pages are iterable. This must happen
// after heap setup, so that the maps have been created.
if (new_space()) new_space()->MakeIterable();
CreateApiObjects();
// Create initial objects


@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/heap/mark-compact.h"
#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -601,6 +602,218 @@ TEST_F(InnerPointerResolutionTest, FreePages) {
TestAll();
}
using InnerPointerResolutionHeapTest = TestWithHeapInternalsAndContext;
TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
ManualGCScope manual_gc_scope(isolate());
v8_flags.page_promotion = false;
Persistent<v8::FixedArray> weak1, weak2;
Address inner_ptr1, inner_ptr2, outside_ptr1, outside_ptr2;
Page *page1, *page2;
{
PtrComprCageBase cage_base{isolate()};
HandleScope scope(isolate());
// Allocate two objects, large enough that they fall in two different young
// generation pages.
const int length =
(heap()->MaxRegularHeapObjectSize(AllocationType::kYoung) -
FixedArray::SizeFor(0)) /
kTaggedSize;
auto h1 = factory()->NewFixedArray(length, AllocationType::kYoung);
auto h2 = factory()->NewFixedArray(length, AllocationType::kYoung);
weak1.Reset(v8_isolate(), Utils::FixedArrayToLocal(h1));
weak2.Reset(v8_isolate(), Utils::FixedArrayToLocal(h2));
weak1.SetWeak();
weak2.SetWeak();
auto obj1 = h1->GetHeapObject();
auto obj2 = h2->GetHeapObject();
page1 = Page::FromHeapObject(obj1);
EXPECT_TRUE(!page1->IsLargePage());
EXPECT_TRUE(v8_flags.minor_mc || page1->IsToPage());
page2 = Page::FromHeapObject(obj2);
EXPECT_TRUE(!page2->IsLargePage());
EXPECT_TRUE(v8_flags.minor_mc || page2->IsToPage());
EXPECT_NE(page1, page2);
// Keep inner pointers to both.
inner_ptr1 = obj1.address() + 17 * kTaggedSize;
inner_ptr2 = obj2.address() + 37 * kTaggedSize;
// Keep pointers to the end of the pages, after the objects.
outside_ptr1 = page1->area_end() - 3 * kTaggedSize;
outside_ptr2 = page2->area_end() - 2 * kTaggedSize;
EXPECT_LE(obj1.address() + obj1.Size(cage_base), outside_ptr1);
EXPECT_LE(obj2.address() + obj2.Size(cage_base), outside_ptr2);
// Ensure the young generation space is iterable.
heap()->new_space()->MakeLinearAllocationAreaIterable();
// Inner pointer resolution should work now, finding the objects in the
// case of the inner pointers.
EXPECT_EQ(
obj1.address(),
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
EXPECT_EQ(
obj2.address(),
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
}
// Garbage collection should reclaim both objects.
CollectGarbage(NEW_SPACE);
EXPECT_TRUE(weak1.IsEmpty());
EXPECT_TRUE(weak2.IsEmpty());
EXPECT_EQ(AllocationSpace::NEW_SPACE, page1->owner_identity());
EXPECT_EQ(AllocationSpace::NEW_SPACE, page2->owner_identity());
EXPECT_TRUE(v8_flags.minor_mc || page1->IsFromPage());
EXPECT_TRUE(v8_flags.minor_mc || page2->IsFromPage());
// Inner pointer resolution should work with pointers to unused young
// generation pages (in case of the scavenger, the two pages are now in the
// "from" semispace). There are no objects to be found.
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
// Garbage collection once more.
CollectGarbage(NEW_SPACE);
EXPECT_EQ(AllocationSpace::NEW_SPACE, page1->owner_identity());
EXPECT_EQ(AllocationSpace::NEW_SPACE, page2->owner_identity());
EXPECT_TRUE(v8_flags.minor_mc || page1->IsToPage());
EXPECT_TRUE(v8_flags.minor_mc || page2->IsToPage());
// Inner pointer resolution should work with pointers to unused young
// generation pages (in case of the scavenger, the two pages are now in the
// "to" semispace). There are no objects to be found.
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
EXPECT_EQ(
kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
}
TEST_F(InnerPointerResolutionHeapTest, UnusedLargeYoungPage) {
ManualGCScope manual_gc_scope(isolate());
v8_flags.page_promotion = false;
Global<v8::FixedArray> weak;
Address inner_ptr;
Page* page;
{
PtrComprCageBase cage_base{isolate()};
HandleScope scope(isolate());
// Allocate a large object in the young generation.
const int length =
std::max(1 << kPageSizeBits,
2 * heap()->MaxRegularHeapObjectSize(AllocationType::kYoung)) /
kTaggedSize;
auto h = factory()->NewFixedArray(length, AllocationType::kYoung);
weak.Reset(v8_isolate(), Utils::FixedArrayToLocal(h));
weak.SetWeak();
auto obj = h->GetHeapObject();
page = Page::FromHeapObject(obj);
EXPECT_TRUE(page->IsLargePage());
EXPECT_EQ(AllocationSpace::NEW_LO_SPACE, page->owner_identity());
EXPECT_TRUE(v8_flags.minor_mc || page->IsToPage());
// Keep inner pointer.
inner_ptr = obj.address() + 17 * kTaggedSize;
// Inner pointer resolution should work now, finding the object.
EXPECT_EQ(
obj.address(),
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
}
// Garbage collection should reclaim the object.
CollectGarbage(NEW_SPACE);
EXPECT_TRUE(weak.IsEmpty());
// Inner pointer resolution should work with a pointer to an unused young
// generation large page. There is no object to be found.
EXPECT_EQ(kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
}
TEST_F(InnerPointerResolutionHeapTest, RegularPageAfterEnd) {
// Allocate a regular page.
OldSpace* old_space = heap()->old_space();
DCHECK_NE(nullptr, old_space);
auto* page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, old_space, NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
// The end of the page area is expected not to coincide with the beginning of
// the next page.
const int size = (1 << kPageSizeBits) / 2;
const Address mark = page->area_start() + size;
heap()->CreateFillerObjectAt(page->area_start(), size);
heap()->CreateFillerObjectAt(mark, static_cast<int>(page->area_end() - mark));
Page::UpdateHighWaterMark(mark);
page->ShrinkToHighWaterMark();
EXPECT_FALSE(Page::IsAlignedToPageSize(page->area_end()));
// Inner pointer resolution after the end of the page area should work.
Address inner_ptr = page->area_end() + kTaggedSize;
EXPECT_FALSE(Page::IsAlignedToPageSize(inner_ptr));
EXPECT_EQ(kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
// Deallocate the page.
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
page);
}
TEST_F(InnerPointerResolutionHeapTest, LargePageAfterEnd) {
// Allocate a large page.
OldLargeObjectSpace* lo_space = heap()->lo_space();
EXPECT_NE(nullptr, lo_space);
const int size = 3 * (1 << kPageSizeBits) / 2;
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
lo_space, size, NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
// The end of the page area is expected not to coincide with the beginning of
// the next page.
EXPECT_FALSE(Page::IsAlignedToPageSize(page->area_end()));
// Inner pointer resolution after the end of the page area should work.
Address inner_ptr = page->area_end() + kTaggedSize;
EXPECT_FALSE(Page::IsAlignedToPageSize(inner_ptr));
EXPECT_EQ(kNullAddress,
heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
// Deallocate the page.
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
page);
}
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_MB
} // namespace internal