v8/test/unittests/heap/unmapper-unittest.cc
Samuel Groß a7cb30b0e9 Introduce VirtualAddressSpace interface
This interface is meant to eventually replace the existing
v8::PageAllocator interface. Beyond general refactoring of the
PageAllocator APIs, the new interface now supports the concept of
(contiguous) address space reservations, which previously had to be
implemented through page allocations. These reservations now make better
use of the OS primitives available on Fuchsia (VMARs) and Windows
(placeholder mappings), and they can be used to back many of the cages and
virtual memory regions that V8 creates.

The new interface is not yet stable and may change at any time without
deprecating the old version first.

Bug: chromium:1218005
Change-Id: I295253c42e04cf311393c5dab9f8c06bd7451ce3
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3301475
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78235}
2021-12-04 21:42:04 +00:00
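
To make the reservation concept concrete, here is a minimal, hypothetical sketch (not part of this file and not the new interface itself) of how a contiguous reservation has traditionally been emulated on top of the existing v8::PageAllocator API: a kNoAccess allocation reserves the address range, and sub-ranges are committed later via SetPermissions. The new VirtualAddressSpace interface is intended to express this pattern directly so it can map onto VMARs on Fuchsia and placeholder mappings on Windows. ReserveAndCommitExample is an illustrative helper name, not code from this CL.

// Sketch only: a hypothetical helper that emulates a reservation using the
// existing v8::PageAllocator calls exercised by the test below.
void ReserveAndCommitExample(v8::PageAllocator* allocator) {
  const size_t allocate_page_size = allocator->AllocatePageSize();
  const size_t commit_page_size = allocator->CommitPageSize();
  const size_t reservation_size = 16 * allocate_page_size;
  // "Reserve" a contiguous range by allocating it with kNoAccess; no
  // committed memory backs it yet.
  void* reservation = allocator->AllocatePages(
      allocator->GetRandomMmapAddr(), reservation_size, allocate_page_size,
      v8::PageAllocator::kNoAccess);
  if (reservation == nullptr) return;
  // Commit the first page of the reservation by making it read-write.
  CHECK(allocator->SetPermissions(reservation, commit_page_size,
                                  v8::PageAllocator::kReadWrite));
  // ... use the committed page ...
  // Release the whole reservation.
  CHECK(allocator->FreePages(reservation, reservation_size));
}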

// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <functional>
#include <map>

#include "src/base/region-allocator.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/spaces-inl.h"
#include "src/utils/ostreams.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

// This is a v8::PageAllocator implementation that decorates the provided
// page allocator with page-tracking functionality.
class TrackingPageAllocator : public ::v8::PageAllocator {
 public:
  explicit TrackingPageAllocator(v8::PageAllocator* page_allocator)
      : page_allocator_(page_allocator),
        allocate_page_size_(page_allocator_->AllocatePageSize()),
        commit_page_size_(page_allocator_->CommitPageSize()),
        region_allocator_(kNullAddress, size_t{0} - commit_page_size_,
                          commit_page_size_) {
    CHECK_NOT_NULL(page_allocator);
    CHECK(IsAligned(allocate_page_size_, commit_page_size_));
  }

  ~TrackingPageAllocator() override = default;

  size_t AllocatePageSize() override { return allocate_page_size_; }
  size_t CommitPageSize() override { return commit_page_size_; }

  void SetRandomMmapSeed(int64_t seed) override {
    return page_allocator_->SetRandomMmapSeed(seed);
  }

  void* GetRandomMmapAddr() override {
    return page_allocator_->GetRandomMmapAddr();
  }

  void* AllocatePages(void* address, size_t size, size_t alignment,
                      PageAllocator::Permission access) override {
    void* result =
        page_allocator_->AllocatePages(address, size, alignment, access);
    if (result) {
      // Mark pages as used.
      Address current_page = reinterpret_cast<Address>(result);
      CHECK(IsAligned(current_page, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      CHECK(region_allocator_.AllocateRegionAt(current_page, size));
      Address end = current_page + size;
      while (current_page < end) {
        page_permissions_.insert({current_page, access});
        current_page += commit_page_size_;
      }
    }
    return result;
  }

  bool FreePages(void* address, size_t size) override {
    bool result = page_allocator_->FreePages(address, size);
    if (result) {
      // Mark pages as free.
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      size_t freed_size = region_allocator_.FreeRegion(start);
      CHECK(IsAligned(freed_size, commit_page_size_));
      CHECK_EQ(RoundUp(freed_size, allocate_page_size_), size);
      auto start_iter = page_permissions_.find(start);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool ReleasePages(void* address, size_t size, size_t new_size) override {
    bool result = page_allocator_->ReleasePages(address, size, new_size);
    if (result) {
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, commit_page_size_));
      CHECK(IsAligned(new_size, commit_page_size_));
      CHECK_LT(new_size, size);
      CHECK_EQ(region_allocator_.TrimRegion(start, new_size), size - new_size);
      auto start_iter = page_permissions_.find(start + new_size);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool DecommitPages(void* address, size_t size) override {
    bool result = page_allocator_->DecommitPages(address, size);
    if (result) {
      // Mark pages as non-accessible.
      UpdatePagePermissions(reinterpret_cast<Address>(address), size,
                            kNoAccess);
    }
    return result;
  }

  bool SetPermissions(void* address, size_t size,
                      PageAllocator::Permission access) override {
    bool result = page_allocator_->SetPermissions(address, size, access);
    if (result) {
      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access);
    }
    return result;
  }

  // Returns true if all the allocated pages were freed.
  bool IsEmpty() { return page_permissions_.empty(); }

  void CheckIsFree(Address address, size_t size) {
    CHECK(IsAligned(address, allocate_page_size_));
    CHECK(IsAligned(size, allocate_page_size_));
    EXPECT_TRUE(region_allocator_.IsFree(address, size));
  }

  void CheckPagePermissions(Address address, size_t size,
                            PageAllocator::Permission access) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      EXPECT_EQ(access, value->second);
    });
  }

  void Print(const char* comment) const {
    i::StdoutStream os;
    os << "\n========================================="
       << "\nTrackingPageAllocator state: ";
    if (comment) os << comment;
    os << "\n-----------------------------------------\n";
    region_allocator_.Print(os);
    os << "-----------------------------------------"
       << "\nPage permissions:";
    if (page_permissions_.empty()) {
      os << " empty\n";
      return;
    }
    os << "\n" << std::hex << std::showbase;
    Address contiguous_region_start = static_cast<Address>(-1);
    Address contiguous_region_end = contiguous_region_start;
    PageAllocator::Permission contiguous_region_access =
        PageAllocator::kNoAccess;
    for (auto& pair : page_permissions_) {
      if (contiguous_region_end == pair.first &&
          pair.second == contiguous_region_access) {
        contiguous_region_end += commit_page_size_;
        continue;
      }
      if (contiguous_region_start != contiguous_region_end) {
        PrintRegion(os, contiguous_region_start, contiguous_region_end,
                    contiguous_region_access);
      }
      contiguous_region_start = pair.first;
      contiguous_region_end = pair.first + commit_page_size_;
      contiguous_region_access = pair.second;
    }
    if (contiguous_region_start != contiguous_region_end) {
      PrintRegion(os, contiguous_region_start, contiguous_region_end,
                  contiguous_region_access);
    }
  }

 private:
  using PagePermissionsMap = std::map<Address, PageAllocator::Permission>;
  using ForEachFn = std::function<void(PagePermissionsMap::value_type*)>;

  static void PrintRegion(std::ostream& os, Address start, Address end,
                          PageAllocator::Permission access) {
    os << " page: [" << start << ", " << end << "), access: ";
    switch (access) {
      case PageAllocator::kNoAccess:
      case PageAllocator::kNoAccessWillJitLater:
        os << "--";
        break;
      case PageAllocator::kRead:
        os << "R";
        break;
      case PageAllocator::kReadWrite:
        os << "RW";
        break;
      case PageAllocator::kReadWriteExecute:
        os << "RWX";
        break;
      case PageAllocator::kReadExecute:
        os << "RX";
        break;
    }
    os << "\n";
  }

  void ForEachPage(Address address, size_t size, const ForEachFn& fn) {
    CHECK(IsAligned(address, commit_page_size_));
    CHECK(IsAligned(size, commit_page_size_));
    auto start_iter = page_permissions_.find(address);
    // The start page must exist in page_permissions_.
    CHECK_NE(start_iter, page_permissions_.end());
    auto end_iter = page_permissions_.find(address + size - commit_page_size_);
    // Ensure that the last page of the range also exists in page_permissions_.
    CHECK_NE(end_iter, page_permissions_.end());
    // Now make it point to the next element so that the following loop also
    // processes the last page.
    ++end_iter;
    for (auto iter = start_iter; iter != end_iter; ++iter) {
      PagePermissionsMap::value_type& pair = *iter;
      fn(&pair);
    }
  }

  void UpdatePagePermissions(Address address, size_t size,
                             PageAllocator::Permission access) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      value->second = access;
    });
  }

  v8::PageAllocator* const page_allocator_;
  const size_t allocate_page_size_;
  const size_t commit_page_size_;
  // Region allocator tracks page allocation/deallocation requests.
  base::RegionAllocator region_allocator_;
  // This map keeps track of allocated pages' permissions.
  PagePermissionsMap page_permissions_;
};

// This test is currently incompatible with the VirtualMemoryCage. Enable it
// once the VirtualAddressSpace interface is stable.
#if !V8_OS_FUCHSIA && !V8_VIRTUAL_MEMORY_CAGE
class SequentialUnmapperTest : public TestWithIsolate {
 public:
  SequentialUnmapperTest() = default;
  ~SequentialUnmapperTest() override = default;
  SequentialUnmapperTest(const SequentialUnmapperTest&) = delete;
  SequentialUnmapperTest& operator=(const SequentialUnmapperTest&) = delete;

  static void SetUpTestCase() {
    CHECK_NULL(tracking_page_allocator_);
    old_page_allocator_ = GetPlatformPageAllocator();
    tracking_page_allocator_ = new TrackingPageAllocator(old_page_allocator_);
    CHECK(tracking_page_allocator_->IsEmpty());
    CHECK_EQ(old_page_allocator_,
             SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
    old_flag_ = i::FLAG_concurrent_sweeping;
    i::FLAG_concurrent_sweeping = false;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    // Reinitialize the process-wide pointer cage so it can pick up the
    // TrackingPageAllocator. The pointer cage must be destroyed before the
    // virtual memory cage.
    IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#ifdef V8_VIRTUAL_MEMORY_CAGE
    // Reinitialize the virtual memory cage so it uses the
    // TrackingPageAllocator.
    GetProcessWideVirtualMemoryCage()->TearDown();
    constexpr bool use_guard_regions = false;
    CHECK(GetProcessWideVirtualMemoryCage()->Initialize(
        tracking_page_allocator_, kVirtualMemoryCageMinimumSize,
        use_guard_regions));
#endif
    IsolateAllocator::InitializeOncePerProcess();
#endif
    TestWithIsolate::SetUpTestCase();
  }

  static void TearDownTestCase() {
    TestWithIsolate::TearDownTestCase();
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    // Free the process-wide cage reservation, otherwise the pages won't be
    // freed until process teardown.
    IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#endif
#ifdef V8_VIRTUAL_MEMORY_CAGE
    GetProcessWideVirtualMemoryCage()->TearDown();
#endif
    i::FLAG_concurrent_sweeping = old_flag_;
    CHECK(tracking_page_allocator_->IsEmpty());
    // Restore the original v8::PageAllocator and delete the tracking one.
    CHECK_EQ(tracking_page_allocator_,
             SetPlatformPageAllocatorForTesting(old_page_allocator_));
    delete tracking_page_allocator_;
    tracking_page_allocator_ = nullptr;
  }

  Heap* heap() { return isolate()->heap(); }
  MemoryAllocator* allocator() { return heap()->memory_allocator(); }
  MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }

  TrackingPageAllocator* tracking_page_allocator() {
    return tracking_page_allocator_;
  }

 private:
  static TrackingPageAllocator* tracking_page_allocator_;
  static v8::PageAllocator* old_page_allocator_;
  static bool old_flag_;
};

TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
    nullptr;
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;

// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
  if (FLAG_enable_third_party_heap) return;
  Page* page = allocator()->AllocatePage(
      MemoryChunkLayout::AllocatableMemoryInDataPage(),
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  unmapper()->FreeQueuedChunks();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kNoAccess);
  unmapper()->TearDown();
#ifdef V8_COMPRESS_POINTERS
  // In this mode the Isolate uses a bounded page allocator that allocates
  // pages inside a prereserved region. Thus these pages are kept reserved
  // until the Isolate dies.
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kNoAccess);
#else
  tracking_page_allocator()->CheckIsFree(page->address(), page_size);
#endif  // V8_COMPRESS_POINTERS
}

// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
  if (FLAG_enable_third_party_heap) return;
  Page* page = allocator()->AllocatePage(
      MemoryChunkLayout::AllocatableMemoryInDataPage(),
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  unmapper()->TearDown();
#ifdef V8_COMPRESS_POINTERS
  // In this mode the Isolate uses a bounded page allocator that allocates
  // pages inside a prereserved region. Thus these pages are kept reserved
  // until the Isolate dies.
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kNoAccess);
#else
  tracking_page_allocator()->CheckIsFree(page->address(), page_size);
#endif  // V8_COMPRESS_POINTERS
}

#endif  // !V8_OS_FUCHSIA && !V8_VIRTUAL_MEMORY_CAGE

}  // namespace internal
}  // namespace v8