[rwx][muc] Prepare BoundedPageAllocator for fast W^X on M1

This CL extends BoundedPageAllocator with a PageFreeingMode parameter
which controls how pages are freed: by setting permissions to
kNoAccess (the preferred mode) or by discarding the pages (Apple
Silicon-specific behavior for RWX pages). The latter mode ensures
that once pages are configured with RWX permissions they are never
reconfigured to anything else again.

The new mode will be used in a follow-up CL; a hypothetical usage
sketch follows the change summary below.

Bug: v8:12797
Change-Id: I3277f56ea6fee9c9b38b1682e68c22e66e9a02a4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3606228
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80162}
Author: Igor Sheludko <ishell@chromium.org>
Date:   2022-04-25 20:01:28 +02:00 (committed by V8 LUCI CQ)
Commit: 3eead7e32e (parent: bb5c4b5101)

6 changed files with 60 additions and 18 deletions
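
As a rough sketch of where the new mode is headed (hypothetical usage,
not part of this CL): the names platform_allocator, code_range_start,
code_range_size and allocate_page_size stand in for whatever the
embedder's code-range setup provides.

  // Hypothetical follow-up usage (not in this CL): a code-space allocator
  // whose pages, once made RWX, are never reconfigured again.
  v8::base::BoundedPageAllocator code_page_allocator(
      platform_allocator, code_range_start, code_range_size,
      allocate_page_size,
      // kDiscard never decommits pages, so it cannot hand out
      // zero-initialized memory; the uninitialized mode is required.
      v8::base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
      v8::base::PageFreeingMode::kDiscard);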

@@ -9,12 +9,14 @@ namespace base {
 
 BoundedPageAllocator::BoundedPageAllocator(
     v8::PageAllocator* page_allocator, Address start, size_t size,
-    size_t allocate_page_size, PageInitializationMode page_initialization_mode)
+    size_t allocate_page_size, PageInitializationMode page_initialization_mode,
+    PageFreeingMode page_freeing_mode)
     : allocate_page_size_(allocate_page_size),
       commit_page_size_(page_allocator->CommitPageSize()),
       page_allocator_(page_allocator),
       region_allocator_(start, size, allocate_page_size_),
-      page_initialization_mode_(page_initialization_mode) {
+      page_initialization_mode_(page_initialization_mode),
+      page_freeing_mode_(page_freeing_mode) {
   DCHECK_NOT_NULL(page_allocator);
   DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
   DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
@@ -57,10 +59,15 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
   }
 
   void* ptr = reinterpret_cast<void*>(address);
-  if (!page_allocator_->SetPermissions(ptr, size, access)) {
-    // This most likely means that we ran out of memory.
-    CHECK_EQ(region_allocator_.FreeRegion(address), size);
-    return nullptr;
+  // It's assumed that free regions are in kNoAccess/kNoAccessWillJitLater
+  // state.
+  if (access != PageAllocator::kNoAccess &&
+      access != PageAllocator::kNoAccessWillJitLater) {
+    if (!page_allocator_->SetPermissions(ptr, size, access)) {
+      // This most likely means that we ran out of memory.
+      CHECK_EQ(region_allocator_.FreeRegion(address), size);
+      return nullptr;
+    }
   }
 
   return ptr;
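
The early-out above relies on an invariant maintained by the free path
(next hunk): freed regions are left in a state compatible with a later
kNoAccess/kNoAccessWillJitLater allocation, so no SetPermissions call is
needed when they are handed out again. A minimal sketch of the calling
pattern this enables (hypothetical driver code; bpa, size and alignment
are assumed):

  // Hypothetical calling pattern enabled by the early-out above.
  void* p = bpa.AllocatePages(nullptr, size, alignment,
                              v8::PageAllocator::kNoAccessWillJitLater);
  // ... elsewhere: pages become RWX, JIT code is written and executed ...
  CHECK(bpa.FreePages(p, size));
  // Re-allocation with a kNoAccess* permission skips SetPermissions, so
  // pages freed under kDiscard keep their RWX protection throughout.
  void* q = bpa.AllocatePages(nullptr, size, alignment,
                              v8::PageAllocator::kNoAccessWillJitLater);
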
@@ -121,14 +128,20 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
   CHECK_EQ(size, region_allocator_.FreeRegion(address));
   if (page_initialization_mode_ ==
       PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
+    DCHECK_NE(page_freeing_mode_, PageFreeingMode::kDiscard);
     // When we are required to return zero-initialized pages, we decommit the
     // pages here, which will cause any wired pages to be removed by the OS.
     CHECK(page_allocator_->DecommitPages(raw_address, size));
   } else {
     DCHECK_EQ(page_initialization_mode_,
               PageInitializationMode::kAllocatedPagesCanBeUninitialized);
-    CHECK(page_allocator_->SetPermissions(raw_address, size,
-                                          PageAllocator::kNoAccess));
+    if (page_freeing_mode_ == PageFreeingMode::kMakeInaccessible) {
+      CHECK(page_allocator_->SetPermissions(raw_address, size,
+                                            PageAllocator::kNoAccess));
+    } else {
+      CHECK_EQ(page_freeing_mode_, PageFreeingMode::kDiscard);
+      CHECK(page_allocator_->DiscardSystemPages(raw_address, size));
+    }
   }
   return true;
 }
@@ -161,18 +174,23 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
   }
 
   // Keep the region in "used" state just uncommit some pages.
-  Address free_address = address + new_size;
+  void* free_address = reinterpret_cast<void*>(address + new_size);
   size_t free_size = size - new_size;
   if (page_initialization_mode_ ==
       PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
+    DCHECK_NE(page_freeing_mode_, PageFreeingMode::kDiscard);
     // See comment in FreePages().
-    CHECK(page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
-                                         free_size));
+    CHECK(page_allocator_->DecommitPages(free_address, free_size));
   } else {
     DCHECK_EQ(page_initialization_mode_,
               PageInitializationMode::kAllocatedPagesCanBeUninitialized);
-    CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
-                                          free_size, PageAllocator::kNoAccess));
+    if (page_freeing_mode_ == PageFreeingMode::kMakeInaccessible) {
+      CHECK(page_allocator_->SetPermissions(free_address, free_size,
+                                            PageAllocator::kNoAccess));
+    } else {
+      CHECK_EQ(page_freeing_mode_, PageFreeingMode::kDiscard);
+      CHECK(page_allocator_->DiscardSystemPages(free_address, free_size));
+    }
   }
   return true;
 }
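
Taken together, FreePages() and ReleasePages() now pick one of three
underlying operations. The helper below is a distilled, standalone
mirror of that decision logic, included here only for clarity; it is
illustrative and not part of the CL:

  // Distilled mirror of the free/release decision logic above.
  void FreeRange(v8::PageAllocator* pa, void* p, size_t n,
                 v8::base::PageInitializationMode init_mode,
                 v8::base::PageFreeingMode freeing_mode) {
    using v8::base::PageInitializationMode;
    using v8::base::PageFreeingMode;
    if (init_mode ==
        PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
      // Decommit so the OS supplies zeroed pages on the next access.
      // Incompatible with kDiscard, which must not touch the mapping.
      CHECK(pa->DecommitPages(p, n));
    } else if (freeing_mode == PageFreeingMode::kMakeInaccessible) {
      // Revoke all access; later allocations may assume kNoAccess state.
      CHECK(pa->SetPermissions(p, n, v8::PageAllocator::kNoAccess));
    } else {
      // kDiscard: release the physical memory but leave the mapping and
      // its permissions (e.g. RWX on Apple Silicon) untouched.
      CHECK(pa->DiscardSystemPages(p, n));
    }
  }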

@@ -23,6 +23,24 @@ enum class PageInitializationMode {
   kAllocatedPagesCanBeUninitialized,
 };
 
+// Defines how BoundedPageAllocator frees pages when FreePages or ReleasePages
+// is requested.
+enum class PageFreeingMode {
+  // Pages are freed/released by setting permissions to kNoAccess. This is the
+  // preferred mode when current platform/configuration allows any page
+  // permissions reconfiguration.
+  kMakeInaccessible,
+
+  // Pages are freed/released by using DiscardSystemPages of the underlying
+  // page allocator. This mode should be used for the cases when page
+  // permission reconfiguration is not allowed. In particular, on MacOS on
+  // ARM64 ("Apple M1"/Apple Silicon) it's not allowed to reconfigure RWX
+  // pages to anything else.
+  // This mode is not compatible with kAllocatedPagesMustBeZeroInitialized
+  // page initialization mode.
+  kDiscard,
+};
+
 // This is a v8::PageAllocator implementation that allocates pages within the
 // pre-reserved region of virtual space. This class requires the virtual space
 // to be kept reserved during the lifetime of this object.
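
The compatibility rule stated in the comment above is what the new
DCHECK_NE in FreePages()/ReleasePages() enforces. Reduced to a single
predicate (illustrative helper, not part of the CL):

  // Illustrative predicate for the mode-compatibility rule above:
  // kDiscard never decommits or remaps pages, so it cannot guarantee
  // zero-initialized memory on reallocation.
  constexpr bool ModesAreCompatible(PageInitializationMode init_mode,
                                    PageFreeingMode freeing_mode) {
    return freeing_mode != PageFreeingMode::kDiscard ||
           init_mode !=
               PageInitializationMode::kAllocatedPagesMustBeZeroInitialized;
  }
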
@@ -40,7 +58,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
   BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
                        size_t size, size_t allocate_page_size,
-                       PageInitializationMode page_initialization_mode);
+                       PageInitializationMode page_initialization_mode,
+                       PageFreeingMode page_freeing_mode);
   BoundedPageAllocator(const BoundedPageAllocator&) = delete;
   BoundedPageAllocator& operator=(const BoundedPageAllocator&) = delete;
   ~BoundedPageAllocator() override = default;
@@ -92,6 +111,7 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
   v8::PageAllocator* const page_allocator_;
   v8::base::RegionAllocator region_allocator_;
   const PageInitializationMode page_initialization_mode_;
+  const PageFreeingMode page_freeing_mode_;
 };
 
 }  // namespace base

@@ -73,7 +73,8 @@ CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
   bounded_allocator_ = std::make_unique<v8::base::BoundedPageAllocator>(
       &platform_allocator, caged_heap_start,
       reserved_area_.size() - local_data_size_with_padding, kPageSize,
-      v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
+      v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized,
+      v8::base::PageFreeingMode::kMakeInaccessible);
 }
 
 }  // namespace internal

@@ -462,7 +462,8 @@ bool VirtualMemoryCage::InitReservation(
   page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
       params.page_allocator, allocatable_base, allocatable_size,
       params.page_size,
-      base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
+      base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
+      base::PageFreeingMode::kMakeInaccessible);
 
   return true;
 }

@@ -55,7 +55,8 @@ std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
   auto allocator = std::make_unique<v8::base::BoundedPageAllocator>(
       platform_allocator, reservation_start, ZoneCompression::kReservationSize,
       kZonePageSize,
-      base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
+      base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
+      base::PageFreeingMode::kMakeInaccessible);
 
   // Exclude first page from allocation to ensure that accesses through
   // decompressed null pointer will seg-fault.

@@ -176,7 +176,8 @@ TEST(MemoryChunk) {
     base::BoundedPageAllocator code_page_allocator(
         page_allocator, code_range_reservation.address(),
         code_range_reservation.size(), MemoryChunk::kAlignment,
-        base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
+        base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
+        base::PageFreeingMode::kMakeInaccessible);
 
     VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
                       EXECUTABLE, PageSize::kLarge, heap->code_lo_space());