From 3eead7e32ebac2e10abafdba1f9c6e913b6902be Mon Sep 17 00:00:00 2001 From: Igor Sheludko Date: Mon, 25 Apr 2022 20:01:28 +0200 Subject: [PATCH] [rwx][muc] Prepare BoundedPageAllocator for fast W^X on M1 This CL extends BoundedPageAllocator with PageFreeingMode parameter which controls how pages should be freed: by setting permissions to kNoAccess (preferred) or by discarding pages (Apple Silicon specific behavior for RWX pages). The latter mode makes it possible to ensure that once pages are configured with RWX permissions, they are never reconfigured to anything else again. The new mode will be used in a follow-up CL. Bug: v8:12797 Change-Id: I3277f56ea6fee9c9b38b1682e68c22e66e9a02a4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3606228 Reviewed-by: Michael Lippautz Commit-Queue: Igor Sheludko Cr-Commit-Position: refs/heads/main@{#80162} --- src/base/bounded-page-allocator.cc | 44 +++++++++++++++++++++--------- src/base/bounded-page-allocator.h | 22 ++++++++++++++- src/heap/cppgc/caged-heap.cc | 3 +- src/utils/allocation.cc | 3 +- src/zone/accounting-allocator.cc | 3 +- test/cctest/heap/test-spaces.cc | 3 +- 6 files changed, 60 insertions(+), 18 deletions(-) diff --git a/src/base/bounded-page-allocator.cc b/src/base/bounded-page-allocator.cc index 37924a6d67..aa1da239d3 100644 --- a/src/base/bounded-page-allocator.cc +++ b/src/base/bounded-page-allocator.cc @@ -9,12 +9,14 @@ namespace base { BoundedPageAllocator::BoundedPageAllocator( v8::PageAllocator* page_allocator, Address start, size_t size, - size_t allocate_page_size, PageInitializationMode page_initialization_mode) + size_t allocate_page_size, PageInitializationMode page_initialization_mode, + PageFreeingMode page_freeing_mode) : allocate_page_size_(allocate_page_size), commit_page_size_(page_allocator->CommitPageSize()), page_allocator_(page_allocator), region_allocator_(start, size, allocate_page_size_), - page_initialization_mode_(page_initialization_mode) { + 
page_initialization_mode_(page_initialization_mode), + page_freeing_mode_(page_freeing_mode) { DCHECK_NOT_NULL(page_allocator); DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize())); DCHECK(IsAligned(allocate_page_size_, commit_page_size_)); @@ -57,10 +59,15 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size, } void* ptr = reinterpret_cast(address); - if (!page_allocator_->SetPermissions(ptr, size, access)) { - // This most likely means that we ran out of memory. - CHECK_EQ(region_allocator_.FreeRegion(address), size); - return nullptr; + // It's assumed that free regions are in kNoAccess/kNoAccessWillJitLater + // state. + if (access != PageAllocator::kNoAccess && + access != PageAllocator::kNoAccessWillJitLater) { + if (!page_allocator_->SetPermissions(ptr, size, access)) { + // This most likely means that we ran out of memory. + CHECK_EQ(region_allocator_.FreeRegion(address), size); + return nullptr; + } } return ptr; @@ -121,14 +128,20 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) { CHECK_EQ(size, region_allocator_.FreeRegion(address)); if (page_initialization_mode_ == PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) { + DCHECK_NE(page_freeing_mode_, PageFreeingMode::kDiscard); // When we are required to return zero-initialized pages, we decommit the // pages here, which will cause any wired pages to be removed by the OS. 
CHECK(page_allocator_->DecommitPages(raw_address, size)); } else { DCHECK_EQ(page_initialization_mode_, PageInitializationMode::kAllocatedPagesCanBeUninitialized); - CHECK(page_allocator_->SetPermissions(raw_address, size, - PageAllocator::kNoAccess)); + if (page_freeing_mode_ == PageFreeingMode::kMakeInaccessible) { + CHECK(page_allocator_->SetPermissions(raw_address, size, + PageAllocator::kNoAccess)); + } else { + CHECK_EQ(page_freeing_mode_, PageFreeingMode::kDiscard); + CHECK(page_allocator_->DiscardSystemPages(raw_address, size)); + } } return true; } @@ -161,18 +174,23 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size, } // Keep the region in "used" state just uncommit some pages. - Address free_address = address + new_size; + void* free_address = reinterpret_cast(address + new_size); size_t free_size = size - new_size; if (page_initialization_mode_ == PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) { + DCHECK_NE(page_freeing_mode_, PageFreeingMode::kDiscard); // See comment in FreePages(). 
- CHECK(page_allocator_->DecommitPages(reinterpret_cast(free_address), - free_size)); + CHECK(page_allocator_->DecommitPages(free_address, free_size)); } else { DCHECK_EQ(page_initialization_mode_, PageInitializationMode::kAllocatedPagesCanBeUninitialized); - CHECK(page_allocator_->SetPermissions(reinterpret_cast(free_address), - free_size, PageAllocator::kNoAccess)); + if (page_freeing_mode_ == PageFreeingMode::kMakeInaccessible) { + CHECK(page_allocator_->SetPermissions(free_address, free_size, + PageAllocator::kNoAccess)); + } else { + CHECK_EQ(page_freeing_mode_, PageFreeingMode::kDiscard); + CHECK(page_allocator_->DiscardSystemPages(free_address, free_size)); + } } return true; } diff --git a/src/base/bounded-page-allocator.h b/src/base/bounded-page-allocator.h index ade9aa2d34..0360763dfb 100644 --- a/src/base/bounded-page-allocator.h +++ b/src/base/bounded-page-allocator.h @@ -23,6 +23,24 @@ enum class PageInitializationMode { kAllocatedPagesCanBeUninitialized, }; +// Defines how BoundedPageAllocator frees pages when FreePages or ReleasePages +// is requested. +enum class PageFreeingMode { + // Pages are freed/released by setting permissions to kNoAccess. This is the + // preferred mode when current platform/configuration allows any page + // permissions reconfiguration. + kMakeInaccessible, + + // Pages are freed/released by using DiscardSystemPages of the underlying + // page allocator. This mode should be used in cases where page permission + // reconfiguration is not allowed. In particular, on macOS on ARM64 ("Apple + // M1"/Apple Silicon) it's not allowed to reconfigure RWX pages to anything + // else. + // This mode is not compatible with kAllocatedPagesMustBeZeroInitialized + // page initialization mode. + kDiscard, +}; + // This is a v8::PageAllocator implementation that allocates pages within the // pre-reserved region of virtual space. This class requires the virtual space // to be kept reserved during the lifetime of this object. 
@@ -40,7 +58,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator { BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start, size_t size, size_t allocate_page_size, - PageInitializationMode page_initialization_mode); + PageInitializationMode page_initialization_mode, + PageFreeingMode page_freeing_mode); BoundedPageAllocator(const BoundedPageAllocator&) = delete; BoundedPageAllocator& operator=(const BoundedPageAllocator&) = delete; ~BoundedPageAllocator() override = default; @@ -92,6 +111,7 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator { v8::PageAllocator* const page_allocator_; v8::base::RegionAllocator region_allocator_; const PageInitializationMode page_initialization_mode_; + const PageFreeingMode page_freeing_mode_; }; } // namespace base diff --git a/src/heap/cppgc/caged-heap.cc b/src/heap/cppgc/caged-heap.cc index f1f031778b..e6bf4758e2 100644 --- a/src/heap/cppgc/caged-heap.cc +++ b/src/heap/cppgc/caged-heap.cc @@ -73,7 +73,8 @@ CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator) bounded_allocator_ = std::make_unique( &platform_allocator, caged_heap_start, reserved_area_.size() - local_data_size_with_padding, kPageSize, - v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized); + v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized, + v8::base::PageFreeingMode::kMakeInaccessible); } } // namespace internal diff --git a/src/utils/allocation.cc b/src/utils/allocation.cc index 41c0ac5dbe..1d99279ac0 100644 --- a/src/utils/allocation.cc +++ b/src/utils/allocation.cc @@ -462,7 +462,8 @@ bool VirtualMemoryCage::InitReservation( page_allocator_ = std::make_unique( params.page_allocator, allocatable_base, allocatable_size, params.page_size, - base::PageInitializationMode::kAllocatedPagesCanBeUninitialized); + base::PageInitializationMode::kAllocatedPagesCanBeUninitialized, + base::PageFreeingMode::kMakeInaccessible); return true; } diff --git 
a/src/zone/accounting-allocator.cc b/src/zone/accounting-allocator.cc index 141e111206..cdfe0304b9 100644 --- a/src/zone/accounting-allocator.cc +++ b/src/zone/accounting-allocator.cc @@ -55,7 +55,8 @@ std::unique_ptr CreateBoundedAllocator( auto allocator = std::make_unique( platform_allocator, reservation_start, ZoneCompression::kReservationSize, kZonePageSize, - base::PageInitializationMode::kAllocatedPagesCanBeUninitialized); + base::PageInitializationMode::kAllocatedPagesCanBeUninitialized, + base::PageFreeingMode::kMakeInaccessible); // Exclude first page from allocation to ensure that accesses through // decompressed null pointer will seg-fault. diff --git a/test/cctest/heap/test-spaces.cc b/test/cctest/heap/test-spaces.cc index 37261c4cc0..ad86489f73 100644 --- a/test/cctest/heap/test-spaces.cc +++ b/test/cctest/heap/test-spaces.cc @@ -176,7 +176,8 @@ TEST(MemoryChunk) { base::BoundedPageAllocator code_page_allocator( page_allocator, code_range_reservation.address(), code_range_reservation.size(), MemoryChunk::kAlignment, - base::PageInitializationMode::kAllocatedPagesCanBeUninitialized); + base::PageInitializationMode::kAllocatedPagesCanBeUninitialized, + base::PageFreeingMode::kMakeInaccessible); VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size, EXECUTABLE, PageSize::kLarge, heap->code_lo_space());