diff --git a/include/v8-internal.h b/include/v8-internal.h
index 3c9a21e639..e1aee508bb 100644
--- a/include/v8-internal.h
+++ b/include/v8-internal.h
@@ -494,13 +494,13 @@ constexpr bool VirtualMemoryCageIsEnabled() {
#endif
}
-#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-#define GB (1ULL << 30)
-#define TB (1ULL << 40)
-
+#ifdef V8_VIRTUAL_MEMORY_CAGE
// Size of the virtual memory cage, excluding the guard regions surrounding it.
-constexpr size_t kVirtualMemoryCageSize = 1ULL * TB;
+constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB
+
+static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize,
+ "The virtual memory cage must be larger than the pointer "
+ "compression cage contained within it.");
// Required alignment of the virtual memory cage. For simplicity, we require the
// size of the guard regions to be a multiple of this, so that this specifies
@@ -513,7 +513,7 @@ constexpr size_t kVirtualMemoryCageAlignment =
// Size of the guard regions surrounding the virtual memory cage. This assumes a
// worst-case scenario of a 32-bit unsigned index being used to access an array
// of 64-bit values.
-constexpr size_t kVirtualMemoryCageGuardRegionSize = 32ULL * GB;
+constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30; // 32 GB
static_assert((kVirtualMemoryCageGuardRegionSize %
kVirtualMemoryCageAlignment) == 0,
@@ -525,31 +525,7 @@ static_assert((kVirtualMemoryCageGuardRegionSize %
// until either the reservation succeeds or the minimum size is reached. A
// minimum of 32GB allows the 4GB pointer compression region as well as the
// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage.
-constexpr size_t kVirtualMemoryCageMinimumSize = 32ULL * GB;
-
-static_assert(kVirtualMemoryCageMinimumSize <= kVirtualMemoryCageSize,
- "The minimal size of the virtual memory cage must be smaller or "
- "equal to the regular size.");
-
-// On OSes where reserving virtual memory is too expensive to create a real
-// cage, notably Windows pre 8.1, we create a fake cage that doesn't actually
-// reserve most of the memory, and so doesn't have the desired security
-// properties, but still ensures that objects that should be located inside the
-// cage are allocated within kVirtualMemoryCageSize bytes from the start of the
-// cage, and so appear to be inside the cage. The minimum size of the virtual
-// memory range that is actually reserved for a fake cage is specified by this
-// constant and should be big enough to contain the pointer compression region
-// as well as the ArrayBuffer partition.
-constexpr size_t kFakeVirtualMemoryCageMinReservationSize = 8ULL * GB;
-
-static_assert(kVirtualMemoryCageMinimumSize >
- Internals::kPtrComprCageReservationSize,
- "The virtual memory cage must be larger than the pointer "
- "compression cage contained within it.");
-static_assert(kFakeVirtualMemoryCageMinReservationSize >
- Internals::kPtrComprCageReservationSize,
- "The reservation for a fake virtual memory cage must be larger "
- "than the pointer compression cage contained within it.");
+constexpr size_t kVirtualMemoryCageMinimumSize = size_t{32} << 30; // 32 GB
// For now, even if the virtual memory cage is enabled, we still allow backing
// stores to be allocated outside of it as fallback. This will simplify the
@@ -561,10 +537,7 @@ constexpr bool kAllowBackingStoresOutsideCage = false;
constexpr bool kAllowBackingStoresOutsideCage = true;
#endif // V8_HEAP_SANDBOX
-#undef GB
-#undef TB
-
-#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
+#endif // V8_VIRTUAL_MEMORY_CAGE
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
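
For reference, the size relationships that the v8-internal.h hunk above encodes can be checked in isolation. A minimal standalone sketch (not V8 code), assuming the 4 GB pointer compression reservation that `Internals::kPtrComprCageReservationSize` denotes in a default build:

```cpp
#include <cstddef>

// Assumed value: 4 GB, V8's usual pointer compression reservation.
constexpr size_t kPtrComprCageReservationSize = size_t{4} << 30;

constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40;              // 1 TB
constexpr size_t kVirtualMemoryCageMinimumSize = size_t{32} << 30;      // 32 GB
constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30;  // 32 GB

// Even the smallest cage must still contain the pointer compression cage,
// and the minimum can never exceed the full size.
static_assert(kVirtualMemoryCageMinimumSize > kPtrComprCageReservationSize,
              "cage must contain the pointer compression cage");
static_assert(kVirtualMemoryCageMinimumSize <= kVirtualMemoryCageSize,
              "minimum size must not exceed the full size");

// A full reservation adds one guard region on each side: 1 TB + 64 GB.
constexpr size_t kMaxReservationSize =
    kVirtualMemoryCageSize + 2 * kVirtualMemoryCageGuardRegionSize;

int main() { return kMaxReservationSize != 0 ? 0 : 1; }
```
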
diff --git a/include/v8config.h b/include/v8config.h
index 4923183825..b010b65dfd 100644
--- a/include/v8config.h
+++ b/include/v8config.h
@@ -553,13 +553,6 @@ V8 shared library set USING_V8_SHARED.
#endif // V8_OS_WIN
-// The virtual memory cage is available (i.e. defined) when pointer compression
-// is enabled, but it is only used when V8_VIRTUAL_MEMORY_CAGE is enabled as
-// well. This allows better test coverage of the cage.
-#if defined(V8_COMPRESS_POINTERS)
-#define V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-#endif
-
// clang-format on
#undef V8_HAS_CPP_ATTRIBUTE
diff --git a/src/base/bounded-page-allocator.cc b/src/base/bounded-page-allocator.cc
index d33857845a..e5f090682f 100644
--- a/src/base/bounded-page-allocator.cc
+++ b/src/base/bounded-page-allocator.cc
@@ -33,25 +33,16 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
DCHECK(IsAligned(alignment, region_allocator_.page_size()));
DCHECK(IsAligned(alignment, allocate_page_size_));
- Address address = RegionAllocator::kAllocationFailure;
-
- Address hint_address = reinterpret_cast<Address>(hint);
- if (hint_address && IsAligned(hint_address, alignment) &&
- region_allocator_.contains(hint_address, size)) {
- if (region_allocator_.AllocateRegionAt(hint_address, size)) {
- address = hint_address;
- }
+ Address address;
+ if (alignment <= allocate_page_size_) {
+ // TODO(ishell): Consider using randomized version here.
+ address = region_allocator_.AllocateRegion(size);
+ } else {
+ // Currently, this should only be necessary when V8_VIRTUAL_MEMORY_CAGE is
+ // enabled, in which case a bounded page allocator is used to allocate WASM
+ // memory buffers, which have a larger alignment.
+ address = region_allocator_.AllocateAlignedRegion(size, alignment);
}
-
- if (address == RegionAllocator::kAllocationFailure) {
- if (alignment <= allocate_page_size_) {
- // TODO(ishell): Consider using randomized version here.
- address = region_allocator_.AllocateRegion(size);
- } else {
- address = region_allocator_.AllocateAlignedRegion(size, alignment);
- }
- }
-
if (address == RegionAllocator::kAllocationFailure) {
return nullptr;
}
diff --git a/src/init/isolate-allocator.cc b/src/init/isolate-allocator.cc
index c311011d87..c790f5c09a 100644
--- a/src/init/isolate-allocator.cc
+++ b/src/init/isolate-allocator.cc
@@ -98,15 +98,8 @@ void IsolateAllocator::InitializeOncePerProcess() {
// runs, and so this will be guaranteed. Currently however, it is possible
// that the embedder accidentally uses the cage's page allocator prior to
// initializing V8, in which case this CHECK will likely fail.
- // TODO(chromium:12180) here we rely on our BoundedPageAllocators to
- // respect the hint parameter. Instead, it would probably be better to add
- // a new API that guarantees this, either directly to the PageAllocator
- // interface or to a derived one.
- void* hint = reinterpret_cast<void*>(cage->base());
- void* base = cage->page_allocator()->AllocatePages(
- hint, params.reservation_size, params.base_alignment,
- PageAllocator::kNoAccess);
- CHECK_EQ(base, hint);
+ CHECK(cage->page_allocator()->AllocatePagesAt(
+ cage->base(), params.reservation_size, PageAllocator::kNoAccess));
existing_reservation =
base::AddressRegion(cage->base(), params.reservation_size);
params.page_allocator = cage->page_allocator();
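
The hint-then-`CHECK_EQ` pattern is replaced by a single call that either maps at the requested address or fails, which is exactly what the removed TODO asked for. A toy contrast of the two call shapes (`ToyAllocator` is hypothetical; only the pattern mirrors the diff):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical allocator over a single region, for contrast only.
class ToyAllocator {
 public:
  ToyAllocator(uintptr_t base, size_t size) : base_(base), size_(size) {}

  // Hinted allocation: the hint is best-effort, so the caller must verify
  // the result -- the pattern the diff removes (AllocatePages + CHECK_EQ).
  uintptr_t AllocatePages(uintptr_t hint, size_t size) {
    if (!used_ && hint == base_ && size <= size_) {
      used_ = true;
      return base_;
    }
    return 0;  // a real allocator might return a different address instead
  }

  // Allocate-at: all-or-nothing at exactly the given address, so a single
  // CHECK on the boolean result suffices -- the pattern the diff adds.
  bool AllocatePagesAt(uintptr_t address, size_t size) {
    if (used_ || address != base_ || size > size_) return false;
    used_ = true;
    return true;
  }

 private:
  uintptr_t base_;
  size_t size_;
  bool used_ = false;
};

int main() {
  ToyAllocator cage(uintptr_t{1} << 38, size_t{4} << 30);
  bool ok = cage.AllocatePagesAt(uintptr_t{1} << 38, size_t{4} << 30);
  std::printf("reservation placed at cage base: %s\n", ok ? "yes" : "no");
  return ok ? 0 : 1;
}
```
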
diff --git a/src/init/vm-cage.cc b/src/init/vm-cage.cc
index c587c529e4..9d95c9b91e 100644
--- a/src/init/vm-cage.cc
+++ b/src/init/vm-cage.cc
@@ -8,8 +8,6 @@
#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
-#include "src/base/utils/random-number-generator.h"
-#include "src/flags/flags.h"
#include "src/utils/allocation.h"
#if defined(V8_OS_WIN)
@@ -21,199 +19,22 @@
namespace v8 {
namespace internal {
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-
-// A PageAllocator that allocates pages inside a given virtual address range
-// like the BoundedPageAllocator, except that only a (small) part of the range
-// has actually been reserved. As such, this allocator relies on page
-// allocation hints for the OS to obtain pages inside the non-reserved part.
-// This allocator is used on OSes where reserving virtual address space (and
- thus a virtual memory cage) is too expensive, notably Windows pre 8.1.
-class FakeBoundedPageAllocator : public v8::PageAllocator {
- public:
- FakeBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
- size_t size, size_t reserved_size)
- : page_allocator_(page_allocator),
- start_(start),
- size_(size),
- reserved_size_(reserved_size),
- end_of_reserved_region_(start + reserved_size) {
- // The size is required to be a power of two so that obtaining a random
- // address inside the managed region simply requires a fixed number of
- // random bits as offset.
- DCHECK(base::bits::IsPowerOfTwo(size));
- DCHECK_LT(reserved_size, size);
-
- if (FLAG_random_seed != 0) {
- rng_.SetSeed(FLAG_random_seed);
- }
-
- reserved_region_page_allocator_ =
- std::make_unique<v8::base::BoundedPageAllocator>(
- page_allocator_, start_, reserved_size_,
- page_allocator_->AllocatePageSize(),
- base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
- }
-
- ~FakeBoundedPageAllocator() override = default;
-
- size_t AllocatePageSize() override {
- return page_allocator_->AllocatePageSize();
- }
-
- size_t CommitPageSize() override { return page_allocator_->CommitPageSize(); }
-
- void SetRandomMmapSeed(int64_t seed) override { rng_.SetSeed(seed); }
-
- void* GetRandomMmapAddr() override {
- // Generate a random number between 0 and size_, then add that to the start
- // address to obtain a random mmap address. We deliberately don't use our
- // provided page allocator's GetRandomMmapAddr here since that could be
- // biased, while we want uniformly distributed random numbers here.
- Address addr = rng_.NextInt64() % size_ + start_;
- addr = RoundDown(addr, AllocatePageSize());
- void* ptr = reinterpret_cast<void*>(addr);
- DCHECK(Contains(ptr, 1));
- return ptr;
- }
-
- void* AllocatePages(void* hint, size_t size, size_t alignment,
- Permission access) override {
- DCHECK(IsAligned(size, AllocatePageSize()));
- DCHECK(IsAligned(alignment, AllocatePageSize()));
-
- // First, try allocating the memory inside the reserved region.
- void* ptr = reserved_region_page_allocator_->AllocatePages(
- hint, size, alignment, access);
- if (ptr) return ptr;
-
- // Then, fall back to allocating memory outside of the reserved region
- // through page allocator hints.
-
- // Somewhat arbitrary size limitation to ensure that the loop below for
- // finding a fitting base address hint terminates quickly.
- if (size >= size_ / 2) return nullptr;
-
- if (!hint || !Contains(hint, size)) hint = GetRandomMmapAddr();
-
- static constexpr int kMaxAttempts = 10;
- for (int i = 0; i < kMaxAttempts; i++) {
- // If the hint wouldn't result in the entire allocation being inside the
- // managed region, simply retry. There is at least a 50% chance of
- // getting a usable address due to the size restriction above.
- while (!Contains(hint, size)) {
- hint = GetRandomMmapAddr();
- }
-
- ptr = page_allocator_->AllocatePages(hint, size, alignment, access);
- if (ptr && Contains(ptr, size)) {
- return ptr;
- } else if (ptr) {
- page_allocator_->FreePages(ptr, size);
- }
-
- // Retry at a different address.
- hint = GetRandomMmapAddr();
- }
-
- return nullptr;
- }
-
- bool FreePages(void* address, size_t size) override {
- return AllocatorFor(address)->FreePages(address, size);
- }
-
- bool ReleasePages(void* address, size_t size, size_t new_length) override {
- return AllocatorFor(address)->ReleasePages(address, size, new_length);
- }
-
- bool SetPermissions(void* address, size_t size,
- Permission permissions) override {
- return AllocatorFor(address)->SetPermissions(address, size, permissions);
- }
-
- bool DiscardSystemPages(void* address, size_t size) override {
- return AllocatorFor(address)->DiscardSystemPages(address, size);
- }
-
- bool DecommitPages(void* address, size_t size) override {
- return AllocatorFor(address)->DecommitPages(address, size);
- }
-
- private:
- bool Contains(void* ptr, size_t length) {
- Address addr = reinterpret_cast<Address>(ptr);
- return (addr >= start_) && ((addr + length) < (start_ + size_));
- }
-
- v8::PageAllocator* AllocatorFor(void* ptr) {
- Address addr = reinterpret_cast<Address>(ptr);
- if (addr < end_of_reserved_region_) {
- DCHECK_GE(addr, start_);
- return reserved_region_page_allocator_.get();
- } else {
- return page_allocator_;
- }
- }
-
- // The page allocator through which pages inside the region are allocated.
- v8::PageAllocator* const page_allocator_;
- // The bounded page allocator managing the sub-region that was actually
- // reserved.
- std::unique_ptr<v8::base::BoundedPageAllocator> reserved_region_page_allocator_;
-
- // Random number generator for generating random addresses.
- base::RandomNumberGenerator rng_;
-
- // The start of the virtual memory region in which to allocate pages. This is
- // also the start of the sub-region that was reserved.
- const Address start_;
- // The total size of the address space in which to allocate pages.
- const size_t size_;
- // The size of the sub-region that has actually been reserved.
- const size_t reserved_size_;
- // The end of the sub-region that has actually been reserved.
- const Address end_of_reserved_region_;
-};
-
-static uintptr_t DetermineAddressSpaceLimit() {
- // TODO(saelo) should this also take things like rlimits into account?
-#ifdef V8_TARGET_ARCH_64_BIT
-// TODO(saelo) this should be determined based on the CPU model being used
- // and its number of virtual address bits.
- uintptr_t virtual_address_bits = 48;
- // Virtual address space is split 50/50 between userspace and kernel
- uintptr_t userspace_virtual_address_bits = virtual_address_bits / 2;
- uintptr_t address_space_limit = 1UL << userspace_virtual_address_bits;
- return address_space_limit;
-#else
-#error Unsupported target architecture.
-#endif
-}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
- // TODO(saelo) We need to take the number of virtual address bits of the CPU
- // into account when determining the size of the cage. For example, if there
- // are only 39 bits available (some older Intel CPUs), split evenly between
- // userspace and kernel, then userspace can only address 256GB and so the
- // maximum cage size should probably be something around 64GB to 128GB.
- const size_t size = kVirtualMemoryCageSize;
+ bool use_guard_regions = true;
+ size_t size = kVirtualMemoryCageSize;
#if defined(V8_OS_WIN)
if (!IsWindows8Point1OrGreater()) {
// On Windows pre 8.1, reserving virtual memory is an expensive operation,
- // apparently because the OS already charges for the memory required for
- // all page table entries. For example, a 1TB reservation increases private
- // memory usage by 2GB. As such, it is not possible to create a proper
- // virtual memory cage there and so a fake cage is created which doesn't
- // reserve most of the virtual memory, and so doesn't incur the cost, but
- // also doesn't provide the desired security benefits.
- const size_t size_to_reserve = kFakeVirtualMemoryCageMinReservationSize;
- return InitializeAsFakeCage(page_allocator, size, size_to_reserve);
+ // possibly because page table entries are created for the address range.
+ // For example, a 1TB reservation increases private memory usage by 2GB. As
+ // such, we can unfortunately only create a minimal cage on these versions,
+ // without guard regions and without our desired security properties.
+ use_guard_regions = false;
+ size = kVirtualMemoryCageMinimumSize;
}
#endif
- // TODO(saelo) if this fails, we could still fall back to creating a fake
- // cage.
- const bool use_guard_regions = true;
return Initialize(page_allocator, size, use_guard_regions);
}
@@ -233,109 +54,34 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
// doesn't reduce the cage's security properties if it has a smaller size.
// Which of these options is ultimately taken likely depends on how frequently
// cage reservation failures occur in practice.
- size_t reservation_size;
- while (!reservation_base_ && size >= kVirtualMemoryCageMinimumSize) {
- reservation_size = size;
+ while (!base_ && size >= kVirtualMemoryCageMinimumSize) {
+ size_t reservation_size = size;
if (use_guard_regions) {
reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
}
-
- // Technically, we should use kNoAccessWillJitLater here instead since the
- // cage will contain JIT pages. However, currently this is not required as
- // PA anyway uses MAP_JIT for V8 mappings. Further, we want to eventually
- // move JIT pages out of the cage, at which point we'd like to forbid
- // making pages inside the cage executable, and so don't want MAP_JIT.
- void* hint = page_allocator->GetRandomMmapAddr();
- reservation_base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
- hint, reservation_size, kVirtualMemoryCageAlignment,
+ base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
+ nullptr, reservation_size, kVirtualMemoryCageAlignment,
PageAllocator::kNoAccess));
- if (!reservation_base_) {
+ if (!base_) {
size /= 2;
}
}
- if (!reservation_base_) return false;
+ if (!base_) return false;
- base_ = reservation_base_;
if (use_guard_regions) {
base_ += kVirtualMemoryCageGuardRegionSize;
+ has_guard_regions_ = true;
}
page_allocator_ = page_allocator;
size_ = size;
- reservation_size_ = reservation_size;
cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator_, base_, size_, page_allocator_->AllocatePageSize(),
base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
initialized_ = true;
- is_fake_cage_ = false;
-
- return true;
-}
-
-bool V8VirtualMemoryCage::InitializeAsFakeCage(
- v8::PageAllocator* page_allocator, size_t size, size_t size_to_reserve) {
- CHECK(!initialized_);
- CHECK(!disabled_);
- CHECK(base::bits::IsPowerOfTwo(size));
- CHECK(base::bits::IsPowerOfTwo(size_to_reserve));
- CHECK_GE(size, kVirtualMemoryCageMinimumSize);
- CHECK_LT(size_to_reserve, size);
-
- // Use a custom random number generator here to ensure that we get uniformly
- // distributed random numbers. We figure out the available address space
- // ourselves, and so are potentially better positioned to determine a good
- // base address for the cage than the embedder-provided GetRandomMmapAddr().
- base::RandomNumberGenerator rng;
- if (FLAG_random_seed != 0) {
- rng.SetSeed(FLAG_random_seed);
- }
-
- // We try to ensure that base + size is still fully within the process'
- // address space, even though we only reserve a fraction of the memory.
- Address address_space_end = DetermineAddressSpaceLimit();
- DCHECK(base::bits::IsPowerOfTwo(address_space_end));
- Address highest_possible_address = address_space_end - size;
- constexpr int kMaxAttempts = 10;
- for (int i = 1; i <= kMaxAttempts; i++) {
- // The size of the cage is small relative to the size of the usable address
- // space, so we can just retry until we get a usable hint.
- Address hint;
- do {
- hint = rng.NextInt64() % address_space_end;
- } while (hint > highest_possible_address);
-
- // Align to page size.
- hint = RoundDown(hint, page_allocator->AllocatePageSize());
-
- reservation_base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
- reinterpret_cast<void*>(hint), size_to_reserve,
- kVirtualMemoryCageAlignment, PageAllocator::kNoAccess));
-
- if (!reservation_base_) return false;
-
- // Take this base if it meets the requirements or if this is the last
- // attempt.
- if (reservation_base_ <= highest_possible_address || i == kMaxAttempts)
- break;
-
- // Can't use this base, so free the reservation and try again
- page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base_),
- size_to_reserve);
- reservation_base_ = kNullAddress;
- }
- DCHECK(reservation_base_);
-
- base_ = reservation_base_;
- size_ = size;
- reservation_size_ = size_to_reserve;
- initialized_ = true;
- is_fake_cage_ = true;
- page_allocator_ = page_allocator;
- cage_page_allocator_ = std::make_unique<FakeBoundedPageAllocator>(
- page_allocator_, base_, size_, reservation_size_);
return true;
}
@@ -343,24 +89,26 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
void V8VirtualMemoryCage::TearDown() {
if (initialized_) {
cage_page_allocator_.reset();
- CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base_),
- reservation_size_));
+ Address reservation_base = base_;
+ size_t reservation_size = size_;
+ if (has_guard_regions_) {
+ reservation_base -= kVirtualMemoryCageGuardRegionSize;
+ reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ }
+ CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base),
+ reservation_size));
+ page_allocator_ = nullptr;
base_ = kNullAddress;
size_ = 0;
- reservation_base_ = kNullAddress;
- reservation_size_ = 0;
initialized_ = false;
- is_fake_cage_ = false;
- page_allocator_ = nullptr;
+ has_guard_regions_ = false;
}
disabled_ = false;
}
-#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE
DEFINE_LAZY_LEAKY_OBJECT_GETTER(V8VirtualMemoryCage,
GetProcessWideVirtualMemoryCage)
+
#endif
} // namespace internal
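
The retry loop in `Initialize` halves the cage size until the reservation succeeds or the minimum is reached. A standalone sketch of that strategy under an assumed 128 GB reservation limit (`TryReserve` is a stand-in for `page_allocator->AllocatePages`):

```cpp
#include <cstddef>
#include <cstdio>

constexpr size_t kGB = size_t{1} << 30;
constexpr size_t kCageSize = size_t{1} << 40;  // 1 TB
constexpr size_t kMinimumSize = 32 * kGB;
constexpr size_t kGuardRegionSize = 32 * kGB;

// Stand-in for page_allocator->AllocatePages: simulate an OS that can only
// reserve up to 128 GB of contiguous address space.
bool TryReserve(size_t bytes) { return bytes <= 128 * kGB; }

// Returns the cage size (excluding guard regions) that could be reserved,
// or 0 on failure -- mirroring the loop in V8VirtualMemoryCage::Initialize.
size_t ReserveCage(bool use_guard_regions) {
  size_t size = kCageSize;
  while (size >= kMinimumSize) {
    size_t reservation_size = size;
    if (use_guard_regions) reservation_size += 2 * kGuardRegionSize;
    if (TryReserve(reservation_size)) return size;
    size /= 2;  // halve and retry
  }
  return 0;
}

int main() {
  size_t size = ReserveCage(/*use_guard_regions=*/true);
  std::printf("cage size: %zu GB\n", size / kGB);  // 64 under the 128 GB limit
  return size != 0 ? 0 : 1;
}
```
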
diff --git a/src/init/vm-cage.h b/src/init/vm-cage.h
index 3a35cde46f..d7e0728ca1 100644
--- a/src/init/vm-cage.h
+++ b/src/init/vm-cage.h
@@ -15,7 +15,7 @@ class PageAllocator;
namespace internal {
-#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
+#ifdef V8_VIRTUAL_MEMORY_CAGE
/**
* V8 Virtual Memory Cage.
@@ -70,12 +70,11 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
bool is_initialized() const { return initialized_; }
bool is_disabled() const { return disabled_; }
bool is_enabled() const { return !disabled_; }
- bool is_fake_cage() const { return is_fake_cage_; }
Address base() const { return base_; }
size_t size() const { return size_; }
- v8::PageAllocator* page_allocator() const {
+ base::BoundedPageAllocator* page_allocator() const {
return cage_page_allocator_.get();
}
@@ -92,48 +91,27 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
// cage without guard regions, which would otherwise consume too much memory.
friend class SequentialUnmapperTest;
- // These tests call the private Initialize methods below.
- FRIEND_TEST(VirtualMemoryCageTest, InitializationWithSize);
- FRIEND_TEST(VirtualMemoryCageTest, InitializationAsFakeCage);
- FRIEND_TEST(VirtualMemoryCageTest, FakeCagePageAllocation);
-
// We allow tests to disable the guard regions around the cage. This is useful
// for example for tests like the SequentialUnmapperTest which track page
// allocations and so would incur a large overhead from the guard regions.
- bool Initialize(v8::PageAllocator* page_allocator, size_t size,
+ bool Initialize(v8::PageAllocator* page_allocator, size_t total_size,
bool use_guard_regions);
- // Used on OSes where reserving virtual memory is too expensive. A fake cage
- // does not reserve all of the virtual memory and so doesn't have the desired
- // security properties.
- bool InitializeAsFakeCage(v8::PageAllocator* page_allocator, size_t size,
- size_t size_to_reserve);
-
Address base_ = kNullAddress;
size_t size_ = 0;
-
- // Base and size of the virtual memory reservation backing this cage. These
- // can be different from the cage base and size due to guard regions or when a
- // fake cage is used.
- Address reservation_base_ = kNullAddress;
- size_t reservation_size_ = 0;
-
+ bool has_guard_regions_ = false;
bool initialized_ = false;
bool disabled_ = false;
- bool is_fake_cage_ = false;
-
- // The allocator through which the virtual memory of the cage was allocated.
+ // The PageAllocator through which the virtual memory of the cage was
+ // allocated.
v8::PageAllocator* page_allocator_ = nullptr;
- // The allocator to allocate pages inside the cage.
- std::unique_ptr<v8::PageAllocator> cage_page_allocator_;
+ // The BoundedPageAllocator to allocate pages inside the cage.
+ std::unique_ptr<base::BoundedPageAllocator> cage_page_allocator_;
};
-#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-// This function is only available when the cage is actually used.
V8_EXPORT_PRIVATE V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
-#endif
+
+#endif // V8_VIRTUAL_MEMORY_CAGE
V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
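
A hedged usage sketch of the slimmed-down interface above, assuming a V8 checkout built with V8_VIRTUAL_MEMORY_CAGE (include paths as in the diff); the call sequence follows the public methods shown, and is a sketch rather than sanctioned embedder API:

```cpp
#include "src/base/page-allocator.h"
#include "src/init/vm-cage.h"

namespace demo {

void UseCage() {
  v8::base::PageAllocator os_allocator;
  v8::internal::V8VirtualMemoryCage cage;
  if (!cage.Initialize(&os_allocator)) return;

  // Every page handed out by this allocator lies in [base(), base() + size()).
  v8::base::BoundedPageAllocator* in_cage = cage.page_allocator();
  size_t page = in_cage->AllocatePageSize();
  void* ptr = in_cage->AllocatePages(nullptr, page, page,
                                     v8::PageAllocator::kReadWrite);
  if (ptr) in_cage->FreePages(ptr, page);

  cage.TearDown();
}

}  // namespace demo
```
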
diff --git a/test/cctest/BUILD.gn b/test/cctest/BUILD.gn
index 79e30787c6..ae2492c6e7 100644
--- a/test/cctest/BUILD.gn
+++ b/test/cctest/BUILD.gn
@@ -291,6 +291,7 @@ v8_source_set("cctest_sources") {
"test-utils.cc",
"test-verifiers.cc",
"test-version.cc",
+ "test-virtual-memory-cage.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
"test-web-snapshots.cc",
diff --git a/test/cctest/test-virtual-memory-cage.cc b/test/cctest/test-virtual-memory-cage.cc
new file mode 100644
index 0000000000..d5afed0590
--- /dev/null
+++ b/test/cctest/test-virtual-memory-cage.cc
@@ -0,0 +1,36 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/init/vm-cage.h"
+#include "test/cctest/cctest.h"
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+
+namespace v8 {
+namespace internal {
+
+UNINITIALIZED_TEST(VirtualMemoryCageCreation) {
+ base::PageAllocator page_allocator;
+
+ V8VirtualMemoryCage cage;
+
+ CHECK(!cage.is_initialized());
+ CHECK(!cage.is_disabled());
+ CHECK_EQ(cage.size(), 0);
+
+ CHECK(cage.Initialize(&page_allocator));
+
+ CHECK(cage.is_initialized());
+ CHECK_GT(cage.base(), 0);
+ CHECK_GT(cage.size(), 0);
+
+ cage.TearDown();
+
+ CHECK(!cage.is_initialized());
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_VIRTUAL_MEMORY_CAGE
diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn
index d159f34b0d..e2ea833cf9 100644
--- a/test/unittests/BUILD.gn
+++ b/test/unittests/BUILD.gn
@@ -376,7 +376,6 @@ v8_source_set("unittests_sources") {
"regress/regress-crbug-938251-unittest.cc",
"run-all-unittests.cc",
"runtime/runtime-debug-unittest.cc",
- "security/virtual-memory-cage-unittest.cc",
"strings/char-predicates-unittest.cc",
"strings/unicode-unittest.cc",
"tasks/background-compile-task-unittest.cc",
diff --git a/test/unittests/security/virtual-memory-cage-unittest.cc b/test/unittests/security/virtual-memory-cage-unittest.cc
deleted file mode 100644
index 641a763fbf..0000000000
--- a/test/unittests/security/virtual-memory-cage-unittest.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "src/init/vm-cage.h"
-#include "test/unittests/test-utils.h"
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-
-namespace v8 {
-namespace internal {
-
-TEST(VirtualMemoryCageTest, Initialization) {
- base::PageAllocator page_allocator;
-
- V8VirtualMemoryCage cage;
-
- EXPECT_FALSE(cage.is_initialized());
- EXPECT_FALSE(cage.is_disabled());
- EXPECT_FALSE(cage.is_fake_cage());
- EXPECT_EQ(cage.size(), 0UL);
-
- EXPECT_TRUE(cage.Initialize(&page_allocator));
-
- EXPECT_TRUE(cage.is_initialized());
- EXPECT_NE(cage.base(), 0UL);
- EXPECT_GT(cage.size(), 0UL);
-
- cage.TearDown();
-
- EXPECT_FALSE(cage.is_initialized());
-}
-
-TEST(VirtualMemoryCageTest, InitializationWithSize) {
- base::PageAllocator page_allocator;
- V8VirtualMemoryCage cage;
- size_t size = kVirtualMemoryCageMinimumSize;
- const bool use_guard_regions = false;
- EXPECT_TRUE(cage.Initialize(&page_allocator, size, use_guard_regions));
-
- EXPECT_TRUE(cage.is_initialized());
- EXPECT_FALSE(cage.is_fake_cage());
- EXPECT_EQ(cage.size(), size);
-
- cage.TearDown();
-}
-
-TEST(VirtualMemoryCageTest, InitializationAsFakeCage) {
- base::PageAllocator page_allocator;
- V8VirtualMemoryCage cage;
- // Total size of the fake cage.
- size_t size = kVirtualMemoryCageSize;
- // Size of the virtual memory that is actually reserved at the start of the
- // cage.
- size_t reserved_size = 2 * page_allocator.AllocatePageSize();
- EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
-
- EXPECT_TRUE(cage.is_initialized());
- EXPECT_TRUE(cage.is_fake_cage());
- EXPECT_NE(cage.base(), 0UL);
- EXPECT_EQ(cage.size(), size);
-
- cage.TearDown();
-
- EXPECT_FALSE(cage.is_initialized());
-}
-
-TEST(VirtualMemoryCageTest, Contains) {
- base::PageAllocator page_allocator;
- V8VirtualMemoryCage cage;
- EXPECT_TRUE(cage.Initialize(&page_allocator));
-
- Address base = cage.base();
- size_t size = cage.size();
- base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
-
- EXPECT_TRUE(cage.Contains(base));
- EXPECT_TRUE(cage.Contains(base + size - 1));
- for (int i = 0; i < 10; i++) {
- size_t offset = rng.NextInt64() % size;
- EXPECT_TRUE(cage.Contains(base + offset));
- }
-
- EXPECT_FALSE(cage.Contains(base - 1));
- EXPECT_FALSE(cage.Contains(base + size));
- for (int i = 0; i < 10; i++) {
- Address addr = rng.NextInt64();
- if (addr < base || addr >= base + size) {
- EXPECT_FALSE(cage.Contains(addr));
- }
- }
-
- cage.TearDown();
-}
-
-void TestCagePageAllocation(V8VirtualMemoryCage& cage) {
- const size_t kAllocationSizesInPages[] = {1, 1, 2, 3, 5, 8, 13, 21, 34};
- constexpr int kNumAllocations = arraysize(kAllocationSizesInPages);
-
- PageAllocator* allocator = cage.page_allocator();
- size_t page_size = allocator->AllocatePageSize();
- std::vector<void*> allocations;
- for (int i = 0; i < kNumAllocations; i++) {
- size_t length = page_size * kAllocationSizesInPages[i];
- size_t alignment = page_size;
- void* ptr = allocator->AllocatePages(nullptr, length, alignment,
- PageAllocator::kNoAccess);
- EXPECT_NE(ptr, nullptr);
- EXPECT_TRUE(cage.Contains(ptr));
- allocations.push_back(ptr);
- }
-
- for (int i = 0; i < kNumAllocations; i++) {
- size_t length = page_size * kAllocationSizesInPages[i];
- allocator->FreePages(allocations[i], length);
- }
-}
-
-TEST(VirtualMemoryCageTest, PageAllocation) {
- base::PageAllocator page_allocator;
- V8VirtualMemoryCage cage;
- EXPECT_TRUE(cage.Initialize(&page_allocator));
-
- TestCagePageAllocation(cage);
-
- cage.TearDown();
-}
-
-TEST(VirtualMemoryCageTest, FakeCagePageAllocation) {
- base::PageAllocator page_allocator;
- V8VirtualMemoryCage cage;
- size_t size = kVirtualMemoryCageSize;
- // Only reserve two pages so the test will allocate memory inside and outside
- // of the reserved region.
- size_t reserved_size = 2 * page_allocator.AllocatePageSize();
- EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
-
- TestCagePageAllocation(cage);
-
- cage.TearDown();
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE