[ptr-cage] Introduce PtrComprCage

The pointer compression cage is the virtual memory reservation
that all compressed pointers fall within. This CL splits pointer
compression into two modes: a per-Isolate cage and a single cage
shared among multiple Isolates.

When multiple Isolates share a cage, they can decompress each
other's pointers and share the same virtual memory range.
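
For intuition: compression stores a tagged pointer as its low 32 bits,
and decompression adds the cage base back. A minimal sketch with
illustrative helpers (not V8's actual ones):

    using Address = uintptr_t;

    // Compress: keep only the offset within the 4Gb cage.
    uint32_t CompressTagged(Address ptr) { return static_cast<uint32_t>(ptr); }

    // Decompress: any Isolate that knows |cage_base| can widen a compressed
    // value, which is why Isolates sharing one cage can decompress each
    // other's pointers.
    Address DecompressTagged(Address cage_base, uint32_t compressed) {
      return cage_base + compressed;
    }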

Bug: v8:11460
Change-Id: I7b89b7413b8e7ca6b8b6faafd083dc387542a8b4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2783674
Reviewed-by: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73916}
Author: Shu-yu Guo <syg@chromium.org>
Date: 2021-04-09 19:09:41 -07:00 (committed by Commit Bot)
Parent: 5e0b94c4dc
Commit: 3ada6f2740
32 changed files with 454 additions and 167 deletions


@@ -383,9 +383,10 @@ if (v8_multi_arch_build &&
   v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
 }
 if (v8_enable_short_builtin_calls &&
-    (!v8_enable_pointer_compression || v8_control_flow_integrity)) {
-  # Disable short calls when pointer compression is not enabled.
-  # Or when CFI is enabled (until the CFI-related issues are fixed).
+    (!v8_enable_pointer_compression ||
+     v8_enable_pointer_compression_shared_cage || v8_control_flow_integrity)) {
+  # Disable short calls when pointer compression in a per-Isolate cage is not
+  # enabled. Or when CFI is enabled (until the CFI-related issues are fixed).
   v8_enable_short_builtin_calls = false
 }
 if (v8_enable_shared_ro_heap == "") {
if (v8_enable_shared_ro_heap == "") {
@@ -585,11 +586,11 @@ if (v8_enable_v8_checks) {
 }
 if (v8_enable_pointer_compression) {
   enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS" ]
-}
-if (v8_enable_pointer_compression_shared_cage) {
-  enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_SHARED_CAGE" ]
-} else if (v8_enable_pointer_compression) {
-  enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE" ]
+  if (v8_enable_pointer_compression_shared_cage) {
+    enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_SHARED_CAGE" ]
+  } else {
+    enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE" ]
+  }
 }
 if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
   enabled_external_v8_defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
@@ -1915,6 +1916,8 @@ action("v8_dump_build_config") {
     "v8_enable_lite_mode=$v8_enable_lite_mode",
     "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats",
     "v8_enable_pointer_compression=$v8_enable_pointer_compression",
+    "v8_enable_pointer_compression_shared_cage=" +
+        "$v8_enable_pointer_compression_shared_cage",
     "v8_enable_webassembly=$v8_enable_webassembly",
     "v8_control_flow_integrity=$v8_control_flow_integrity",
     "v8_target_cpu=\"$v8_target_cpu\"",
@@ -2665,6 +2668,7 @@ v8_header_set("v8_internal_headers") {
     "src/init/heap-symbols.h",
     "src/init/icu_util.h",
     "src/init/isolate-allocator.h",
+    "src/init/ptr-compr-cage.h",
     "src/init/setup-isolate.h",
     "src/init/startup-data-util.h",
     "src/init/v8.h",
@@ -3752,6 +3756,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/init/bootstrapper.cc",
     "src/init/icu_util.cc",
     "src/init/isolate-allocator.cc",
+    "src/init/ptr-compr-cage.cc",
     "src/init/startup-data-util.cc",
     "src/init/v8.cc",
     "src/interpreter/bytecode-array-builder.cc",


@@ -224,8 +224,10 @@ class Internals {
       kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
   static const int kIsolateFastApiCallTargetOffset =
       kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
-  static const int kIsolateStackGuardOffset =
+  static const int kIsolateCageBaseOffset =
       kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
+  static const int kIsolateStackGuardOffset =
+      kIsolateCageBaseOffset + kApiSystemPointerSize;
   static const int kIsolateRootsOffset =
       kIsolateStackGuardOffset + 7 * kApiSystemPointerSize;


@@ -639,7 +639,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   // Initialize the pointer cage base register.
-  __ Mov(kPtrComprCageBaseRegister, x0);
+  __ LoadRootRelative(kPtrComprCageBaseRegister,
+                      IsolateData::cage_base_offset());
 #endif
 }


@@ -374,7 +374,7 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
       IntPtrSub(full_base, Signed(ChangeUint32ToWord(compressed_base)));
   // Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset.
   DCHECK_EQ(
-      isolate()->isolate_root(),
+      isolate()->cage_base(),
       JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate()));
   // See JSTypedArray::SetOnHeapDataPtr() for details.
   offset = Unsigned(IntPtrAdd(offset, ptr_compr_cage_base));
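
The invariant this DCHECK encodes, roughly (a sketch of the scheme,
not the exact V8 helpers): the 64-bit base was split as full_base ==
cage_base + compressed_base, so the compensation added back while
rebuilding the data pointer must be exactly the cage base:

    // Hypothetical illustration of the compensation invariant.
    uintptr_t RecoverFullBase(uintptr_t cage_base, uint32_t compressed_base) {
      return cage_base + compressed_base;  // == full_base
    }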


@@ -384,7 +384,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   // Initialize the pointer cage base register.
-  // TODO(syg): Actually make a cage.
-  __ movq(kPtrComprCageBaseRegister, arg_reg_1);
+  __ LoadRootRelative(kPtrComprCageBaseRegister,
+                      IsolateData::cage_base_offset());
 #endif
 }


@@ -7,12 +7,12 @@
 #include <ctype.h>
 
-#include "src/common/globals.h"
 #include "src/base/bits.h"
 #include "src/codegen/arm64/assembler-arm64-inl.h"
 #include "src/codegen/arm64/assembler-arm64.h"
 #include "src/codegen/macro-assembler.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
 
 namespace v8 {
 namespace internal {
@@ -1037,7 +1037,7 @@ void TurboAssembler::InitializeRootRegister() {
   ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
   Mov(kRootRegister, Operand(isolate_root));
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-  Mov(kPtrComprCageBaseRegister, Operand(isolate_root));
+  LoadRootRelative(kPtrComprCageBaseRegister, IsolateData::cage_base_offset());
 #endif
 }
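
LoadRootRelative is a root-register-relative load; on arm64 it amounts
to roughly the following (paraphrased from the usual TurboAssembler
helper of this period, not part of this diff):

    void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
      // kRootRegister holds the isolate root, so the cage base is one
      // load away at a fixed offset inside IsolateData.
      Ldr(destination, MemOperand(kRootRegister, offset));
    }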


@@ -14,6 +14,7 @@
 #include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
 #include "src/codegen/x64/assembler-x64.h"
 #include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
 #include "src/objects/contexts.h"
 #include "src/objects/tagged-index.h"
@@ -579,7 +580,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
     ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
     Move(kRootRegister, isolate_root);
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-    Move(kPtrComprCageBaseRegister, isolate_root);
+    LoadRootRelative(kPtrComprCageBaseRegister,
+                     IsolateData::cage_base_offset());
 #endif
   }


@@ -101,10 +101,11 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
 // Determine whether the short builtin calls optimization is enabled.
 #ifdef V8_SHORT_BUILTIN_CALLS
-#ifndef V8_COMPRESS_POINTERS
+#ifndef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
 // TODO(11527): Fix this by passing Isolate* to Code::OffHeapInstructionStart()
 // and friends.
-#error Short builtin calls feature requires pointer compression
+#error Short builtin calls feature requires pointer compression with per- \
+Isolate cage
 #endif
 #endif
@@ -1758,6 +1759,10 @@ class PtrComprCageBase {
   inline Address address() const;
 
+  bool operator==(const PtrComprCageBase& other) const {
+    return address_ == other.address_;
+  }
+
  private:
   Address address_;
 };


@@ -15,30 +15,10 @@ namespace internal {
 #ifdef V8_COMPRESS_POINTERS
 
-#if defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
 PtrComprCageBase::PtrComprCageBase(const Isolate* isolate)
-    : address_(isolate->isolate_root()) {}
+    : address_(isolate->cage_base()) {}
 PtrComprCageBase::PtrComprCageBase(const LocalIsolate* isolate)
-    : address_(isolate->isolate_root()) {}
-#elif defined V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-PtrComprCageBase::PtrComprCageBase(const Isolate* isolate)
-    : address_(isolate->isolate_root()) {
-  UNIMPLEMENTED();
-}
-PtrComprCageBase::PtrComprCageBase(const LocalIsolate* isolate)
-    : address_(isolate->isolate_root()) {
-  UNIMPLEMENTED();
-}
-#else
-#error "Pointer compression build configuration error"
-#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE,
-        // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+    : address_(isolate->cage_base()) {}
 
 Address PtrComprCageBase::address() const {
   Address ret = address_;


@@ -967,7 +967,7 @@ void Deoptimizer::DoComputeOutputFrames() {
                                             isolate()->isolate_root());
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   topmost->GetRegisterValues()->SetRegister(kPtrComprCageBaseRegister.code(),
-                                            isolate()->isolate_root());
+                                            isolate()->cage_base());
 #endif
 
   // Print some helpful diagnostic information.


@@ -29,13 +29,19 @@ class Isolate;
 // register.
 class IsolateData final {
  public:
-  explicit IsolateData(Isolate* isolate) : stack_guard_(isolate) {}
+  IsolateData(Isolate* isolate, Address cage_base)
+      : cage_base_(cage_base), stack_guard_(isolate) {}
 
   IsolateData(const IsolateData&) = delete;
   IsolateData& operator=(const IsolateData&) = delete;
 
   static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
 
+  // The value of kPtrComprCageBaseRegister.
+  Address cage_base() const {
+    return COMPRESS_POINTERS_BOOL ? cage_base_ : kNullAddress;
+  }
+
   // The value of the kRootRegister.
   Address isolate_root() const {
     return reinterpret_cast<Address>(this) + kIsolateRootBias;
@@ -82,6 +88,10 @@ class IsolateData final {
     return kFastApiCallTargetOffset - kIsolateRootBias;
   }
 
+  static constexpr int cage_base_offset() {
+    return kCageBaseOffset - kIsolateRootBias;
+  }
+
   // Root-register-relative offset of the given builtin table entry.
   // TODO(ishell): remove in favour of typified id version.
   static int builtin_slot_offset(int builtin_index) {
@@ -142,6 +152,7 @@ class IsolateData final {
   V(kFastCCallCallerFPOffset, kSystemPointerSize)                  \
   V(kFastCCallCallerPCOffset, kSystemPointerSize)                  \
   V(kFastApiCallTargetOffset, kSystemPointerSize)                  \
+  V(kCageBaseOffset, kSystemPointerSize)                           \
   V(kStackGuardOffset, StackGuard::kSizeInBytes)                   \
   V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize) \
   V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes) \
@@ -180,6 +191,8 @@ class IsolateData final {
   Address fast_c_call_caller_pc_ = kNullAddress;
   Address fast_api_call_target_ = kNullAddress;
+  Address cage_base_ = kNullAddress;
+
   // Fields related to the system and JS stack. In particular, this contains
   // the stack limit used by stack checks in generated code.
   StackGuard stack_guard_;
@@ -245,6 +258,7 @@ void IsolateData::AssertPredictableLayout() {
                 kFastCCallCallerPCOffset);
   STATIC_ASSERT(offsetof(IsolateData, fast_api_call_target_) ==
                 kFastApiCallTargetOffset);
+  STATIC_ASSERT(offsetof(IsolateData, cage_base_) == kCageBaseOffset);
   STATIC_ASSERT(offsetof(IsolateData, stack_guard_) == kStackGuardOffset);
 #ifdef V8_HEAP_SANDBOX
   STATIC_ASSERT(offsetof(IsolateData, external_pointer_table_) ==
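
The bias arithmetic above gives generated code a one-load path to the
cage base; written out (derived from the accessors in this file):

    // kRootRegister              == isolate_data address + kIsolateRootBias
    // cage_base_offset()         == kCageBaseOffset - kIsolateRootBias
    // kRootRegister + cage_base_offset()
    //                            == isolate_data address + kCageBaseOffset
    //                            == address of IsolateData::cage_base_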


@@ -51,7 +51,7 @@ V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
   heap_internals::MemoryChunk* chunk =
       heap_internals::MemoryChunk::FromHeapObject(object);
   return chunk->GetHeap();
-#endif  // V8_COMPRESS_POINTERS || V8_ENABLE_THIRD_PARTY_HEAP
+#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
 }
 
 V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
@@ -64,26 +64,30 @@ V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
   return isolate;
 #else
   return Isolate::FromHeap(GetHeapFromWritableObject(object));
-#endif  // V8_COMPRESS_POINTERS, V8_ENABLE_THIRD_PARTY_HEAP
+#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
 }
 
 V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
   *isolate = Heap::GetIsolateFromWritableObject(object);
   return true;
-#elif defined V8_COMPRESS_POINTERS
+#elif defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
   *isolate = GetIsolateFromWritableObject(object);
   return true;
 #else
   heap_internals::MemoryChunk* chunk =
       heap_internals::MemoryChunk::FromHeapObject(object);
+#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+  // TODO(syg): Share RO space across Isolates for shared cage; need to fix
+  // Symbol::Description.
   if (chunk->InReadOnlySpace()) {
     *isolate = nullptr;
     return false;
   }
+#endif  // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   *isolate = Isolate::FromHeap(chunk->GetHeap());
   return true;
-#endif  // V8_COMPRESS_POINTERS, V8_ENABLE_THIRD_PARTY_HEAP
+#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
 }
// Use this function instead of Internals::GetIsolateForHeapSandbox for internal


@@ -53,6 +53,7 @@
 #include "src/heap/read-only-heap.h"
 #include "src/ic/stub-cache.h"
 #include "src/init/bootstrapper.h"
+#include "src/init/ptr-compr-cage.h"
 #include "src/init/setup-isolate.h"
 #include "src/init/v8.h"
 #include "src/interpreter/interpreter.h"
@@ -2887,6 +2888,7 @@ Isolate* Isolate::New() {
   Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
 #ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
   DCHECK(IsAligned(isolate->isolate_root(), kPtrComprCageBaseAlignment));
+  DCHECK_EQ(isolate->isolate_root(), isolate->cage_base());
 #endif
 
 #ifdef DEBUG
@@ -2947,7 +2949,7 @@ v8::PageAllocator* Isolate::page_allocator() {
 }
 
 Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
-    : isolate_data_(this),
+    : isolate_data_(this, isolate_allocator->GetPtrComprCageBaseAddress()),
       isolate_allocator_(std::move(isolate_allocator)),
       id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
       allocator_(new TracingAccountingAllocator(this)),
@@ -3002,6 +3004,8 @@ void Isolate::CheckIsolateLayout() {
   CHECK_EQ(static_cast<int>(
                OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_pc_)),
            Internals::kIsolateFastCCallCallerPcOffset);
+  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.cage_base_)),
+           Internals::kIsolateCageBaseOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.stack_guard_)),
            Internals::kIsolateStackGuardOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),

View File

@@ -1021,6 +1021,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
   const IsolateData* isolate_data() const { return &isolate_data_; }
   IsolateData* isolate_data() { return &isolate_data_; }
 
+  // When pointer compression is on, this is the base address of the pointer
+  // compression cage, and the kPtrComprCageBaseRegister is set to this
+  // value. When pointer compression is off, this is always kNullAddress.
+  Address cage_base() const {
+    DCHECK_IMPLIES(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL &&
+                       !COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL,
+                   isolate_data()->cage_base() == kNullAddress);
+    return isolate_data()->cage_base();
+  }
+
   // Generated code can embed this address to get access to the isolate-specific
   // data (for example, roots, external references, builtins, etc.).
   // The kRootRegister is set to this value.

@@ -12,7 +12,7 @@
 namespace v8 {
 namespace internal {
 
-Address LocalIsolate::isolate_root() const { return isolate_->isolate_root(); }
+Address LocalIsolate::cage_base() const { return isolate_->cage_base(); }
 
 ReadOnlyHeap* LocalIsolate::read_only_heap() const {
   return isolate_->read_only_heap();
 }


@@ -50,7 +50,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
   LocalHeap* heap() { return &heap_; }
 
-  inline Address isolate_root() const;
+  inline Address cage_base() const;
   inline ReadOnlyHeap* read_only_heap() const;
   inline Object root(RootIndex index) const;


@@ -13,29 +13,47 @@ namespace v8 {
 namespace internal {
 
 IsolateAllocator::IsolateAllocator() {
-#ifdef V8_COMPRESS_POINTERS
-  Address heap_reservation_address = InitReservation();
-  CommitPagesForIsolate(heap_reservation_address);
+#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
+  isolate_cage_.InitReservationOrDie();
+  page_allocator_ = isolate_cage_.page_allocator();
+  CommitPagesForIsolate(isolate_cage_.base());
+#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+  // Allocate Isolate in C++ heap when sharing a cage.
+  CHECK(PtrComprCage::GetProcessWideCage()->IsReserved());
+  page_allocator_ = PtrComprCage::GetProcessWideCage()->page_allocator();
+  isolate_memory_ = ::operator new(sizeof(Isolate));
 #else
   // Allocate Isolate in C++ heap.
   page_allocator_ = GetPlatformPageAllocator();
   isolate_memory_ = ::operator new(sizeof(Isolate));
-  DCHECK(!reservation_.IsReserved());
 #endif  // V8_COMPRESS_POINTERS
 
   CHECK_NOT_NULL(page_allocator_);
 }
 
 IsolateAllocator::~IsolateAllocator() {
-  if (reservation_.IsReserved()) {
-    // The actual memory will be freed when the |reservation_| will die.
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+  if (isolate_cage_.reservation_.IsReserved()) {
+    // The actual memory will be freed when the |isolate_cage_| will die.
     return;
   }
+#endif
 
   // The memory was allocated in C++ heap.
   ::operator delete(isolate_memory_);
 }
 
-#ifdef V8_COMPRESS_POINTERS
+Address IsolateAllocator::GetPtrComprCageBaseAddress() const {
+#if defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+  return isolate_cage_.base();
+#elif defined V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+  return PtrComprCage::GetProcessWideCage()->base();
+#else
+  return kNullAddress;
+#endif
+}
+
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
 namespace {
 
 // "IsolateRootBiasPage" is an optional region before the 4Gb aligned
@@ -50,109 +68,18 @@ inline size_t GetIsolateRootBiasPageSize(
 }  // namespace
 
-Address IsolateAllocator::InitReservation() {
-  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
-  const size_t kIsolateRootBiasPageSize =
-      GetIsolateRootBiasPageSize(platform_page_allocator);
-
-  // Reserve a |4Gb + kIsolateRootBiasPageSize| region such as that the
-  // resevation address plus |kIsolateRootBiasPageSize| is 4Gb aligned.
-  const size_t reservation_size =
-      kPtrComprCageReservationSize + kIsolateRootBiasPageSize;
-  const size_t base_alignment = kPtrComprCageBaseAlignment;
-
-  const int kMaxAttempts = 4;
-  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
-    Address hint = RoundDown(reinterpret_cast<Address>(
-                                 platform_page_allocator->GetRandomMmapAddr()),
-                             base_alignment) -
-                   kIsolateRootBiasPageSize;
-
-    // Within this reservation there will be a sub-region with proper alignment.
-    VirtualMemory padded_reservation(platform_page_allocator,
-                                     reservation_size * 2,
-                                     reinterpret_cast<void*>(hint));
-    if (!padded_reservation.IsReserved()) break;
-
-    // Find properly aligned sub-region inside the reservation.
-    Address address =
-        RoundUp(padded_reservation.address() + kIsolateRootBiasPageSize,
-                base_alignment) -
-        kIsolateRootBiasPageSize;
-    CHECK(padded_reservation.InVM(address, reservation_size));
-
-#if defined(V8_OS_FUCHSIA)
-    // Fuchsia does not respect given hints so as a workaround we will use
-    // overreserved address space region instead of trying to re-reserve
-    // a subregion.
-    bool overreserve = true;
-#else
-    // For the last attempt use the overreserved region to avoid an OOM crash.
-    // This case can happen if there are many isolates being created in
-    // parallel that race for reserving the regions.
-    bool overreserve = (attempt == kMaxAttempts - 1);
-#endif
-
-    if (overreserve) {
-      if (padded_reservation.InVM(address, reservation_size)) {
-        reservation_ = std::move(padded_reservation);
-        return address;
-      }
-    } else {
-      // Now free the padded reservation and immediately try to reserve an exact
-      // region at aligned address. We have to do this dancing because the
-      // reservation address requirement is more complex than just a certain
-      // alignment and not all operating systems support freeing parts of
-      // reserved address space regions.
-      padded_reservation.Free();
-
-      VirtualMemory reservation(platform_page_allocator, reservation_size,
-                                reinterpret_cast<void*>(address));
-      if (!reservation.IsReserved()) break;
-
-      // The reservation could still be somewhere else but we can accept it
-      // if it has the required alignment.
-      Address address =
-          RoundUp(reservation.address() + kIsolateRootBiasPageSize,
-                  base_alignment) -
-          kIsolateRootBiasPageSize;
-
-      if (reservation.address() == address) {
-        reservation_ = std::move(reservation);
-        CHECK_EQ(reservation_.size(), reservation_size);
-        return address;
-      }
-    }
-  }
-
-  V8::FatalProcessOutOfMemory(nullptr,
-                              "Failed to reserve memory for new V8 Isolate");
-  return kNullAddress;
-}
-
 void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
-  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
   const size_t kIsolateRootBiasPageSize =
-      GetIsolateRootBiasPageSize(platform_page_allocator);
+      GetIsolateRootBiasPageSize(page_allocator_);
 
   Address isolate_root = heap_reservation_address + kIsolateRootBiasPageSize;
   CHECK(IsAligned(isolate_root, kPtrComprCageBaseAlignment));
 
-  CHECK(reservation_.InVM(
+  CHECK(isolate_cage_.reservation_.InVM(
       heap_reservation_address,
       kPtrComprCageReservationSize + kIsolateRootBiasPageSize));
 
-  // Simplify BoundedPageAllocator's life by configuring it to use same page
-  // size as the Heap will use (MemoryChunk::kPageSize).
-  size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
-                             platform_page_allocator->AllocatePageSize());
-
-  page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
-      platform_page_allocator, isolate_root, kPtrComprCageReservationSize,
-      page_size);
-  page_allocator_ = page_allocator_instance_.get();
+  size_t page_size = page_allocator_->AllocatePageSize();
 
   Address isolate_address = isolate_root - Isolate::isolate_root_bias();
   Address isolate_end = isolate_address + sizeof(Isolate);
@@ -162,24 +89,24 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
     size_t reserved_region_size =
         RoundUp(isolate_end, page_size) - reserved_region_address;
 
-    CHECK(page_allocator_instance_->AllocatePagesAt(
+    CHECK(isolate_cage_.page_allocator()->AllocatePagesAt(
         reserved_region_address, reserved_region_size,
         PageAllocator::Permission::kNoAccess));
   }
 
   // Commit pages where the Isolate will be stored.
   {
-    size_t commit_page_size = platform_page_allocator->CommitPageSize();
+    size_t commit_page_size = page_allocator_->CommitPageSize();
     Address committed_region_address =
         RoundDown(isolate_address, commit_page_size);
     size_t committed_region_size =
         RoundUp(isolate_end, commit_page_size) - committed_region_address;
 
-    // We are using |reservation_| directly here because |page_allocator_| has
-    // bigger commit page size than we actually need.
-    CHECK(reservation_.SetPermissions(committed_region_address,
-                                      committed_region_size,
-                                      PageAllocator::kReadWrite));
+    // We are using |isolate_cage_.reservation_| directly here because
+    // |page_allocator_| has bigger commit page size than we actually need.
+    CHECK(isolate_cage_.reservation_.SetPermissions(committed_region_address,
+                                                    committed_region_size,
+                                                    PageAllocator::kReadWrite));
 
     if (Heap::ShouldZapGarbage()) {
       MemsetPointer(reinterpret_cast<Address*>(committed_region_address),
@@ -188,7 +115,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
   }
   isolate_memory_ = reinterpret_cast<void*>(isolate_address);
 }
-#endif  // V8_COMPRESS_POINTERS
+#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
 
 }  // namespace internal
 }  // namespace v8


@@ -10,6 +10,7 @@
 #include "src/base/bounded-page-allocator.h"
 #include "src/base/page-allocator.h"
 #include "src/common/globals.h"
+#include "src/init/ptr-compr-cage.h"
 #include "src/utils/allocation.h"
 
 namespace v8 {
@@ -24,9 +25,13 @@ namespace internal {
 // IsolateAllocator object is responsible for allocating memory for one (!)
 // Isolate object. Depending on the whether pointer compression is enabled,
 // the memory can be allocated
-// 1) in the C++ heap (when pointer compression is disabled)
+//
+// 1) in the C++ heap (when pointer compression is disabled or when multiple
+//    Isolates share a pointer compression cage)
+//
 // 2) in a proper part of a properly aligned region of a reserved address space
-//    (when pointer compression is enabled).
+//    (when pointer compression is enabled and each Isolate has its own pointer
+//    compression cage).
+//
 // Isolate::New() first creates IsolateAllocator object which allocates the
 // memory and then it constructs Isolate object in this memory. Once it's done
@@ -44,15 +49,19 @@ class V8_EXPORT_PRIVATE IsolateAllocator final {
   v8::PageAllocator* page_allocator() const { return page_allocator_; }
 
+  // When pointer compression is on, returns the base address of the pointer
+  // compression cage reservation. Otherwise returns kNullAddress.
+  Address GetPtrComprCageBaseAddress() const;
+
  private:
-  Address InitReservation();
   void CommitPagesForIsolate(Address heap_reservation_address);
 
   // The allocated memory for Isolate instance.
   void* isolate_memory_ = nullptr;
 
   v8::PageAllocator* page_allocator_ = nullptr;
-  std::unique_ptr<base::BoundedPageAllocator> page_allocator_instance_;
-  VirtualMemory reservation_;
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+  PtrComprCage isolate_cage_;
+#endif
 };
 
 }  // namespace internal
} // namespace internal

src/init/ptr-compr-cage.cc (new file)

@@ -0,0 +1,136 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/init/ptr-compr-cage.h"
#include "src/common/ptr-compr-inl.h"
namespace v8 {
namespace internal {
PtrComprCage::PtrComprCage() = default;
// static
void PtrComprCage::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
GetProcessWideCage()->InitReservationOrDie();
#endif
}
#ifdef V8_COMPRESS_POINTERS
PtrComprCage::~PtrComprCage() { Free(); }
bool PtrComprCage::InitReservation() {
CHECK(!IsReserved());
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
// Reserve a 4Gb region such as that the reservation address is 4Gb aligned.
const size_t reservation_size = kPtrComprCageReservationSize;
const size_t base_alignment = kPtrComprCageBaseAlignment;
const int kMaxAttempts = 4;
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
Address hint = RoundDown(
reinterpret_cast<Address>(platform_page_allocator->GetRandomMmapAddr()),
base_alignment);
// Within this reservation there will be a sub-region with proper alignment.
VirtualMemory padded_reservation(platform_page_allocator,
reservation_size * 2,
reinterpret_cast<void*>(hint));
if (!padded_reservation.IsReserved()) break;
// Find properly aligned sub-region inside the reservation.
Address address = RoundUp(padded_reservation.address(), base_alignment);
CHECK(padded_reservation.InVM(address, reservation_size));
#if defined(V8_OS_FUCHSIA)
// Fuchsia does not respect given hints so as a workaround we will use
// overreserved address space region instead of trying to re-reserve
// a subregion.
bool overreserve = true;
#else
// For the last attempt use the overreserved region to avoid an OOM crash.
// This case can happen if there are many isolates being created in
// parallel that race for reserving the regions.
bool overreserve = (attempt == kMaxAttempts - 1);
#endif
if (overreserve) {
if (padded_reservation.InVM(address, reservation_size)) {
reservation_ = std::move(padded_reservation);
base_ = address;
break;
}
} else {
// Now free the padded reservation and immediately try to reserve an exact
// region at aligned address. We have to do this dancing because the
// reservation address requirement is more complex than just a certain
// alignment and not all operating systems support freeing parts of
// reserved address space regions.
padded_reservation.Free();
VirtualMemory reservation(platform_page_allocator, reservation_size,
reinterpret_cast<void*>(address));
if (!reservation.IsReserved()) break;
// The reservation could still be somewhere else but we can accept it
// if it has the required alignment.
Address address = RoundUp(reservation.address(), base_alignment);
if (reservation.address() == address) {
reservation_ = std::move(reservation);
CHECK_EQ(reservation_.size(), reservation_size);
base_ = address;
break;
}
}
}
if (base_ == kNullAddress) return false;
// Simplify BoundedPageAllocator's life by configuring it to use same page
// size as the Heap will use (MemoryChunk::kPageSize).
size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
platform_page_allocator->AllocatePageSize());
page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
platform_page_allocator, base_, kPtrComprCageReservationSize, page_size);
return true;
}
void PtrComprCage::InitReservationOrDie() {
if (!InitReservation()) {
V8::FatalProcessOutOfMemory(
nullptr, "Failed to reserve memory for V8 pointer compression cage");
}
}
void PtrComprCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
page_allocator_.reset();
reservation_.Free();
}
}
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PtrComprCage, GetSharedProcessWideCage)
} // anonymous namespace
// static
PtrComprCage* PtrComprCage::GetProcessWideCage() {
return GetSharedProcessWideCage();
}
#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
#endif // V8_COMPRESS_POINTERS
} // namespace internal
} // namespace v8
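
The reservation loop above relies on a standard over-reserve-then-align
trick: reserving twice the needed size guarantees that a properly
aligned sub-region exists somewhere inside. A freestanding sketch of
the math (illustrative, not V8 API):

    #include <cstdint>

    constexpr uint64_t kCageSize = uint64_t{4} << 30;  // 4Gb reservation
    constexpr uint64_t kAlign = kCageSize;             // 4Gb base alignment

    // Given the start of an 8Gb padded reservation, the first aligned
    // address inside it is at most kAlign - 1 bytes in, which still
    // leaves at least kCageSize bytes of room before the end.
    uint64_t AlignedBaseWithin(uint64_t padded_start) {
      return (padded_start + kAlign - 1) & ~(kAlign - 1);
    }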

src/init/ptr-compr-cage.h (new file)

@@ -0,0 +1,67 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INIT_PTR_COMPR_CAGE_H_
#define V8_INIT_PTR_COMPR_CAGE_H_
#include <memory>
#include "src/base/bounded-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
class V8_EXPORT_PRIVATE PtrComprCage final {
public:
PtrComprCage();
~PtrComprCage();
PtrComprCage(const PtrComprCage&) = delete;
PtrComprCage& operator=(PtrComprCage&) = delete;
PtrComprCage(PtrComprCage&& other) V8_NOEXCEPT { *this = std::move(other); }
PtrComprCage& operator=(PtrComprCage&& other) V8_NOEXCEPT {
base_ = other.base_;
other.base_ = kNullAddress;
page_allocator_ = std::move(other.page_allocator_);
reservation_ = std::move(other.reservation_);
return *this;
}
Address base() const { return base_; }
base::BoundedPageAllocator* page_allocator() const {
return page_allocator_.get();
}
const VirtualMemory* reservation() const { return &reservation_; }
bool IsReserved() const {
DCHECK_EQ(base_ != kNullAddress, reservation_.IsReserved());
return base_ != kNullAddress;
}
bool InitReservation();
void InitReservationOrDie();
void Free();
static void InitializeOncePerProcess();
static PtrComprCage* GetProcessWideCage();
private:
friend class IsolateAllocator;
Address base_ = kNullAddress;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
VirtualMemory reservation_;
};
} // namespace internal
} // namespace v8
#endif // V8_INIT_PTR_COMPR_CAGE_H_


@@ -151,6 +151,7 @@ void V8::InitializeOncePerProcessImpl() {
 #if defined(V8_USE_PERFETTO)
   if (perfetto::Tracing::IsInitialized()) TrackEvent::Register();
 #endif
+  PtrComprCage::InitializeOncePerProcess();
   Isolate::InitializeOncePerProcess();
 
 #if defined(USE_SIMULATOR)


@@ -250,7 +250,7 @@ class VirtualMemory final {
   // can be called on a VirtualMemory that is itself not writable.
   V8_EXPORT_PRIVATE void FreeReadOnly();
 
-  bool InVM(Address address, size_t size) {
+  bool InVM(Address address, size_t size) const {
     return region_.contains(address, size);
   }


@@ -260,6 +260,7 @@ v8_source_set("cctest_sources") {
     "test-platform.cc",
     "test-profile-generator.cc",
     "test-property-details.cc",
+    "test-ptr-compr-cage.cc",
     "test-random-number-generator.cc",
     "test-regexp.cc",
     "test-representation.cc",


@@ -939,6 +939,13 @@ TEST(ExtensionsRegistration) {
   const int kNThreads = 10;
 #elif V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT
   const int kNThreads = 10;
+#elif V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+  // TODO(syg): Support larger cages or tweak kMaximalCodeRangeSize.
+  //
+  // Isolates reserve kMaximalCodeRangeSize of virtual memory. A shared pointer
+  // compression cage is 4GB and kMaximalCodeRangeSize is 128MB on arm64 and
+  // x64, giving us a maximum of ~33.
+  const int kNThreads = 30;
 #else
   const int kNThreads = 40;
 #endif
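
The ceiling in that comment is simple division: 4096MB / 128MB = 32
code-range reservations fit in a shared cage, which is where the
estimate of ~33 Isolates comes from; the test stays safely below it
with 30 threads.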

test/cctest/test-ptr-compr-cage.cc (new file)

@@ -0,0 +1,80 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/common/globals.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h"
#include "src/init/ptr-compr-cage.h"
#include "test/cctest/cctest.h"
#ifdef V8_COMPRESS_POINTERS
namespace v8 {
namespace internal {
UNINITIALIZED_TEST(PtrComprCageAndIsolateRoot) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate1 = v8::Isolate::New(create_params);
Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
v8::Isolate* isolate2 = v8::Isolate::New(create_params);
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate2);
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
CHECK_EQ(i_isolate1->isolate_root(), i_isolate1->cage_base());
CHECK_EQ(i_isolate2->isolate_root(), i_isolate2->cage_base());
CHECK_NE(i_isolate1->cage_base(), i_isolate2->cage_base());
#endif
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
CHECK_NE(i_isolate1->isolate_root(), i_isolate1->cage_base());
CHECK_NE(i_isolate2->isolate_root(), i_isolate2->cage_base());
CHECK_NE(i_isolate1->isolate_root(), i_isolate2->isolate_root());
CHECK_EQ(i_isolate1->cage_base(), i_isolate2->cage_base());
#endif
isolate1->Dispose();
isolate2->Dispose();
}
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
UNINITIALIZED_TEST(SharedPtrComprCage) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate1 = v8::Isolate::New(create_params);
Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
v8::Isolate* isolate2 = v8::Isolate::New(create_params);
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate2);
Factory* factory1 = i_isolate1->factory();
Factory* factory2 = i_isolate2->factory();
{
HandleScope scope1(i_isolate1);
HandleScope scope2(i_isolate2);
Handle<FixedArray> isolate1_object = factory1->NewFixedArray(100);
Handle<FixedArray> isolate2_object = factory2->NewFixedArray(100);
CHECK_EQ(GetPtrComprCageBase(*isolate1_object),
GetPtrComprCageBase(*isolate2_object));
const PtrComprCage* cage = PtrComprCage::GetProcessWideCage();
CHECK(cage->reservation()->InVM(isolate1_object->ptr(),
isolate1_object->Size()));
CHECK(cage->reservation()->InVM(isolate2_object->ptr(),
isolate2_object->Size()));
}
isolate1->Dispose();
isolate2->Dispose();
}
#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
} // namespace internal
} // namespace v8
#endif // V8_COMPRESS_POINTERS


@@ -1462,4 +1462,11 @@
   'concurrent-initial-prototype-change-1': [SKIP],
 }],  # variant == concurrent_inlining
 
+##############################################################################
+['pointer_compression_shared_cage', {
+  # kMaximalCodeRangeSize causing VM exhaustion with 50 workers when sharing a
+  # pointer cage.
+  'regress/wasm/regress-1010272': [SKIP],
+}],
+
 ]


@@ -145,11 +145,16 @@ TEST_F(HeapTest, HeapLayout) {
       "}"
       "ar.push(Array(32 * 1024 * 1024));");
 
+  Address cage_base = i_isolate()->cage_base();
+  EXPECT_TRUE(IsAligned(cage_base, size_t{4} * GB));
+
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
   Address isolate_root = i_isolate()->isolate_root();
   EXPECT_TRUE(IsAligned(isolate_root, size_t{4} * GB));
+  EXPECT_EQ(cage_base, isolate_root);
+#endif
 
   // Check that all memory chunks belong this region.
-  base::AddressRegion heap_reservation(isolate_root, size_t{4} * GB);
+  base::AddressRegion heap_reservation(cage_base, size_t{4} * GB);
 
   SafepointScope scope(i_isolate()->heap());
   OldGenerationMemoryChunkIterator iter(i_isolate()->heap());


@@ -9,6 +9,7 @@
 #include "src/heap/heap-inl.h"
 #include "src/heap/memory-allocator.h"
 #include "src/heap/spaces-inl.h"
+#include "src/init/ptr-compr-cage.h"
 #include "src/utils/ostreams.h"
 #include "test/unittests/test-utils.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -239,11 +240,22 @@ class SequentialUnmapperTest : public TestWithIsolate {
         SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
     old_flag_ = i::FLAG_concurrent_sweeping;
     i::FLAG_concurrent_sweeping = false;
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+    // Reinitialize the process-wide pointer cage so it can pick up the
+    // TrackingPageAllocator.
+    PtrComprCage::GetProcessWideCage()->Free();
+    PtrComprCage::GetProcessWideCage()->InitReservationOrDie();
+#endif
     TestWithIsolate::SetUpTestCase();
   }
 
   static void TearDownTestCase() {
     TestWithIsolate::TearDownTestCase();
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+    // Free the process-wide cage reservation, otherwise the pages won't be
+    // freed until process teardown.
+    PtrComprCage::GetProcessWideCage()->Free();
+#endif
     i::FLAG_concurrent_sweeping = old_flag_;
     CHECK(tracking_page_allocator_->IsEmpty());


@@ -189,6 +189,7 @@ class BuildConfig(object):
     self.verify_csa = build_config['v8_enable_verify_csa']
     self.lite_mode = build_config['v8_enable_lite_mode']
     self.pointer_compression = build_config['v8_enable_pointer_compression']
+    self.pointer_compression_shared_cage = build_config['v8_enable_pointer_compression_shared_cage']
     self.webassembly = build_config['v8_enable_webassembly']
     # Export only for MIPS target
     if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
@@ -229,6 +230,8 @@ class BuildConfig(object):
       detected_options.append('lite_mode')
     if self.pointer_compression:
       detected_options.append('pointer_compression')
+    if self.pointer_compression_shared_cage:
+      detected_options.append('pointer_compression_shared_cage')
     if self.webassembly:
       detected_options.append('webassembly')
@@ -686,6 +689,7 @@ class BaseTestRunner(object):
       "verify_csa": self.build_config.verify_csa,
       "lite_mode": self.build_config.lite_mode,
       "pointer_compression": self.build_config.pointer_compression,
+      "pointer_compression_shared_cage": self.build_config.pointer_compression_shared_cage,
     }
 
   def _runner_flags(self):
def _runner_flags(self):


@@ -334,7 +334,8 @@ class SystemTest(unittest.TestCase):
         is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
         v8_enable_i18n_support=False, v8_target_cpu='x86',
         v8_enable_verify_csa=False, v8_enable_lite_mode=False,
-        v8_enable_pointer_compression=False)
+        v8_enable_pointer_compression=False,
+        v8_enable_pointer_compression_shared_cage=False)
     result = run_tests(
         basedir,
         '--progress=verbose',


@@ -21,6 +21,7 @@
   "v8_enable_verify_csa": false,
   "v8_enable_lite_mode": false,
   "v8_enable_pointer_compression": true,
+  "v8_enable_pointer_compression_shared_cage": true,
   "v8_control_flow_integrity": false,
   "v8_enable_webassembly": true
 }


@@ -21,6 +21,7 @@
   "v8_enable_verify_csa": false,
   "v8_enable_lite_mode": false,
   "v8_enable_pointer_compression": false,
+  "v8_enable_pointer_compression_shared_cage": false,
   "v8_control_flow_integrity": false,
   "v8_enable_webassembly": true
 }