[ptr-cage] Share RO heap when sharing pointer compression cage

Bug: v8:11460
Change-Id: I97a21d158ad057334cc7fe5f53edc5c6c23d1355
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2861711
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Dan Elphick <delphick@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74365}
Authored by Shu-yu Guo on 2021-05-04 13:42:48 -07:00; committed by V8 LUCI CQ
parent 7dce6a2633
commit dc9eca8a6e
9 changed files with 94 additions and 48 deletions

View File

@@ -419,7 +419,8 @@ if (v8_enable_short_builtin_calls &&
v8_enable_short_builtin_calls = false
}
if (v8_enable_shared_ro_heap == "") {
v8_enable_shared_ro_heap = !v8_enable_pointer_compression
v8_enable_shared_ro_heap = !v8_enable_pointer_compression ||
v8_enable_pointer_compression_shared_cage
}
assert(!v8_disable_write_barriers || v8_enable_single_generation,
@@ -443,10 +444,6 @@ assert(!v8_enable_map_packing || !v8_enable_pointer_compression,
assert(!v8_enable_map_packing || v8_current_cpu == "x64",
"Map packing is only supported on x64")
assert(
!v8_enable_pointer_compression_shared_cage || !v8_enable_shared_ro_heap,
"Sharing read-only heap is not yet supported when sharing a pointer compression cage")
assert(!v8_use_multi_snapshots || !v8_control_flow_integrity,
"Control-flow integrity does not support multisnapshots")

View File

@@ -77,14 +77,10 @@ V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
#else
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// TODO(syg): Share RO space across Isolates for shared cage; need to fix
// Symbol::Description.
if (chunk->InReadOnlySpace()) {
*isolate = nullptr;
return false;
}
#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
*isolate = Isolate::FromHeap(chunk->GetHeap());
return true;
#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP

View File

@@ -87,8 +87,8 @@ class ReadOnlyHeap {
// Returns whether the ReadOnlySpace will actually be shared taking into
// account whether shared memory is available with pointer compression.
static bool IsReadOnlySpaceShared() {
return V8_SHARED_RO_HEAP_BOOL && (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL ||
IsSharedMemoryAvailable());
return V8_SHARED_RO_HEAP_BOOL &&
(!COMPRESS_POINTERS_BOOL || COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL);
}
virtual void InitializeIsolateRoots(Isolate* isolate) {}
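For illustration only, here is a minimal standalone sketch of the updated predicate; the constants below are hypothetical stand-ins, not V8's actual flag macros. The idea is that read-only space sharing is possible either without pointer compression at all, or when every Isolate lives in one shared cage:

```cpp
// Hypothetical stand-ins for V8's build-time flags (illustrative only).
constexpr bool kSharedRoHeap = true;      // ~ V8_SHARED_RO_HEAP_BOOL
constexpr bool kCompressPointers = true;  // ~ COMPRESS_POINTERS_BOOL
constexpr bool kSharedCage = true;        // ~ COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL

// Mirrors the updated predicate: the RO space can be shared when pointer
// compression is off entirely, or when all Isolates share one cage.
constexpr bool IsReadOnlySpaceShared() {
  return kSharedRoHeap && (!kCompressPointers || kSharedCage);
}

static_assert(IsReadOnlySpaceShared(),
              "a shared cage no longer disables RO-heap sharing");

int main() { return 0; }
```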

View File

@@ -70,11 +70,10 @@ SingleCopyReadOnlyArtifacts::~SingleCopyReadOnlyArtifacts() {
// TearDown requires MemoryAllocator which itself is tied to an Isolate.
shared_read_only_space_->pages_.resize(0);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
for (ReadOnlyPage* chunk : pages_) {
void* chunk_address = reinterpret_cast<void*>(chunk->address());
size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
CHECK(page_allocator->FreePages(chunk_address, size));
size_t size = RoundUp(chunk->size(), page_allocator_->AllocatePageSize());
CHECK(page_allocator_->FreePages(chunk_address, size));
}
}
@@ -86,6 +85,12 @@ ReadOnlyHeap* SingleCopyReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
void SingleCopyReadOnlyArtifacts::Initialize(Isolate* isolate,
std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) {
// Do not use the platform page allocator when sharing a pointer compression
// cage, as the Isolate's page allocator is a BoundedPageAllocator tied to the
// shared cage.
page_allocator_ = COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL
? isolate->page_allocator()
: GetPlatformPageAllocator();
pages_ = std::move(pages);
set_accounting_stats(stats);
set_shared_read_only_space(
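A rough standalone sketch of the ownership idea behind the new page_allocator_ field, using simplified placeholder types rather than V8's real v8::PageAllocator: the artifacts remember which allocator produced the pages so that teardown frees them through that same allocator.

```cpp
#include <cstddef>
#include <cstdlib>
#include <vector>

// Illustrative allocator interface; V8's v8::PageAllocator is richer.
struct PageAllocator {
  virtual ~PageAllocator() = default;
  virtual void* Allocate(std::size_t size) = 0;
  virtual void Free(void* p) = 0;
};

struct PlatformAllocator : PageAllocator {
  void* Allocate(std::size_t size) override { return std::malloc(size); }
  void Free(void* p) override { std::free(p); }
};

// Stand-in for a BoundedPageAllocator that carves pages out of a shared cage.
struct CageAllocator : PageAllocator {
  void* Allocate(std::size_t size) override { return std::malloc(size); }
  void Free(void* p) override { std::free(p); }
};

class Artifacts {
 public:
  // Record the allocator used for the pages so teardown can free them through
  // the same allocator, mirroring the page_allocator_ member added to
  // SingleCopyReadOnlyArtifacts.
  void Initialize(PageAllocator* allocator, std::vector<void*> pages) {
    allocator_ = allocator;
    pages_ = std::move(pages);
  }
  ~Artifacts() {
    for (void* p : pages_) allocator_->Free(p);
  }

 private:
  PageAllocator* allocator_ = nullptr;
  std::vector<void*> pages_;
};

int main() {
  CageAllocator cage;
  Artifacts artifacts;
  artifacts.Initialize(&cage, {cage.Allocate(4096)});
  return 0;  // artifacts' destructor frees the page through the cage allocator
}
```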
@@ -304,11 +309,12 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
Heap* heap = ReadOnlySpace::heap();
// Without pointer compression, ReadOnlySpace pages are directly shared
// between all heaps and so must be unregistered from their originating
// allocator.
Seal(COMPRESS_POINTERS_BOOL ? SealMode::kDetachFromHeap
: SealMode::kDetachFromHeapAndUnregisterMemory);
// Without pointer compression in a per-Isolate cage, ReadOnlySpace pages are
// directly shared between all heaps and so must be unregistered from their
// originating allocator.
Seal(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
? SealMode::kDetachFromHeap
: SealMode::kDetachFromHeapAndUnregisterMemory);
artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}
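As a small illustrative sketch (the enum and flag names are stand-ins, not V8's), the seal-mode choice boils down to: pages handed directly to other heaps must also be unregistered from the allocator that mapped them, while a per-Isolate cage keeps its own mapping and only needs detaching.

```cpp
enum class SealMode { kDetachFromHeap, kDetachFromHeapAndUnregisterMemory };

// Illustrative stand-in for COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL.
constexpr bool kPerIsolateCage = false;

constexpr SealMode ChooseSealMode() {
  // Pages shared directly between heaps (no per-Isolate cage) must be
  // unregistered from their originating allocator as well.
  return kPerIsolateCage ? SealMode::kDetachFromHeap
                         : SealMode::kDetachFromHeapAndUnregisterMemory;
}

static_assert(ChooseSealMode() == SealMode::kDetachFromHeapAndUnregisterMemory,
              "without a per-Isolate cage, pages are unregistered on detach");

int main() { return 0; }
```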
@@ -790,9 +796,9 @@ SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
SingleCopyReadOnlyArtifacts* artifacts)
: SharedReadOnlySpace(heap) {
// This constructor should only be used when RO_SPACE is shared without
// pointer compression.
// pointer compression in a per-Isolate cage.
DCHECK(V8_SHARED_RO_HEAP_BOOL);
DCHECK(!COMPRESS_POINTERS_BOOL);
DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
accounting_stats_ = artifacts->accounting_stats();
pages_ = artifacts->pages();
}

View File

@@ -132,6 +132,9 @@ class SingleCopyReadOnlyArtifacts : public ReadOnlyArtifacts {
const AllocationStats& stats) override;
void ReinstallReadOnlySpace(Isolate* isolate) override;
void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;
private:
v8::PageAllocator* page_allocator_ = nullptr;
};
// -----------------------------------------------------------------------------

View File

@@ -145,22 +145,41 @@ int Code::OffHeapInstructionSize() const {
return d.InstructionSizeOfBuiltin(builtin_index());
}
namespace {
// Helper function for getting an EmbeddedData that can handle un-embedded
// builtins when short builtin calls are enabled.
inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(Code code) {
#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
// GetIsolateFromWritableObject(*this) works for both read-only and writable
// objects when pointer compression is enabled with a per-Isolate cage.
return EmbeddedData::FromBlob(GetIsolateFromWritableObject(code));
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
// When pointer compression is enabled with a shared cage, there is also a
// shared CodeRange. When short builtin calls are enabled, there is a single
// copy of the re-embedded builtins in the shared CodeRange, so use that if
// it's present.
CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
return (code_range && code_range->embedded_blob_code_copy() != nullptr)
? EmbeddedData::FromBlob(code_range)
: EmbeddedData::FromBlob();
#else
// Otherwise there is a single copy of the blob across all Isolates, use the
// global atomic variables.
return EmbeddedData::FromBlob();
#endif
}
} // namespace
Address Code::OffHeapInstructionStart() const {
DCHECK(is_off_heap_trampoline());
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
}
// TODO(11527): pass Isolate as an argument.
// GetIsolateFromWritableObject(*this) works for both read-only and writable
// objects here because short builtin calls feature requires pointer
// compression.
// We don't have to check the Isolate::is_short_builtin_calls_enabled() value
// because if the short builtin calls wasn't actually enabled because of not
// enough memory, the FromBlob(isolate) would still be the correct one to use.
EmbeddedData d =
FLAG_short_builtin_calls
? EmbeddedData::FromBlob(GetIsolateFromWritableObject(*this))
: EmbeddedData::FromBlob();
// TODO(11527): pass Isolate as an argument for getting the EmbeddedData.
EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionStartOfBuiltin(builtin_index());
}
@@ -169,17 +188,9 @@ Address Code::OffHeapInstructionEnd() const {
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
}
// TODO(11527): pass Isolate as an argument.
// GetIsolateFromWritableObject(*this) works for both read-only and writable
// objects here because short builtin calls feature requires pointer
// compression.
// We don't have to check the Isolate::is_short_builtin_calls_enabled() value
// because if the short builtin calls wasn't actually enabled because of not
// enough memory, the FromBlob(isolate) would still be the correct one to use.
EmbeddedData d =
FLAG_short_builtin_calls
? EmbeddedData::FromBlob(GetIsolateFromWritableObject(*this))
: EmbeddedData::FromBlob();
// TODO(11527): pass Isolate as an argument for getting the EmbeddedData.
EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionStartOfBuiltin(builtin_index()) +
d.InstructionSizeOfBuiltin(builtin_index());
}
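For context, a minimal standalone model of the lookup order implemented by EmbeddedDataWithMaybeRemappedEmbeddedBuiltins above, using placeholder types rather than V8's API: prefer a re-embedded copy living in the shared CodeRange when one exists, otherwise fall back to the process-wide blob.

```cpp
#include <cstdint>
#include <iostream>

// Illustrative stand-ins; these are not V8's EmbeddedData/CodeRange types.
struct Blob { const std::uint8_t* code; };

const std::uint8_t kGlobalBlob[] = {0xAA};
Blob GlobalBlob() { return {kGlobalBlob}; }

struct CodeRange {
  const std::uint8_t* embedded_blob_code_copy = nullptr;  // remapped copy, if any
};

// Mirrors the helper's shared-cage branch: use the copy remapped into the
// shared CodeRange when short builtin calls created one, else the global blob.
Blob BlobFor(const CodeRange* range) {
  if (range && range->embedded_blob_code_copy != nullptr) {
    return {range->embedded_blob_code_copy};
  }
  return GlobalBlob();
}

int main() {
  CodeRange shared_range;  // no remapped copy yet -> global blob
  std::cout << (BlobFor(&shared_range).code == kGlobalBlob) << "\n";  // 1
  const std::uint8_t copy[] = {0xBB};
  shared_range.embedded_blob_code_copy = copy;  // copy present -> use it
  std::cout << (BlobFor(&shared_range).code == copy) << "\n";  // 1
  return 0;
}
```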

View File

@@ -563,7 +563,7 @@ void SwissNameDictionary::Initialize(IsolateT* isolate, ByteArray meta_table,
SwissNameDictionary::IndexIterator::IndexIterator(
Handle<SwissNameDictionary> dict, int start)
: enum_index_{start}, dict_{dict} {
if (!COMPRESS_POINTERS_BOOL && dict.is_null()) {
if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && dict.is_null()) {
used_capacity_ = 0;
} else {
used_capacity_ = dict->UsedCapacity();
@@ -608,7 +608,7 @@ SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::begin() {
}
SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
if (!COMPRESS_POINTERS_BOOL && dict_.is_null()) {
if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && dict_.is_null()) {
return IndexIterator(dict_, 0);
} else {
DCHECK(!dict_.is_null());
@@ -619,12 +619,12 @@ SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
SwissNameDictionary::IndexIterable
SwissNameDictionary::IterateEntriesOrdered() {
// If we are supposed to iterate the empty dictionary (which is non-writable)
// and pointer compression is disabled, we have no simple way to get the
// isolate, which we would need to create a handle.
// and pointer compression with a per-Isolate cage is disabled, we have no
// simple way to get the isolate, which we would need to create a handle.
// TODO(emrich): Consider always using roots.empty_swiss_dictionary_handle()
// in the condition once this function gets Isolate as a parameter in order to
// avoid empty dict checks.
if (!COMPRESS_POINTERS_BOOL && Capacity() == 0)
if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && Capacity() == 0)
return IndexIterable(Handle<SwissNameDictionary>::null());
Isolate* isolate;
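A minimal illustration of the null-handle convention relied on above, with placeholder container types rather than V8's Handle and SwissNameDictionary: when no handle to the (read-only, empty) dictionary can be produced, an empty handle stands in for an empty iteration range.

```cpp
// Illustrative stand-in for a Handle that may be empty (null).
template <typename T>
struct Handle {
  T* ptr = nullptr;
  bool is_null() const { return ptr == nullptr; }
  T* operator->() const { return ptr; }
};

struct Dict {
  int used_capacity = 3;
  int UsedCapacity() const { return used_capacity; }
};

// Mirrors the IndexIterator pattern: a null dictionary handle yields an
// empty range instead of dereferencing the handle.
struct IndexIterable {
  Handle<Dict> dict;
  int begin() const { return 0; }
  int end() const { return dict.is_null() ? 0 : dict->UsedCapacity(); }
};

int main() {
  IndexIterable empty{Handle<Dict>{}};       // null handle => empty range
  Dict d;
  IndexIterable nonempty{Handle<Dict>{&d}};  // real handle => [0, 3)
  return (empty.end() == 0 && nonempty.end() == 3) ? 0 : 1;
}
```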

View File

@@ -9,6 +9,7 @@
#include "src/builtins/builtins.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
namespace v8 {
namespace internal {
@@ -62,6 +63,13 @@ class EmbeddedData final {
isolate->embedded_blob_data(), isolate->embedded_blob_data_size());
}
static EmbeddedData FromBlob(CodeRange* code_range) {
return EmbeddedData(code_range->embedded_blob_code_copy(),
Isolate::CurrentEmbeddedBlobCodeSize(),
Isolate::CurrentEmbeddedBlobData(),
Isolate::CurrentEmbeddedBlobDataSize());
}
const uint8_t* code() const { return code_; }
uint32_t code_size() const { return code_size_; }
const uint8_t* data() const { return data_; }

View File

@@ -100,6 +100,31 @@ UNINITIALIZED_TEST(SharedPtrComprCageCodeRange) {
isolate1->Dispose();
isolate2->Dispose();
}
#ifdef V8_SHARED_RO_HEAP
UNINITIALIZED_TEST(SharedPtrComprCageImpliesSharedReadOnlyHeap) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate1 = v8::Isolate::New(create_params);
Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
v8::Isolate* isolate2 = v8::Isolate::New(create_params);
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate2);
CHECK_EQ(i_isolate1->read_only_heap(), i_isolate2->read_only_heap());
// Spot check that some read-only roots are the same.
CHECK_EQ(ReadOnlyRoots(i_isolate1).the_hole_value(),
ReadOnlyRoots(i_isolate2).the_hole_value());
CHECK_EQ(ReadOnlyRoots(i_isolate1).code_map(),
ReadOnlyRoots(i_isolate2).code_map());
CHECK_EQ(ReadOnlyRoots(i_isolate1).exception(),
ReadOnlyRoots(i_isolate2).exception());
isolate1->Dispose();
isolate2->Dispose();
}
#endif // V8_SHARED_RO_HEAP
#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
} // namespace internal