[heap] Share RO_SPACE pages with pointer compression

This allows v8_enable_shared_ro_heap and v8_enable_pointer_compression to be
enabled together on Linux and Android, although the combination still
defaults to off.
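
For reference, the combination can be tried out locally via gn args; a
minimal sketch follows (the target_os/target_cpu values are illustrative
assumptions; only the two v8 flags are what this change enables):

  # args.gn
  target_os = "linux"   # or "android"
  target_cpu = "x64"
  v8_enable_pointer_compression = true
  v8_enable_shared_ro_heap = true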

When pointer compression and read-only heap sharing are both enabled, sharing
is achieved by allocating ReadOnlyPages in shared memory that are retained in
the shared ReadOnlyArtifacts object. These ReadOnlyPages are then remapped
into the address space of each Isolate, ultimately using mremap.
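
In terms of the v8::PageAllocator shared-memory API used here, the remap step
for a single retained page is roughly the following sketch (error handling
and page-header bookkeeping omitted; shared_memory and new_address stand in
for the SharedMemory handle held by ReadOnlyArtifacts and an address reserved
inside the Isolate's pointer compression cage):

  // Remap one shared ReadOnlyPage into a newly created Isolate. The target
  // range was previously reserved via ReserveForSharedMemoryMapping.
  std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> mapping =
      shared_memory->RemapTo(reinterpret_cast<void*>(new_address));
  CHECK(mapping);  // On Linux/Android this ends up calling mremap.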

To simplify the creation process, the ReadOnlySpace memory for the first
Isolate is created as before, without any sharing. Only once the
ReadOnlySpace memory has been finalized is the shared memory allocated and
the contents copied into it. The original memory is then released (with
pointer compression this means it is simply released back to the
BoundedPageAllocator) and immediately re-allocated as a shared mapping.
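
The copy into shared memory corresponds to
PointerCompressedReadOnlyArtifacts::Initialize in the diff below; in outline,
for each finalized page (a simplified sketch that omits accounting stats and
root rebasing):

  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  for (const ReadOnlyPage* page : pages) {
    size_t size = RoundUp(page->size(), page_allocator->AllocatePageSize());
    // Allocate shared memory whose initial contents are copied from the
    // original page. (The original page is released afterwards and its
    // address range re-used for a shared mapping; see
    // ReinstallReadOnlySpace.)
    std::unique_ptr<v8::PageAllocator::SharedMemory> shared_memory =
        page_allocator->AllocateSharedPages(size, page);
    CHECK_NOT_NULL(shared_memory->GetMemory());
  }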

Because we would like v8_enable_shared_ro_heap to default to true at some
point, but cannot make that decision conditional on the value returned by a
method in code that has not yet been compiled, the code required for sharing
mostly uses runtime checks of ReadOnlyHeap::IsReadOnlySpaceShared() rather
than #ifdefs, except where a compile error would result from class members
that do not exist without sharing. IsReadOnlySpaceShared() evaluates
CanAllocateSharedPages on the platform PageAllocator (with pointer
compression and sharing enabled) once and caches that value, so sharing
cannot be toggled during the lifetime of the process.
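
Concretely, the check added in the read-only-heap changes below caches the
platform capability in a function-local static, so the answer is computed at
most once per process:

  // In ReadOnlyHeap (src/heap/read-only-heap.h):
  static bool IsReadOnlySpaceShared() {
    return V8_SHARED_RO_HEAP_BOOL &&
           (!COMPRESS_POINTERS_BOOL || IsSharedMemoryAvailable());
  }

  // Definition in the corresponding .cc:
  bool ReadOnlyHeap::IsSharedMemoryAvailable() {
    static bool shared_memory_allocation_supported =
        GetPlatformPageAllocator()->CanAllocateSharedPages();
    return shared_memory_allocation_supported;
  }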

Bug: v8:10454
Change-Id: I0236d752047ecce71bd64c159430517a712bc1e2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2267300
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69174}
Dan Elphick 2020-07-31 13:52:57 +01:00 committed by Commit Bot
parent 0ae4ef0536
commit c7d22c4991
19 changed files with 798 additions and 244 deletions


@ -339,9 +339,11 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
"Control-flow integrity is only supported on arm64")
assert(
!v8_enable_pointer_compression || !v8_enable_shared_ro_heap,
"Pointer compression is not supported with shared read-only heap enabled")
if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
assert(
is_linux || is_android,
"Sharing read-only heap with pointer compression is only supported on Linux or Android")
}
assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
"V8 Heap Sandbox requires pointer compression")


@ -8499,12 +8499,13 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_global_handles_size_ = heap->TotalGlobalHandlesSize();
heap_statistics->used_global_handles_size_ = heap->UsedGlobalHandlesSize();
#ifndef V8_SHARED_RO_HEAP
i::ReadOnlySpace* ro_space = heap->read_only_space();
heap_statistics->total_heap_size_ += ro_space->CommittedMemory();
heap_statistics->total_physical_size_ += ro_space->CommittedPhysicalMemory();
heap_statistics->used_heap_size_ += ro_space->Size();
#endif // V8_SHARED_RO_HEAP
if (!i::ReadOnlyHeap::IsReadOnlySpaceShared()) {
i::ReadOnlySpace* ro_space = heap->read_only_space();
heap_statistics->total_heap_size_ += ro_space->CommittedMemory();
heap_statistics->total_physical_size_ +=
ro_space->CommittedPhysicalMemory();
heap_statistics->used_heap_size_ += ro_space->Size();
}
heap_statistics->total_heap_size_executable_ =
heap->CommittedMemoryExecutable();
@ -8542,7 +8543,7 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
space_statistics->space_name_ = i::BaseSpace::GetSpaceName(allocation_space);
if (allocation_space == i::RO_SPACE) {
if (V8_SHARED_RO_HEAP_BOOL) {
if (i::ReadOnlyHeap::IsReadOnlySpaceShared()) {
// RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared.
space_statistics->space_size_ = 0;
space_statistics->space_used_size_ = 0;


@ -59,6 +59,26 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
return true;
}
bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr,
size_t size) {
Address address = reinterpret_cast<Address>(ptr);
CHECK(IsAligned(address, allocate_page_size_));
CHECK(IsAligned(size, commit_page_size_));
CHECK(region_allocator_.contains(address, size));
// Region allocator requires page size rather than commit size so just over-
// allocate there since any extra space couldn't be used anyway.
size_t region_size = RoundUp(size, allocate_page_size_);
if (!region_allocator_.AllocateRegionAt(
address, region_size, RegionAllocator::RegionState::kExcluded)) {
return false;
}
CHECK(page_allocator_->SetPermissions(ptr, size,
PageAllocator::Permission::kNoAccess));
return true;
}
bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
MutexGuard guard(&mutex_);


@ -56,6 +56,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
void* AllocatePages(void* hint, size_t size, size_t alignment,
Permission access) override;
bool ReserveForSharedMemoryMapping(void* address, size_t size) override;
// Allocates pages at given address, returns true on success.
bool AllocatePagesAt(Address address, size_t size, Permission access);


@ -80,6 +80,10 @@ STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
V8_INLINE Tagged_t CompressTagged(Address tagged) { UNREACHABLE(); }
V8_INLINE Address GetIsolateRoot(Address on_heap_addr) { UNREACHABLE(); }
V8_INLINE Address GetIsolateRoot(const Isolate* isolate) { UNREACHABLE(); }
V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { UNREACHABLE(); }
template <typename TOnHeapAddress>


@ -2933,14 +2933,17 @@ void Isolate::Delete(Isolate* isolate) {
}
void Isolate::SetUpFromReadOnlyArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts) {
artifacts_ = artifacts;
DCHECK_NOT_NULL(artifacts);
ReadOnlyHeap* ro_heap = artifacts->read_only_heap();
std::shared_ptr<ReadOnlyArtifacts> artifacts, ReadOnlyHeap* ro_heap) {
if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
DCHECK_NOT_NULL(artifacts);
artifacts_ = artifacts;
} else {
DCHECK_NULL(artifacts);
}
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_heap_ != nullptr, read_only_heap_ == ro_heap);
read_only_heap_ = ro_heap;
heap_.SetUpFromReadOnlyHeap(ro_heap);
heap_.SetUpFromReadOnlyHeap(read_only_heap_);
}
v8::PageAllocator* Isolate::page_allocator() {
@ -3221,6 +3224,13 @@ Isolate::~Isolate() {
default_microtask_queue_ == default_microtask_queue_->next());
delete default_microtask_queue_;
default_microtask_queue_ = nullptr;
// The ReadOnlyHeap should not be destroyed when sharing without pointer
// compression as the object itself is shared.
if (read_only_heap_->IsOwnedByIsolate()) {
delete read_only_heap_;
read_only_heap_ = nullptr;
}
}
void Isolate::InitializeThreadLocal() {


@ -533,7 +533,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts);
void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts,
ReadOnlyHeap* ro_heap);
void set_read_only_heap(ReadOnlyHeap* ro_heap) { read_only_heap_ = ro_heap; }
// Returns allocation mode of this isolate.
V8_INLINE IsolateAllocationMode isolate_allocation_mode();


@ -3062,6 +3062,8 @@ bool Heap::InOffThreadSpace(HeapObject heap_object) {
return false; // currently unsupported
#else
BaseSpace* owner = BasicMemoryChunk::FromHeapObject(heap_object)->owner();
// Detached RO_SPACE chunks have no owner set.
if (owner == nullptr) return false;
if (owner->identity() == OLD_SPACE) {
// TODO(leszeks): Should we exclude compaction spaces here?
return static_cast<PagedSpace*>(owner)->is_off_thread_space();
@ -5309,7 +5311,10 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
CHECK(V8_SHARED_RO_HEAP_BOOL);
delete read_only_space_;
if (read_only_space_) {
read_only_space_->TearDown(memory_allocator());
delete read_only_space_;
}
read_only_space_ = space;
}
@ -5611,13 +5616,14 @@ void Heap::TearDown() {
tracer_.reset();
isolate()->read_only_heap()->OnHeapTearDown();
read_only_space_ = nullptr;
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
}
isolate()->read_only_heap()->OnHeapTearDown(this);
read_only_space_ = nullptr;
memory_allocator()->TearDown();
StrongRootsList* next = nullptr;


@ -15,6 +15,7 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
#include "src/logging/log.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@ -536,6 +537,14 @@ void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
size_ -= released_bytes;
}
void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_, static_cast<size_t>(size));
size_ -= size;
}
void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
Executability executable) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
@ -543,6 +552,7 @@ void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_, static_cast<size_t>(size));
size_ -= size;
if (executable == EXECUTABLE) {
DCHECK_GE(size_executable_, size);
@ -559,15 +569,19 @@ void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterMemory(chunk);
chunk->SetFlag(MemoryChunk::PRE_FREED);
UnregisterSharedMemory(chunk);
v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE);
VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
reservation->Free();
reservation->FreeReadOnly();
} else {
// Only read-only pages can have non-initialized reservation object.
FreeMemory(page_allocator(NOT_EXECUTABLE), chunk->address(), chunk->size());
// Only read-only pages can have a non-initialized reservation object. This
// happens when the pages are remapped to multiple locations and where the
// reservation would therefore be invalid.
FreeMemory(allocator, chunk->address(),
RoundUp(chunk->size(), allocator->AllocatePageSize()));
}
}
@ -671,6 +685,12 @@ ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
return owner->InitializePage(chunk);
}
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
MemoryAllocator::RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address) {
return shared_memory->RemapTo(reinterpret_cast<void*>(new_address));
}
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {


@ -11,6 +11,7 @@
#include <unordered_set>
#include <vector>
#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/macros.h"
@ -195,6 +196,9 @@ class MemoryAllocator {
ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
template <MemoryAllocator::FreeMode mode = kFull>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void Free(MemoryChunk* chunk);
@ -303,6 +307,9 @@ class MemoryAllocator {
void UnregisterMemory(MemoryChunk* chunk);
void UnregisterMemory(BasicMemoryChunk* chunk,
Executability executable = NOT_EXECUTABLE);
void UnregisterSharedMemory(BasicMemoryChunk* chunk);
void RegisterReadOnlyMemory(ReadOnlyPage* page);
private:
void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,


@ -21,8 +21,9 @@ ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
#ifdef V8_SHARED_RO_HEAP
// This fails if we are creating heap objects and the roots haven't yet been
// copied into the read-only heap.
if (shared_ro_heap_ != nullptr && shared_ro_heap_->init_complete_) {
return ReadOnlyRoots(shared_ro_heap_->read_only_roots_);
auto* shared_ro_heap = SoleReadOnlyHeap::shared_ro_heap_;
if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) {
return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
}
#endif // V8_SHARED_RO_HEAP
return ReadOnlyRoots(GetHeapFromWritableObject(object));


@ -4,12 +4,12 @@
#include "src/heap/read-only-heap.h"
#include <cstddef>
#include <cstring>
#include "include/v8.h"
#include "src/base/lazy-instance.h"
#include "src/base/lsan.h"
#include "src/base/platform/mutex.h"
#include "src/common/ptr-compr-inl.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/memory-chunk.h"
@ -19,11 +19,11 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
#ifdef V8_SHARED_RO_HEAP
namespace {
// Mutex used to ensure that ReadOnlyArtifacts creation is only done once.
base::LazyMutex read_only_heap_creation_mutex_ = LAZY_MUTEX_INITIALIZER;
@ -36,82 +36,73 @@ base::LazyInstance<std::weak_ptr<ReadOnlyArtifacts>>::type
read_only_artifacts_ = LAZY_INSTANCE_INITIALIZER;
std::shared_ptr<ReadOnlyArtifacts> InitializeSharedReadOnlyArtifacts() {
auto artifacts = std::make_shared<ReadOnlyArtifacts>();
std::shared_ptr<ReadOnlyArtifacts> artifacts;
if (COMPRESS_POINTERS_BOOL) {
artifacts = std::make_shared<PointerCompressedReadOnlyArtifacts>();
} else {
artifacts = std::make_shared<SingleCopyReadOnlyArtifacts>();
}
*read_only_artifacts_.Pointer() = artifacts;
return artifacts;
}
} // namespace
bool ReadOnlyHeap::IsSharedMemoryAvailable() {
static bool shared_memory_allocation_supported =
GetPlatformPageAllocator()->CanAllocateSharedPages();
return shared_memory_allocation_supported;
}
// This ReadOnlyHeap instance will only be accessed by Isolates that are already
// set up. As such it doesn't need to be guarded by a mutex or shared_ptrs,
// since an already set up Isolate will hold a shared_ptr to
// read_only_artifacts_.
ReadOnlyHeap* ReadOnlyHeap::shared_ro_heap_ = nullptr;
#endif
SoleReadOnlyHeap* SoleReadOnlyHeap::shared_ro_heap_ = nullptr;
// static
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
bool read_only_heap_created = false;
if (des != nullptr) {
base::MutexGuard guard(read_only_heap_creation_mutex_.Pointer());
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
if (!artifacts) {
artifacts = InitializeSharedReadOnlyArtifacts();
shared_ro_heap_ = CreateAndAttachToIsolate(isolate, artifacts);
#ifdef DEBUG
shared_ro_heap_->read_only_blob_checksum_ = des->GetChecksum();
#endif // DEBUG
shared_ro_heap_->DeseralizeIntoIsolate(isolate, des);
read_only_heap_created = true;
if (IsReadOnlySpaceShared()) {
ReadOnlyHeap* ro_heap;
if (des != nullptr) {
bool read_only_heap_created = false;
base::MutexGuard guard(read_only_heap_creation_mutex_.Pointer());
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
if (!artifacts) {
artifacts = InitializeSharedReadOnlyArtifacts();
artifacts->InitializeChecksum(des);
ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts);
ro_heap->DeseralizeIntoIsolate(isolate, des);
read_only_heap_created = true;
} else {
// With pointer compression, there is one ReadOnlyHeap per Isolate.
// Without PC, there is only one shared between all Isolates.
ro_heap = artifacts->GetReadOnlyHeapForIsolate(isolate);
isolate->SetUpFromReadOnlyArtifacts(artifacts, ro_heap);
}
artifacts->VerifyChecksum(des, read_only_heap_created);
ro_heap->InitializeIsolateRoots(isolate);
} else {
isolate->SetUpFromReadOnlyArtifacts(artifacts);
// This path should only be taken in mksnapshot, should only be run once
// before tearing down the Isolate that holds this ReadOnlyArtifacts and
// is not thread-safe.
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
CHECK(!artifacts);
artifacts = InitializeSharedReadOnlyArtifacts();
ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts);
artifacts->VerifyChecksum(des, true);
}
} else {
// This path should only be taken in mksnapshot, should only be run once
// before tearing down the Isolate that holds this ReadOnlyArtifacts and is
// not thread-safe.
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
CHECK(!artifacts);
artifacts = InitializeSharedReadOnlyArtifacts();
shared_ro_heap_ = CreateAndAttachToIsolate(isolate, artifacts);
read_only_heap_created = true;
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
isolate->SetUpFromReadOnlyArtifacts(nullptr, ro_heap);
if (des != nullptr) {
ro_heap->DeseralizeIntoIsolate(isolate, des);
}
}
#ifdef DEBUG
const base::Optional<uint32_t> last_checksum =
shared_ro_heap_->read_only_blob_checksum_;
if (last_checksum) {
// The read-only heap was set up from a snapshot. Make sure it's the always
// the same snapshot.
CHECK_WITH_MSG(des->GetChecksum(),
"Attempt to create the read-only heap after already "
"creating from a snapshot.");
CHECK_EQ(last_checksum, des->GetChecksum());
} else {
// The read-only heap objects were created. Make sure this happens only
// once, during this call.
CHECK(read_only_heap_created);
}
#endif // DEBUG
USE(read_only_heap_created);
if (des != nullptr) {
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(isolate_ro_roots, shared_ro_heap_->read_only_roots_,
kEntriesCount * sizeof(Address));
}
#else
auto artifacts = std::make_shared<ReadOnlyArtifacts>();
auto* ro_heap = CreateAndAttachToIsolate(isolate, artifacts);
if (des != nullptr) ro_heap->DeseralizeIntoIsolate(isolate, des);
#endif // V8_SHARED_RO_HEAP
}
void ReadOnlyHeap::DeseralizeIntoIsolate(Isolate* isolate,
@ -126,41 +117,78 @@ void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
InitFromIsolate(isolate);
}
// Only for compressed spaces
ReadOnlyHeap::ReadOnlyHeap(ReadOnlyHeap* ro_heap, ReadOnlySpace* ro_space)
: read_only_space_(ro_space),
read_only_object_cache_(ro_heap->read_only_object_cache_) {
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
DCHECK(COMPRESS_POINTERS_BOOL);
}
// static
ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(
ReadOnlyHeap* ReadOnlyHeap::CreateInitalHeapForBootstrapping(
Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts) {
std::unique_ptr<ReadOnlyHeap> ro_heap(
new ReadOnlyHeap(new ReadOnlySpace(isolate->heap())));
DCHECK(IsReadOnlySpaceShared());
std::unique_ptr<ReadOnlyHeap> ro_heap;
auto* ro_space = new ReadOnlySpace(isolate->heap());
if (COMPRESS_POINTERS_BOOL) {
ro_heap.reset(new ReadOnlyHeap(ro_space));
} else {
std::unique_ptr<SoleReadOnlyHeap> sole_ro_heap(
new SoleReadOnlyHeap(ro_space));
// The global shared ReadOnlyHeap is only used without pointer compression.
SoleReadOnlyHeap::shared_ro_heap_ = sole_ro_heap.get();
ro_heap = std::move(sole_ro_heap);
}
artifacts->set_read_only_heap(std::move(ro_heap));
isolate->SetUpFromReadOnlyArtifacts(artifacts);
isolate->SetUpFromReadOnlyArtifacts(artifacts, artifacts->read_only_heap());
return artifacts->read_only_heap();
}
void SoleReadOnlyHeap::InitializeIsolateRoots(Isolate* isolate) {
void* const isolate_ro_roots =
isolate->roots_table().read_only_roots_begin().location();
std::memcpy(isolate_ro_roots, read_only_roots_,
kEntriesCount * sizeof(Address));
}
void SoleReadOnlyHeap::InitializeFromIsolateRoots(Isolate* isolate) {
void* const isolate_ro_roots =
isolate->roots_table().read_only_roots_begin().location();
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
}
void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
DCHECK(!init_complete_);
read_only_space_->ShrinkPages();
#ifdef V8_SHARED_RO_HEAP
std::shared_ptr<ReadOnlyArtifacts> artifacts(*read_only_artifacts_.Pointer());
read_only_space()->DetachPagesAndAddToArtifacts(artifacts);
read_only_space_ = artifacts->shared_read_only_space();
if (IsReadOnlySpaceShared()) {
InitializeFromIsolateRoots(isolate);
std::shared_ptr<ReadOnlyArtifacts> artifacts(
*read_only_artifacts_.Pointer());
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
// N.B. Since pages are manually allocated with mmap, Lsan doesn't track their
// pointers. Seal explicitly ignores the necessary objects.
LSAN_IGNORE_OBJECT(this);
#else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
read_only_space()->DetachPagesAndAddToArtifacts(artifacts);
artifacts->ReinstallReadOnlySpace(isolate);
read_only_space_ = artifacts->shared_read_only_space();
#ifdef DEBUG
artifacts->VerifyHeapAndSpaceRelationships(isolate);
#endif
} else {
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
}
init_complete_ = true;
}
void ReadOnlyHeap::OnHeapTearDown() {
#ifndef V8_SHARED_RO_HEAP
void ReadOnlyHeap::OnHeapTearDown(Heap* heap) {
read_only_space_->TearDown(heap->memory_allocator());
delete read_only_space_;
#endif
}
void SoleReadOnlyHeap::OnHeapTearDown(Heap* heap) {
// Do nothing as ReadOnlyHeap is shared between all Isolates.
}
// static
@ -169,17 +197,17 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
statistics->read_only_space_size_ = 0;
statistics->read_only_space_used_size_ = 0;
statistics->read_only_space_physical_size_ = 0;
#ifdef V8_SHARED_RO_HEAP
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
if (artifacts) {
auto ro_space = artifacts->shared_read_only_space();
statistics->read_only_space_size_ = ro_space->CommittedMemory();
statistics->read_only_space_used_size_ = ro_space->Size();
statistics->read_only_space_physical_size_ =
ro_space->CommittedPhysicalMemory();
if (IsReadOnlySpaceShared()) {
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
if (artifacts) {
auto* ro_space = artifacts->shared_read_only_space();
statistics->read_only_space_size_ = ro_space->CommittedMemory();
statistics->read_only_space_used_size_ = ro_space->Size();
statistics->read_only_space_physical_size_ =
ro_space->CommittedPhysicalMemory();
}
}
#endif // V8_SHARED_RO_HEAP
}
// static
@ -220,7 +248,7 @@ ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
: ro_space->pages().begin()),
current_addr_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
? Address()
: (*current_page_)->area_start()) {}
: (*current_page_)->GetAreaStart()) {}
HeapObject ReadOnlyHeapObjectIterator::Next() {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
@ -231,17 +259,19 @@ HeapObject ReadOnlyHeapObjectIterator::Next() {
return HeapObject();
}
BasicMemoryChunk* current_page = *current_page_;
ReadOnlyPage* current_page = *current_page_;
for (;;) {
DCHECK_LE(current_addr_, current_page->area_end());
if (current_addr_ == current_page->area_end()) {
Address end = current_page->address() + current_page->area_size() +
MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(RO_SPACE);
DCHECK_LE(current_addr_, end);
if (current_addr_ == end) {
// Progress to the next page.
++current_page_;
if (current_page_ == ro_space_->pages().end()) {
return HeapObject();
}
current_page = *current_page_;
current_addr_ = current_page->area_start();
current_addr_ = current_page->GetAreaStart();
}
if (current_addr_ == ro_space_->top() &&


@ -28,14 +28,17 @@ class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
class ReadOnlyPage;
class ReadOnlySpace;
class SharedReadOnlySpace;
// This class transparently manages read-only space, roots and cache creation
// and destruction.
class ReadOnlyHeap final {
class ReadOnlyHeap {
public:
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
virtual ~ReadOnlyHeap() = default;
// If necessary creates read-only heap and initializes its artifacts (if the
// deserializer is provided). Then attaches the read-only heap to the isolate.
// If the deserializer is not provided, then the read-only heap will be only
@ -52,7 +55,7 @@ class ReadOnlyHeap final {
void OnCreateHeapObjectsComplete(Isolate* isolate);
// Indicates that the current isolate no longer requires the read-only heap
// and it may be safely disposed of.
void OnHeapTearDown();
virtual void OnHeapTearDown(Heap* heap);
// If the read-only heap is shared, then populate |statistics| with its stats,
// otherwise the read-only heap stats are set to 0.
static void PopulateReadOnlySpaceStatistics(
@ -77,9 +80,24 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
// Creates a new read-only heap and attaches it to the provided isolate.
static ReadOnlyHeap* CreateAndAttachToIsolate(
// Returns whether the ReadOnlySpace will actually be shared taking into
// account whether shared memory is available with pointer compression.
static bool IsReadOnlySpaceShared() {
return V8_SHARED_RO_HEAP_BOOL &&
(!COMPRESS_POINTERS_BOOL || IsSharedMemoryAvailable());
}
virtual void InitializeIsolateRoots(Isolate* isolate) {}
virtual void InitializeFromIsolateRoots(Isolate* isolate) {}
virtual bool IsOwnedByIsolate() { return true; }
protected:
friend class ReadOnlyArtifacts;
friend class PointerCompressedReadOnlyArtifacts;
// Creates a new read-only heap and attaches it to the provided isolate. Only
// used the first time when creating a ReadOnlyHeap for sharing.
static ReadOnlyHeap* CreateInitalHeapForBootstrapping(
Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts);
// Runs the read-only deserializer and calls InitFromIsolate to complete
// read-only heap initialization.
@ -94,21 +112,33 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space_ = nullptr;
std::vector<Object> read_only_object_cache_;
#ifdef V8_SHARED_RO_HEAP
#ifdef DEBUG
// The checksum of the blob the read-only heap was deserialized from, if any.
base::Optional<uint32_t> read_only_blob_checksum_;
#endif // DEBUG
Address read_only_roots_[kEntriesCount];
V8_EXPORT_PRIVATE static ReadOnlyHeap* shared_ro_heap_;
#endif // V8_SHARED_RO_HEAP
// Returns whether shared memory can be allocated and then remapped to
// additional addresses.
static bool IsSharedMemoryAvailable();
explicit ReadOnlyHeap(ReadOnlySpace* ro_space) : read_only_space_(ro_space) {}
ReadOnlyHeap(ReadOnlyHeap* ro_heap, ReadOnlySpace* ro_space);
DISALLOW_COPY_AND_ASSIGN(ReadOnlyHeap);
};
// This is used without pointer compression when there is just a single
// ReadOnlyHeap object shared between all Isolates.
class SoleReadOnlyHeap : public ReadOnlyHeap {
public:
void InitializeIsolateRoots(Isolate* isolate) override;
void InitializeFromIsolateRoots(Isolate* isolate) override;
void OnHeapTearDown(Heap* heap) override;
bool IsOwnedByIsolate() override { return false; }
private:
friend class ReadOnlyHeap;
explicit SoleReadOnlyHeap(ReadOnlySpace* ro_space) : ReadOnlyHeap(ro_space) {}
Address read_only_roots_[kEntriesCount];
V8_EXPORT_PRIVATE static SoleReadOnlyHeap* shared_ro_heap_;
};
// This class enables iterating over all read-only heap objects.
class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
public:


@ -4,10 +4,15 @@
#include "src/heap/read-only-spaces.h"
#include <memory>
#include "include/v8-internal.h"
#include "src/base/lsan.h"
#include "include/v8-platform.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/execution/isolate.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
@ -15,11 +20,251 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/string.h"
#include "src/snapshot/read-only-deserializer.h"
namespace v8 {
namespace internal {
void CopyAndRebaseRoots(Address* src, Address* dst, Address new_base) {
Address src_base = GetIsolateRoot(src[0]);
for (size_t i = 0; i < ReadOnlyHeap::kEntriesCount; ++i) {
dst[i] = src[i] - src_base + new_base;
}
}
void ReadOnlyArtifacts::set_read_only_heap(
std::unique_ptr<ReadOnlyHeap> read_only_heap) {
read_only_heap_ = std::move(read_only_heap);
}
void ReadOnlyArtifacts::InitializeChecksum(ReadOnlyDeserializer* des) {
#ifdef DEBUG
read_only_blob_checksum_ = des->GetChecksum();
#endif // DEBUG
}
void ReadOnlyArtifacts::VerifyChecksum(ReadOnlyDeserializer* des,
bool read_only_heap_created) {
#ifdef DEBUG
if (read_only_blob_checksum_) {
// The read-only heap was set up from a snapshot. Make sure it's always
// the same snapshot.
CHECK_WITH_MSG(des->GetChecksum(),
"Attempt to create the read-only heap after already "
"creating from a snapshot.");
CHECK_EQ(read_only_blob_checksum_, des->GetChecksum());
} else {
// If there's no checksum, then that means the read-only heap objects are
// being created.
CHECK(read_only_heap_created);
}
#endif // DEBUG
}
SingleCopyReadOnlyArtifacts::~SingleCopyReadOnlyArtifacts() {
// This particular SharedReadOnlySpace should not destroy its own pages as
// TearDown requires MemoryAllocator which itself is tied to an Isolate.
shared_read_only_space_->pages_.resize(0);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
for (ReadOnlyPage* chunk : pages_) {
void* chunk_address = reinterpret_cast<void*>(chunk->address());
size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
CHECK(page_allocator->FreePages(chunk_address, size));
}
}
ReadOnlyHeap* SingleCopyReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
Isolate* isolate) {
return read_only_heap();
}
void SingleCopyReadOnlyArtifacts::Initialize(Isolate* isolate,
std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) {
pages_ = std::move(pages);
set_accounting_stats(stats);
set_shared_read_only_space(
std::make_unique<SharedReadOnlySpace>(isolate->heap(), this));
}
void SingleCopyReadOnlyArtifacts::ReinstallReadOnlySpace(Isolate* isolate) {
isolate->heap()->ReplaceReadOnlySpace(shared_read_only_space());
}
void SingleCopyReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(
Isolate* isolate) {
DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());
// Confirm the Isolate is using the shared ReadOnlyHeap and ReadOnlySpace.
DCHECK_EQ(read_only_heap(), isolate->read_only_heap());
DCHECK_EQ(shared_read_only_space(), isolate->heap()->read_only_space());
}
void PointerCompressedReadOnlyArtifacts::InitializeRootsFrom(Isolate* isolate) {
auto isolate_ro_roots =
isolate->roots_table().read_only_roots_begin().location();
CopyAndRebaseRoots(isolate_ro_roots, read_only_roots_, 0);
}
void PointerCompressedReadOnlyArtifacts::InitializeRootsIn(Isolate* isolate) {
auto isolate_ro_roots =
isolate->roots_table().read_only_roots_begin().location();
CopyAndRebaseRoots(read_only_roots_, isolate_ro_roots,
GetIsolateRoot(isolate));
}
SharedReadOnlySpace* PointerCompressedReadOnlyArtifacts::CreateReadOnlySpace(
Isolate* isolate) {
AllocationStats new_stats;
new_stats.IncreaseCapacity(accounting_stats().Capacity());
std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>> mappings;
std::vector<ReadOnlyPage*> pages;
Address isolate_root = GetIsolateRoot(isolate);
for (size_t i = 0; i < pages_.size(); ++i) {
const ReadOnlyPage* page = pages_[i];
const Tagged_t offset = OffsetForPage(i);
Address new_address = isolate_root + offset;
ReadOnlyPage* new_page = nullptr;
bool success = isolate->heap()
->memory_allocator()
->data_page_allocator()
->ReserveForSharedMemoryMapping(
reinterpret_cast<void*>(new_address), page->size());
CHECK(success);
auto shared_memory = RemapPageTo(i, new_address, new_page);
// Later it's possible that this might fail, but for now on Linux this is
// not possible. When we move onto Windows, it's not possible to reserve
// memory and then map into the middle of it, at which point we will have to
// reserve the memory, free it and then attempt to remap to it, which could
// fail. At that point this will need to change.
CHECK(shared_memory);
CHECK_NOT_NULL(new_page);
new_stats.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
mappings.push_back(std::move(shared_memory));
pages.push_back(new_page);
}
auto* shared_read_only_space =
new SharedReadOnlySpace(isolate->heap(), std::move(pages),
std::move(mappings), std::move(new_stats));
return shared_read_only_space;
}
ReadOnlyHeap* PointerCompressedReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
Isolate* isolate) {
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
InitializeRootsIn(isolate);
SharedReadOnlySpace* shared_read_only_space = CreateReadOnlySpace(isolate);
ReadOnlyHeap* read_only_heap = new ReadOnlyHeap(shared_read_only_space);
// TODO(v8:10699): The cache should just live uncompressed in
// ReadOnlyArtifacts and be decompressed on the fly.
auto original_cache = read_only_heap_->read_only_object_cache_;
auto& cache = read_only_heap->read_only_object_cache_;
Address isolate_root = GetIsolateRoot(isolate);
for (Object original_object : original_cache) {
Address original_address = original_object.ptr();
Address new_address = isolate_root + CompressTagged(original_address);
Object new_object = Object(new_address);
cache.push_back(new_object);
}
return read_only_heap;
}
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
PointerCompressedReadOnlyArtifacts::RemapPageTo(size_t i, Address new_address,
ReadOnlyPage*& new_page) {
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> mapping =
shared_memory_[i]->RemapTo(reinterpret_cast<void*>(new_address));
if (mapping) {
new_page = static_cast<ReadOnlyPage*>(reinterpret_cast<void*>(new_address));
return mapping;
} else {
return {};
}
}
void PointerCompressedReadOnlyArtifacts::Initialize(
Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) {
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
DCHECK(pages_.empty());
DCHECK(!pages.empty());
// It's not possible to copy the AllocationStats directly as the new pages
// will be mapped to different addresses.
stats_.IncreaseCapacity(stats.Capacity());
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
DCHECK(page_allocator->CanAllocateSharedPages());
for (const ReadOnlyPage* page : pages) {
size_t size = RoundUp(page->size(), page_allocator->AllocatePageSize());
// 1. Allocate some new memory for a shared copy of the page and copy the
// original contents into it. Doesn't need to be V8 page aligned, since
// we'll never use it directly.
auto shared_memory = page_allocator->AllocateSharedPages(size, page);
void* ptr = shared_memory->GetMemory();
CHECK_NOT_NULL(ptr);
// 2. Copy the contents of the original page into the shared page.
ReadOnlyPage* new_page = reinterpret_cast<ReadOnlyPage*>(ptr);
pages_.push_back(new_page);
shared_memory_.push_back(std::move(shared_memory));
// This is just CompressTagged but inlined so it will always compile.
Tagged_t compressed_address = CompressTagged(page->address());
page_offsets_.push_back(compressed_address);
// 3. Update the accounting stats so the allocated bytes are for the new
// shared page rather than the original.
stats_.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
}
InitializeRootsFrom(isolate);
set_shared_read_only_space(
std::make_unique<SharedReadOnlySpace>(isolate->heap(), this));
}
void PointerCompressedReadOnlyArtifacts::ReinstallReadOnlySpace(
Isolate* isolate) {
// We need to build a new SharedReadOnlySpace that occupies the same memory as
// the original one, so first the original space's pages must be freed.
Heap* heap = isolate->heap();
heap->read_only_space()->TearDown(heap->memory_allocator());
heap->ReplaceReadOnlySpace(CreateReadOnlySpace(heap->isolate()));
DCHECK_NE(heap->read_only_space(), shared_read_only_space());
// Also recreate the ReadOnlyHeap using this space.
auto* ro_heap = new ReadOnlyHeap(isolate->read_only_heap(),
isolate->heap()->read_only_space());
isolate->set_read_only_heap(ro_heap);
DCHECK_NE(*isolate->roots_table().read_only_roots_begin().location(), 0);
}
void PointerCompressedReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(
Isolate* isolate) {
// Confirm the canonical versions of the ReadOnlySpace/ReadOnlyHeap from the
// ReadOnlyArtifacts are not accidentally present in a real Isolate (which
// might destroy them) and the ReadOnlyHeaps and Spaces are correctly
// associated with each other.
DCHECK_NE(shared_read_only_space(), isolate->heap()->read_only_space());
DCHECK_NE(read_only_heap(), isolate->read_only_heap());
DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());
DCHECK_EQ(isolate->read_only_heap()->read_only_space(),
isolate->heap()->read_only_space());
}
// -----------------------------------------------------------------------------
// ReadOnlySpace implementation
@ -31,62 +276,44 @@ ReadOnlySpace::ReadOnlySpace(Heap* heap)
capacity_(0),
area_size_(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE)) {}
ReadOnlySpace::~ReadOnlySpace() {
Unseal();
// Needs to be defined in the cc file to force the vtable to be emitted in
// component builds.
ReadOnlySpace::~ReadOnlySpace() = default;
void SharedReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
// SharedReadOnlySpaces do not tear down their own pages since they are either
// freed down by the ReadOnlyArtifacts that contains them or in the case of
// pointer compression, they are freed when the SharedMemoryMappings are
// freed.
pages_.resize(0);
accounting_stats_.Clear();
}
void ReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
for (ReadOnlyPage* chunk : pages_) {
heap()->memory_allocator()->FreeReadOnlyPage(chunk);
memory_allocator->FreeReadOnlyPage(chunk);
}
pages_.resize(0);
accounting_stats_.Clear();
}
ReadOnlyArtifacts::~ReadOnlyArtifacts() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
for (ReadOnlyPage* chunk : pages_) {
void* chunk_address = reinterpret_cast<void*>(chunk->address());
page_allocator->SetPermissions(chunk_address, chunk->size(),
PageAllocator::kReadWrite);
size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
CHECK(page_allocator->FreePages(chunk_address, size));
}
}
void ReadOnlyArtifacts::set_read_only_heap(
std::unique_ptr<ReadOnlyHeap> read_only_heap) {
read_only_heap_ = std::move(read_only_heap);
}
SharedReadOnlySpace::~SharedReadOnlySpace() {
// Clear the chunk list before the space is deleted, so that the inherited
// destructors don't try to destroy the BasicMemoryChunks themselves.
pages_.resize(0);
}
SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts)
: ReadOnlySpace(heap) {
pages_ = artifacts->pages();
is_marked_read_only_ = true;
accounting_stats_ = artifacts->accounting_stats();
top_ = kNullAddress;
limit_ = kNullAddress;
}
void ReadOnlySpace::DetachPagesAndAddToArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts) {
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
Heap* heap = ReadOnlySpace::heap();
Seal(SealMode::kDetachFromHeapAndForget);
artifacts->set_accounting_stats(accounting_stats_);
artifacts->TransferPages(std::move(pages_));
artifacts->set_shared_read_only_space(
std::make_unique<SharedReadOnlySpace>(heap, artifacts));
heap->ReplaceReadOnlySpace(artifacts->shared_read_only_space());
// Without pointer compression, ReadOnlySpace pages are directly shared
// between all heaps and so must be unregistered from their originating
// allocator.
Seal(COMPRESS_POINTERS_BOOL ? SealMode::kDetachFromHeap
: SealMode::kDetachFromHeapAndUnregisterMemory);
artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}
void ReadOnlyPage::MakeHeaderRelocatable() {
heap_ = nullptr;
owner_ = nullptr;
reservation_.Reset();
}
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
@ -147,11 +374,15 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {
is_marked_read_only_ = true;
auto* memory_allocator = heap()->memory_allocator();
if (ro_mode == SealMode::kDetachFromHeapAndForget) {
if (ro_mode != SealMode::kDoNotDetachFromHeap) {
DetachFromHeap();
for (BasicMemoryChunk* chunk : pages_) {
memory_allocator->UnregisterMemory(chunk);
static_cast<ReadOnlyPage*>(chunk)->MakeHeaderRelocatable();
for (ReadOnlyPage* p : pages_) {
if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
memory_allocator->UnregisterMemory(p);
}
if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
p->MakeHeaderRelocatable();
}
}
}
@ -252,11 +483,11 @@ void ReadOnlySpace::Verify(Isolate* isolate) {
VerifyReadOnlyPointersVisitor visitor(isolate->heap());
for (BasicMemoryChunk* page : pages_) {
#ifdef V8_SHARED_RO_HEAP
CHECK_NULL(page->owner());
#else
CHECK_EQ(page->owner(), this);
#endif
if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
CHECK_NULL(page->owner());
} else {
CHECK_EQ(page->owner(), this);
}
if (page == Page::FromAllocationAreaAddress(top_)) {
allocation_pointer_found_in_space = true;
@ -517,5 +748,48 @@ ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
return page;
}
SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
: SharedReadOnlySpace(heap) {
// This constructor should only be used when RO_SPACE is shared with pointer
// compression.
DCHECK(V8_SHARED_RO_HEAP_BOOL);
DCHECK(COMPRESS_POINTERS_BOOL);
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
DCHECK(!artifacts->pages().empty());
accounting_stats_.IncreaseCapacity(artifacts->accounting_stats().Capacity());
for (ReadOnlyPage* page : artifacts->pages()) {
pages_.push_back(page);
accounting_stats_.IncreaseAllocatedBytes(page->allocated_bytes(), page);
}
}
SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, std::vector<ReadOnlyPage*>&& new_pages,
std::vector<std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>>&&
mappings,
AllocationStats&& new_stats)
: SharedReadOnlySpace(heap) {
DCHECK(V8_SHARED_RO_HEAP_BOOL);
DCHECK(COMPRESS_POINTERS_BOOL);
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
accounting_stats_ = std::move(new_stats);
pages_ = std::move(new_pages);
shared_memory_mappings_ = std::move(mappings);
}
SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
SingleCopyReadOnlyArtifacts* artifacts)
: SharedReadOnlySpace(heap) {
// This constructor should only be used when RO_SPACE is shared without
// pointer compression.
DCHECK(V8_SHARED_RO_HEAP_BOOL);
DCHECK(!COMPRESS_POINTERS_BOOL);
accounting_stats_ = artifacts->accounting_stats();
pages_ = artifacts->pages();
}
} // namespace internal
} // namespace v8


@ -20,6 +20,7 @@
namespace v8 {
namespace internal {
class ReadOnlyDeserializer;
class MemoryAllocator;
class ReadOnlyHeap;
@ -46,6 +47,13 @@ class ReadOnlyPage : public BasicMemoryChunk {
return address_in_page;
}
// Returns the start area of the page without using area_start() which cannot
// return the correct result when the page is remapped multiple times.
Address GetAreaStart() const {
return address() +
MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(RO_SPACE);
}
private:
friend class ReadOnlySpace;
};
@ -54,9 +62,32 @@ class ReadOnlyPage : public BasicMemoryChunk {
// Artifacts used to construct a new SharedReadOnlySpace
class ReadOnlyArtifacts {
public:
~ReadOnlyArtifacts();
virtual ~ReadOnlyArtifacts() = default;
// Initialize the ReadOnlyArtifacts from an Isolate that has just been created
// either by serialization or by creating the objects directly.
virtual void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) = 0;
// This replaces the ReadOnlySpace in the given Heap with a newly constructed
// SharedReadOnlySpace that has pages created from the ReadOnlyArtifacts. This
// is only called for the first Isolate, where the ReadOnlySpace is created
// during the bootstrap process.
virtual void ReinstallReadOnlySpace(Isolate* isolate) = 0;
// Creates a ReadOnlyHeap for a specific Isolate. This will be populated with
// a SharedReadOnlySpace object that points to the Isolate's heap. Should only
// be used when the read-only heap memory is shared with or without pointer
// compression. This is called for all subsequent Isolates created after the
// first one.
virtual ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) = 0;
virtual void VerifyHeapAndSpaceRelationships(Isolate* isolate) = 0;
std::vector<ReadOnlyPage*>& pages() { return pages_; }
void set_accounting_stats(const AllocationStats& stats) { stats_ = stats; }
const AllocationStats& accounting_stats() const { return stats_; }
void set_shared_read_only_space(
std::unique_ptr<SharedReadOnlySpace> shared_space) {
@ -66,21 +97,68 @@ class ReadOnlyArtifacts {
return shared_read_only_space_.get();
}
std::vector<ReadOnlyPage*>& pages() { return pages_; }
void TransferPages(std::vector<ReadOnlyPage*>&& pages) {
pages_ = std::move(pages);
}
const AllocationStats& accounting_stats() const { return stats_; }
void set_read_only_heap(std::unique_ptr<ReadOnlyHeap> read_only_heap);
ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); }
ReadOnlyHeap* read_only_heap() const { return read_only_heap_.get(); }
void InitializeChecksum(ReadOnlyDeserializer* des);
void VerifyChecksum(ReadOnlyDeserializer* des, bool read_only_heap_created);
protected:
ReadOnlyArtifacts() = default;
private:
std::vector<ReadOnlyPage*> pages_;
AllocationStats stats_;
std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
std::unique_ptr<ReadOnlyHeap> read_only_heap_;
#ifdef DEBUG
// The checksum of the blob the read-only heap was deserialized from, if
// any.
base::Optional<uint32_t> read_only_blob_checksum_;
#endif // DEBUG
};
// -----------------------------------------------------------------------------
// Artifacts used to construct a new SharedReadOnlySpace when pointer
// compression is disabled and so there is a single ReadOnlySpace with one set
// of pages shared between all Isolates.
class SingleCopyReadOnlyArtifacts : public ReadOnlyArtifacts {
public:
~SingleCopyReadOnlyArtifacts() override;
ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) override;
void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) override;
void ReinstallReadOnlySpace(Isolate* isolate) override;
void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;
};
// -----------------------------------------------------------------------------
// Artifacts used to construct a new SharedReadOnlySpace when pointer
// compression is enabled and so there is a ReadOnlySpace for each Isolate
// with its own set of pages mapped from the canonical set stored here.
class PointerCompressedReadOnlyArtifacts : public ReadOnlyArtifacts {
public:
ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) override;
void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) override;
void ReinstallReadOnlySpace(Isolate* isolate) override;
void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;
private:
SharedReadOnlySpace* CreateReadOnlySpace(Isolate* isolate);
Tagged_t OffsetForPage(size_t index) const { return page_offsets_[index]; }
void InitializeRootsIn(Isolate* isolate);
void InitializeRootsFrom(Isolate* isolate);
std::unique_ptr<v8::PageAllocator::SharedMemoryMapping> RemapPageTo(
size_t i, Address new_address, ReadOnlyPage*& new_page);
static constexpr size_t kReadOnlyRootsCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
Address read_only_roots_[kReadOnlyRootsCount];
std::vector<Tagged_t> page_offsets_;
std::vector<std::unique_ptr<PageAllocator::SharedMemory>> shared_memory_;
};
// -----------------------------------------------------------------------------
@ -89,12 +167,14 @@ class ReadOnlySpace : public BaseSpace {
public:
V8_EXPORT_PRIVATE explicit ReadOnlySpace(Heap* heap);
// Detach the pages and them to artifacts for using in creating a
// SharedReadOnlySpace.
// Detach the pages and add them to artifacts for using in creating a
// SharedReadOnlySpace. Since the current space no longer has any pages, it
// should be replaced straight after this in its Heap.
void DetachPagesAndAddToArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts);
V8_EXPORT_PRIVATE ~ReadOnlySpace() override;
V8_EXPORT_PRIVATE virtual void TearDown(MemoryAllocator* memory_allocator);
bool IsDetached() const { return heap_ == nullptr; }
@ -109,7 +189,11 @@ class ReadOnlySpace : public BaseSpace {
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
enum class SealMode {
kDetachFromHeap,
kDetachFromHeapAndUnregisterMemory,
kDoNotDetachFromHeap
};
// Seal the space by marking it read-only, optionally detaching it
// from the heap and forgetting it for memory bookkeeping purposes (e.g.
@ -145,6 +229,8 @@ class ReadOnlySpace : public BaseSpace {
Address FirstPageAddress() const { return pages_.front()->address(); }
protected:
friend class SingleCopyReadOnlyArtifacts;
void SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access);
@ -184,8 +270,26 @@ class ReadOnlySpace : public BaseSpace {
class SharedReadOnlySpace : public ReadOnlySpace {
public:
SharedReadOnlySpace(Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts);
~SharedReadOnlySpace() override;
explicit SharedReadOnlySpace(Heap* heap) : ReadOnlySpace(heap) {
is_marked_read_only_ = true;
}
SharedReadOnlySpace(Heap* heap,
PointerCompressedReadOnlyArtifacts* artifacts);
SharedReadOnlySpace(
Heap* heap, std::vector<ReadOnlyPage*>&& new_pages,
std::vector<std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>>&&
mappings,
AllocationStats&& new_stats);
SharedReadOnlySpace(Heap* heap, SingleCopyReadOnlyArtifacts* artifacts);
SharedReadOnlySpace(const SharedReadOnlySpace&) = delete;
void TearDown(MemoryAllocator* memory_allocator) override;
// Holds any shared memory mapping that must be freed when the space is
// deallocated.
std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>>
shared_memory_mappings_;
};
} // namespace internal


@ -527,9 +527,11 @@ class RootsTable {
friend class Isolate;
friend class Heap;
friend class Factory;
friend class PointerCompressedReadOnlyArtifacts;
friend class ReadOnlyHeap;
friend class ReadOnlyRoots;
friend class RootsSerializer;
friend class SoleReadOnlyHeap;
};
class ReadOnlyRoots {


@ -276,5 +276,18 @@ void VirtualMemory::Free() {
RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
void VirtualMemory::FreeReadOnly() {
DCHECK(IsReserved());
// The only difference to Free is that it doesn't call Reset which would write
// to the VirtualMemory object.
v8::PageAllocator* page_allocator = page_allocator_;
base::AddressRegion region = region_;
// FreePages expects size to be aligned to allocation granularity however
// ReleasePages may leave size at only commit granularity. Align it here.
CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
} // namespace internal
} // namespace v8


@ -233,6 +233,10 @@ class VirtualMemory final {
// Frees all memory.
V8_EXPORT_PRIVATE void Free();
// As with Free but does not write to the VirtualMemory object itself so it
// can be called on a VirtualMemory that is itself not writable.
V8_EXPORT_PRIVATE void FreeReadOnly();
bool InVM(Address address, size_t size) {
return region_.contains(address, size);
}


@ -793,6 +793,25 @@ TEST(NoMemoryForNewPage) {
CHECK_NULL(page);
}
namespace {
// ReadOnlySpace cannot be torn down by a destructor because the destructor
// cannot take an argument. Since these tests create ReadOnlySpaces not attached
// to the Heap directly, they need to be destroyed to ensure the
// MemoryAllocator's stats are all 0 at exit.
class ReadOnlySpaceScope {
public:
explicit ReadOnlySpaceScope(Heap* heap) : ro_space_(heap) {}
~ReadOnlySpaceScope() {
ro_space_.TearDown(CcTest::heap()->memory_allocator());
}
ReadOnlySpace* space() { return &ro_space_; }
private:
ReadOnlySpace ro_space_;
};
} // namespace
TEST(ReadOnlySpaceMetrics_OnePage) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
@ -800,34 +819,35 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
// Create a read-only space and allocate some memory, shrink the pages and
// check the allocated object size is as expected.
ReadOnlySpace faked_space(heap);
ReadOnlySpaceScope scope(heap);
ReadOnlySpace* faked_space = scope.space();
// Initially no memory.
CHECK_EQ(faked_space.Size(), 0);
CHECK_EQ(faked_space.Capacity(), 0);
CHECK_EQ(faked_space.CommittedMemory(), 0);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);
CHECK_EQ(faked_space->Size(), 0);
CHECK_EQ(faked_space->Capacity(), 0);
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
faked_space.AllocateRaw(16, kWordAligned);
faked_space->AllocateRaw(16, kWordAligned);
faked_space.ShrinkPages();
faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
MemoryAllocator* allocator = heap->memory_allocator();
// Allocated objects size.
CHECK_EQ(faked_space.Size(), 16);
CHECK_EQ(faked_space->Size(), 16);
size_t committed_memory = RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space.Size(),
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space->Size(),
allocator->GetCommitPageSize());
// Amount of OS allocated memory.
CHECK_EQ(faked_space.CommittedMemory(), committed_memory);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), committed_memory);
// Capacity will be one OS page minus the page header.
CHECK_EQ(faked_space.Capacity(),
CHECK_EQ(faked_space->Capacity(),
committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
}
@ -838,13 +858,14 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
// Create a read-only space and allocate some memory, shrink the pages and
// check the allocated object size is as expected.
ReadOnlySpace faked_space(heap);
ReadOnlySpaceScope scope(heap);
ReadOnlySpace* faked_space = scope.space();
// Initially no memory.
CHECK_EQ(faked_space.Size(), 0);
CHECK_EQ(faked_space.Capacity(), 0);
CHECK_EQ(faked_space.CommittedMemory(), 0);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);
CHECK_EQ(faked_space->Size(), 0);
CHECK_EQ(faked_space->Capacity(), 0);
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
MemoryAllocator* allocator = heap->memory_allocator();
// Allocate an object just under an OS page in size.
@ -860,28 +881,28 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
#endif
HeapObject object =
faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
faked_space->AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
CHECK_EQ(object.address() % alignment, 0);
object =
faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
faked_space->AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
CHECK_EQ(object.address() % alignment, 0);
faked_space.ShrinkPages();
faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
// Allocated objects size may contain 4 bytes of padding on 32-bit or
// with pointer compression.
CHECK_EQ(faked_space.Size(), object_size + RoundUp(object_size, alignment));
CHECK_EQ(faked_space->Size(), object_size + RoundUp(object_size, alignment));
size_t committed_memory = RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space.Size(),
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space->Size(),
allocator->GetCommitPageSize());
CHECK_EQ(faked_space.CommittedMemory(), committed_memory);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), committed_memory);
// Capacity will be 3 OS pages minus the page header.
CHECK_EQ(faked_space.Capacity(),
CHECK_EQ(faked_space->Capacity(),
committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
}
@ -892,13 +913,14 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
// Create a read-only space and allocate some memory, shrink the pages and
// check the allocated object size is as expected.
ReadOnlySpace faked_space(heap);
ReadOnlySpaceScope scope(heap);
ReadOnlySpace* faked_space = scope.space();
// Initially no memory.
CHECK_EQ(faked_space.Size(), 0);
CHECK_EQ(faked_space.Capacity(), 0);
CHECK_EQ(faked_space.CommittedMemory(), 0);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);
CHECK_EQ(faked_space->Size(), 0);
CHECK_EQ(faked_space->Capacity(), 0);
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
MemoryAllocator* allocator = heap->memory_allocator();
@ -910,23 +932,23 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
kTaggedSize);
CHECK_GT(object_size * 2,
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
faked_space.AllocateRaw(object_size, kWordAligned);
faked_space->AllocateRaw(object_size, kWordAligned);
// Then allocate another so it expands the space to two pages.
faked_space.AllocateRaw(object_size, kWordAligned);
faked_space->AllocateRaw(object_size, kWordAligned);
faked_space.ShrinkPages();
faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
// Allocated objects size.
CHECK_EQ(faked_space.Size(), object_size * 2);
CHECK_EQ(faked_space->Size(), object_size * 2);
// Amount of OS allocated memory.
size_t committed_memory_per_page =
RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
allocator->GetCommitPageSize());
CHECK_EQ(faked_space.CommittedMemory(), 2 * committed_memory_per_page);
CHECK_EQ(faked_space.CommittedPhysicalMemory(),
CHECK_EQ(faked_space->CommittedMemory(), 2 * committed_memory_per_page);
CHECK_EQ(faked_space->CommittedPhysicalMemory(),
2 * committed_memory_per_page);
// Capacity will be the space up to the amount of committed memory minus the
@ -935,7 +957,7 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
allocator->GetCommitPageSize()) -
MemoryChunkLayout::ObjectStartOffsetInDataPage();
CHECK_EQ(faked_space.Capacity(), 2 * capacity_per_page);
CHECK_EQ(faked_space->Capacity(), 2 * capacity_per_page);
}
} // namespace heap