[static-roots] Support serializing read-only heap as a memory dump

Add a build mode that serializes the read-only heap as a memory dump in
the startup snapshot. This makes the compressed pointers of read-only
root objects statically known at mksnapshot time.

This CL also adds a feature to mksnapshot that dumps these static
addresses to a C++ header file, which will allow us to use the
addresses directly in the future.

The mode is disabled for now, since we first need build infrastructure
to conveniently regenerate the table when the layout changes.

Bug: v8:13466
Change-Id: I975b15bd89fedf713fb7d12b4929935ece78139d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4056181
Commit-Queue: Olivier Flückiger <olivf@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84815}
Author: Olivier Flückiger <olivf@chromium.org>
Committed: 2022-12-13 14:37:08 +00:00 by V8 LUCI CQ
Commit: afc49f4725 (parent: 2e9b4afa38)
27 changed files with 414 additions and 90 deletions
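
In essence, once the table is generated, resolving a read-only root no
longer requires an indirection through the Isolate's roots table: the
root's compressed pointer is a compile-time constant, and decompression
is a single add on top of the cage base. A minimal sketch of the idea
(the constant below is hypothetical, not a value from this CL):

  // Sketch: resolving a read-only root without touching the roots table.
  // kUndefinedValueCompressed stands in for a constant as it would be
  // emitted by mksnapshot --static-roots (value hypothetical).
  constexpr Tagged_t kUndefinedValueCompressed = 0x151;
  Address UndefinedValue(Address cage_base) {
    return cage_base + kUndefinedValueCompressed;  // a single add, no load
  }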

BUILD.bazel

@@ -2036,6 +2036,7 @@ filegroup(
"src/roots/roots-inl.h",
"src/roots/roots.cc",
"src/roots/roots.h",
"src/roots/static-roots.h",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
"src/runtime/runtime-bigint.cc",
@@ -3234,6 +3235,8 @@ filegroup(
"src/snapshot/embedded/platform-embedded-file-writer-mac.h",
"src/snapshot/embedded/platform-embedded-file-writer-win.cc",
"src/snapshot/embedded/platform-embedded-file-writer-win.h",
"src/snapshot/static-roots-gen.cc",
"src/snapshot/static-roots-gen.h",
"src/snapshot/mksnapshot.cc",
"src/snapshot/snapshot-empty.cc",
],

BUILD.gn

@@ -3508,6 +3508,7 @@ v8_header_set("v8_internal_headers") {
"src/regexp/special-case.h",
"src/roots/roots-inl.h",
"src/roots/roots.h",
"src/roots/static-roots.h",
"src/runtime/runtime-utils.h",
"src/runtime/runtime.h",
"src/sandbox/bounded-size-inl.h",
@@ -6227,6 +6228,8 @@ if (current_toolchain == v8_snapshot_toolchain) {
"src/snapshot/embedded/platform-embedded-file-writer-win.h",
"src/snapshot/mksnapshot.cc",
"src/snapshot/snapshot-empty.cc",
"src/snapshot/static-roots-gen.cc",
"src/snapshot/static-roots-gen.h",
]
if (v8_control_flow_integrity) {

src/common/globals.h

@@ -122,6 +122,17 @@ namespace internal {
#define V8_CAN_CREATE_SHARED_HEAP_BOOL false
#endif
// Disabling WASM or INTL invalidates the contents of static-roots.h
#if defined(V8_SHARED_RO_HEAP) && defined(V8_COMPRESS_POINTERS) && \
defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && V8_ENABLE_WEBASSEMBLY && \
defined(V8_INTL_SUPPORT)
// TODO(olivf, v8:13466): Add build infra to conveniently regenerate the
// static roots table and then enable it.
#define V8_STATIC_ROOTS_BOOL false
#else
#define V8_STATIC_ROOTS_BOOL false
#endif
#ifdef V8_ENABLE_SANDBOX
#define V8_ENABLE_SANDBOX_BOOL true
#else
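
Note that V8_STATIC_ROOTS_BOOL is always defined, either as true or false,
so it must be tested with #if rather than #ifdef, as the rest of this CL
does. A minimal sketch of the intended usage pattern:

  #if V8_STATIC_ROOTS_BOOL
  // Read-only roots sit at statically known compressed offsets.
  #else
  // Read-only roots are loaded from the Isolate's roots table.
  #endif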

src/execution/isolate.cc

@@ -34,6 +34,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/flush-instruction-cache.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
@@ -99,6 +100,7 @@
#include "src/profiler/heap-profiler.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/regexp/regexp-stack.h"
#include "src/roots/static-roots.h"
#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/snapshot/embedded/embedded-file-writer-interface.h"
#include "src/snapshot/read-only-deserializer.h"
@@ -423,6 +425,20 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
static constexpr size_t kSeed = 0;
size_t hash = kSeed;
// Hash static entries of the roots table.
hash = base::hash_combine(hash, V8_STATIC_ROOTS_BOOL);
#if V8_STATIC_ROOTS_BOOL
hash = base::hash_combine(hash,
static_cast<int>(RootIndex::kReadOnlyRootsCount));
RootIndex i = RootIndex::kFirstReadOnlyRoot;
for (auto ptr : StaticReadOnlyRootsPointerTable) {
hash = base::hash_combine(ptr, hash);
hash = base::hash_combine(std::hash<std::string>{}(roots_table().name(i)),
hash);
++i;
}
#endif // V8_STATIC_ROOTS_BOOL
// Hash data sections of builtin code objects.
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
@@ -4136,6 +4152,30 @@ VirtualMemoryCage* Isolate::GetPtrComprCodeCageForTesting() {
return V8_EXTERNAL_CODE_SPACE_BOOL ? heap_.code_range() : GetPtrComprCage();
}
// If this check fails, mksnapshot needs to be built without static roots and
// then run with --static-roots to regenerate the static-roots.h file.
void Isolate::VerifyStaticRoots() {
#if V8_STATIC_ROOTS_BOOL
auto& roots = roots_table();
CHECK_EQ(static_cast<int>(RootIndex::kReadOnlyRootsCount),
StaticReadOnlyRootsPointerTable.size());
RootIndex idx = RootIndex::kFirstReadOnlyRoot;
ReadOnlyPage* first_page = read_only_heap()->read_only_space()->pages()[0];
for (Tagged_t cmp_ptr : StaticReadOnlyRootsPointerTable) {
Address the_root = roots[idx];
Address ptr =
V8HeapCompressionScheme::DecompressTaggedPointer(cage_base(), cmp_ptr);
CHECK_EQ(the_root, ptr);
// All roots must fit on the first page, since only this page is guaranteed
// to have a stable offset from the cage base. If this ever changes we need
// to load more pages with a predictable offset in
// ReadOnlySpace::InitFromMemoryDump.
CHECK(first_page->Contains(the_root));
++idx;
}
#endif // V8_STATIC_ROOTS_BOOL
}
bool Isolate::Init(SnapshotData* startup_snapshot_data,
SnapshotData* read_only_snapshot_data,
SnapshotData* shared_heap_snapshot_data, bool can_rehash) {
@@ -4482,6 +4522,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
can_rehash);
startup_deserializer.DeserializeIntoIsolate();
}
if (DEBUG_BOOL) VerifyStaticRoots();
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
interpreter_->Initialize();
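
The invariant that VerifyStaticRoots enforces can be summarized as follows
(a comment-only sketch restating the checks above, not additional code from
the CL):

  // For every read-only root at index i:
  //   DecompressTaggedPointer(cage_base, StaticReadOnlyRootsPointerTable[i])
  //       == roots_table()[i]     // the table matches the live heap, and
  //   first_ro_page->Contains(roots_table()[i])
  //                               // the root has a stable cage offset.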

src/execution/isolate.h

@@ -2040,6 +2040,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
Object LocalsBlockListCacheGet(Handle<ScopeInfo> scope_info);
private:
void VerifyStaticRoots();
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
bool is_shared);
~Isolate();

src/flags/flag-definitions.h

@@ -2001,6 +2001,8 @@ DEFINE_STRING(embedded_src, nullptr,
DEFINE_STRING(
embedded_variant, nullptr,
"Label to disambiguate symbols in embedded data file. (mksnapshot only)")
DEFINE_STRING(static_roots, nullptr,
"Path for writing static-roots.h. (mksnapshot only)")
DEFINE_STRING(startup_src, nullptr,
"Write V8 startup as C++ src. (mksnapshot only)")
DEFINE_STRING(startup_blob, nullptr,

src/heap/memory-allocator.cc

@@ -345,26 +345,28 @@ size_t MemoryAllocator::ComputeChunkSize(size_t area_size,
}
base::Optional<MemoryAllocator::MemoryChunkAllocationResult>
MemoryAllocator::AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
Executability executable,
PageSize page_size) {
#ifdef V8_COMPRESS_POINTERS
MemoryAllocator::AllocateUninitializedChunkAt(BaseSpace* space,
size_t area_size,
Executability executable,
Address hint,
PageSize page_size) {
#ifndef V8_COMPRESS_POINTERS
// When pointer compression is enabled, spaces are expected to be at a
// predictable address (see mkgrokdump) so we don't supply a hint and rely on
// the deterministic behaviour of the BoundedPageAllocator.
void* address_hint = nullptr;
#else
void* address_hint = AlignedAddress(isolate_->heap()->GetRandomMmapAddr(),
MemoryChunk::kAlignment);
if (hint == kNullAddress) {
hint = reinterpret_cast<Address>(AlignedAddress(
isolate_->heap()->GetRandomMmapAddr(), MemoryChunk::kAlignment));
}
#endif
VirtualMemory reservation;
size_t chunk_size = ComputeChunkSize(area_size, executable);
DCHECK_EQ(chunk_size % GetCommitPageSize(), 0);
Address base =
AllocateAlignedMemory(chunk_size, area_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
Address base = AllocateAlignedMemory(
chunk_size, area_size, MemoryChunk::kAlignment, executable,
reinterpret_cast<void*>(hint), &reservation);
if (base == kNullAddress) return {};
size_ += reservation.size();
@@ -587,12 +589,13 @@ Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
return page;
}
ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(ReadOnlySpace* space) {
ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(ReadOnlySpace* space,
Address hint) {
DCHECK_EQ(space->identity(), RO_SPACE);
size_t size = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE);
base::Optional<MemoryChunkAllocationResult> chunk_info =
AllocateUninitializedChunk(space, size, NOT_EXECUTABLE,
PageSize::kRegular);
AllocateUninitializedChunkAt(space, size, NOT_EXECUTABLE, hint,
PageSize::kRegular);
if (!chunk_info) return nullptr;
return new (chunk_info->start) ReadOnlyPage(
isolate_->heap(), space, chunk_info->size, chunk_info->area_start,

src/heap/memory-allocator.h

@@ -194,7 +194,8 @@ class MemoryAllocator {
size_t object_size,
Executability executable);
ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space);
ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space,
Address hint = kNullAddress);
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
@@ -293,7 +294,14 @@
// the uninitialized memory region.
V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
Executability executable, PageSize page_size);
Executability executable, PageSize page_size) {
return AllocateUninitializedChunkAt(space, area_size, executable,
kNullAddress, page_size);
}
V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
AllocateUninitializedChunkAt(BaseSpace* space, size_t area_size,
Executability executable, Address hint,
PageSize page_size);
// Internal raw allocation method that allocates an aligned MemoryChunk and
// sets the right memory permissions.

src/heap/read-only-spaces.cc

@@ -21,6 +21,7 @@
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot-source-sink.h"
#include "src/snapshot/snapshot-utils.h"
namespace v8 {
@@ -754,5 +755,46 @@ SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
pages_ = artifacts->pages();
}
void ReadOnlySpace::InitFromMemoryDump(Isolate* isolate,
SnapshotByteSource* in) {
size_t num_pages = in->GetInt();
auto cage = isolate->GetPtrComprCage();
CHECK_LT(num_pages, 10);
auto first_page = cage->base() + in->GetInt();
for (size_t i = 0; i < num_pages; ++i) {
int size = in->GetInt();
ReadOnlyPage* chunk;
if (i == 0) {
chunk =
heap()->memory_allocator()->AllocateReadOnlyPage(this, first_page);
// If this fails we probably allocated r/o space too late.
CHECK_EQ(reinterpret_cast<void*>(first_page), chunk);
} else {
chunk = heap()->memory_allocator()->AllocateReadOnlyPage(this);
}
capacity_ += AreaSize();
AccountCommitted(chunk->size());
CHECK_NOT_NULL(chunk);
CHECK_LE(chunk->area_start() + size, chunk->area_end());
in->CopyRaw(reinterpret_cast<void*>(chunk->area_start()), size);
chunk->IncreaseAllocatedBytes(size);
chunk->high_water_mark_ = (chunk->area_start() - chunk->address()) + size;
DCHECK_NE(chunk->allocated_bytes(), 0);
accounting_stats_.IncreaseCapacity(chunk->area_size());
accounting_stats_.IncreaseAllocatedBytes(chunk->allocated_bytes(), chunk);
pages_.push_back(chunk);
top_ = chunk->area_start() + size;
limit_ = chunk->area_end();
}
}
} // namespace internal
} // namespace v8
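
For reference, the stream consumed by InitFromMemoryDump is produced by
ReadOnlySerializer::FinalizeSerialization (further down in this CL); its
layout is roughly the following (a conceptual sketch, not a struct that
appears in the code):

  // int num_pages;           // CHECKed to be < 10
  // int first_page_offset;   // compressed (cage-relative) address of page 0
  // num_pages repetitions of:
  //   int  size;             // bytes used in the page's area
  //   byte raw[size];        // copied verbatim to the page's area_start()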

src/heap/read-only-spaces.h

@@ -23,7 +23,7 @@ namespace internal {
class MemoryAllocator;
class ReadOnlyHeap;
class SnapshotData;
class SnapshotByteSource;
class ReadOnlyPage : public BasicMemoryChunk {
public:
@@ -235,6 +235,8 @@ class ReadOnlySpace : public BaseSpace {
Address FirstPageAddress() const { return pages_.front()->address(); }
void InitFromMemoryDump(Isolate* isolate, SnapshotByteSource* source);
protected:
friend class SingleCopyReadOnlyArtifacts;

src/heap/setup-heap-internal.cc

@@ -728,6 +728,20 @@ void Heap::CreateInitialReadOnlyObjects() {
Factory* factory = isolate()->factory();
ReadOnlyRoots roots(this);
// For static roots we need the r/o space to have an identical layout on all
// compile targets. Objects whose size varies between targets are padded to
// their largest size.
auto StaticRootsEnsureAllocatedSize = [&](HeapObject obj, int required) {
#if V8_STATIC_ROOTS_BOOL
if (required == obj.Size()) return;
CHECK_LT(obj.Size(), required);
int filler_size = required - obj.Size();
auto filler = factory->NewFillerObject(filler_size,
AllocationAlignment::kTaggedAligned,
AllocationType::kReadOnly);
CHECK_EQ(filler->address() + filler->Size(), obj.address() + required);
#endif
};
// The -0 value must be set before NewNumber works.
set_minus_zero_value(
*factory->NewHeapNumber<AllocationType::kReadOnly>(-0.0));
@@ -939,6 +953,8 @@ void Heap::CreateInitialReadOnlyObjects() {
Handle<SwissNameDictionary> empty_swiss_property_dictionary =
factory->CreateCanonicalEmptySwissNameDictionary();
set_empty_swiss_property_dictionary(*empty_swiss_property_dictionary);
StaticRootsEnsureAllocatedSize(*empty_swiss_property_dictionary,
8 * kTaggedSize);
// Allocate the empty FeedbackMetadata.
Handle<FeedbackMetadata> empty_feedback_metadata =
@@ -959,8 +975,9 @@ void Heap::CreateInitialReadOnlyObjects() {
set_native_scope_info(*native_scope_info);
// Canonical off-heap trampoline data
set_off_heap_trampoline_relocation_info(
*Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_));
auto reloc_info = Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_);
set_off_heap_trampoline_relocation_info(*reloc_info);
StaticRootsEnsureAllocatedSize(*reloc_info, 4 * kTaggedSize);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// These roots will not be used.
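
As a concrete illustration of the padding above (sizes hypothetical): if
empty_swiss_property_dictionary occupies 6 tagged words on one compile
target but 8 on another, the helper pads the smaller layout up to the full
8 * kTaggedSize:

  // Hypothetical sizes: obj.Size() == 6 * kTaggedSize and
  // required == 8 * kTaggedSize, so filler_size == 2 * kTaggedSize.
  // The filler is allocated directly behind obj in r/o space, hence the
  // check: filler->address() + filler->Size() == obj.address() + required.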

src/roots/roots.cc

@@ -4,9 +4,11 @@
#include "src/roots/roots.h"
#include "src/common/globals.h"
#include "src/objects/elements-kind.h"
#include "src/objects/objects-inl.h"
#include "src/objects/visitors.h"
#include "src/roots/static-roots.h"
namespace v8 {
namespace internal {
@@ -68,5 +70,19 @@ Handle<HeapNumber> ReadOnlyRoots::FindHeapNumber(double value) {
return Handle<HeapNumber>();
}
void ReadOnlyRoots::InitFromStaticRootsTable(Address cage_base) {
CHECK(V8_STATIC_ROOTS_BOOL);
#if V8_STATIC_ROOTS_BOOL
RootIndex pos = RootIndex::kFirstReadOnlyRoot;
for (auto element : StaticReadOnlyRootsPointerTable) {
auto ptr =
V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, element);
*GetLocation(pos) = ptr;
++pos;
}
DCHECK_EQ(static_cast<int>(pos) - 1,
          static_cast<int>(RootIndex::kLastReadOnlyRoot));
#endif // V8_STATIC_ROOTS_BOOL
}
} // namespace internal
} // namespace v8

src/roots/roots.h

@@ -643,6 +643,10 @@ class ReadOnlyRoots {
// heap verification.
void Iterate(RootVisitor* visitor);
// Uncompress pointers in the static roots table and store them into the
// actual roots table.
void InitFromStaticRootsTable(Address cage_base);
private:
V8_INLINE Address first_name_for_protector() const;
V8_INLINE Address last_name_for_protector() const;

src/roots/static-roots.h (new file, 20 lines)

@@ -0,0 +1,20 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ROOTS_STATIC_ROOTS_H_
#define V8_ROOTS_STATIC_ROOTS_H_
#include "src/common/globals.h"
#if V8_STATIC_ROOTS_BOOL
namespace v8 {
namespace internal {
// TODO(olivf, v8:13466): Enable and add static roots
constexpr static std::array<Tagged_t, 0> StaticReadOnlyRootsPointerTable = {};
} // namespace internal
} // namespace v8
#endif // V8_STATIC_ROOTS_BOOL
#endif // V8_ROOTS_STATIC_ROOTS_H_

src/snapshot/code-serializer.cc

@@ -108,28 +108,6 @@ AlignedCachedData* CodeSerializer::SerializeSharedFunctionInfo(
return data.GetScriptData();
}
bool CodeSerializer::SerializeReadOnlyObject(
HeapObject obj, const DisallowGarbageCollection& no_gc) {
if (!ReadOnlyHeap::Contains(obj)) return false;
// For objects on the read-only heap, never serialize the object, but instead
// create a back reference that encodes the page number as the chunk_index and
// the offset within the page as the chunk_offset.
Address address = obj.address();
BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
uint32_t chunk_index = 0;
ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
for (ReadOnlyPage* page : read_only_space->pages()) {
if (chunk == page) break;
++chunk_index;
}
uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
sink_.Put(kReadOnlyHeapRef, "ReadOnlyHeapRef");
sink_.PutInt(chunk_index, "ReadOnlyHeapRefChunkIndex");
sink_.PutInt(chunk_offset, "ReadOnlyHeapRefChunkOffset");
return true;
}
void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
ReadOnlyRoots roots(isolate());
InstanceType instance_type;
@@ -139,7 +117,7 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
if (SerializeHotObject(raw)) return;
if (SerializeRoot(raw)) return;
if (SerializeBackReference(raw)) return;
if (SerializeReadOnlyObject(raw, no_gc)) return;
if (SerializeReadOnlyObjectReference(raw, &sink_)) return;
instance_type = raw.map().instance_type();
CHECK(!InstanceTypeChecker::IsCode(instance_type));

src/snapshot/code-serializer.h

@@ -111,9 +111,6 @@ class CodeSerializer : public Serializer {
private:
void SerializeObjectImpl(Handle<HeapObject> o) override;
bool SerializeReadOnlyObject(HeapObject obj,
const DisallowGarbageCollection& no_gc);
DISALLOW_GARBAGE_COLLECTION(no_gc_)
uint32_t source_hash_;
};

src/snapshot/deserializer.cc

@@ -911,6 +911,8 @@ int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
// object.
case CASE_RANGE_ALL_SPACES(kNewObject): {
SnapshotSpace space = NewObject::Decode(data);
DCHECK_IMPLIES(V8_STATIC_ROOTS_BOOL,
space != SnapshotSpace::kReadOnlyHeap);
// Save the reference type before recursing down into reading the object.
HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
Handle<HeapObject> heap_object = ReadObject(space);
@@ -925,9 +927,12 @@ int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
}
// Reference an object in the read-only heap. This should be used when an
// object is read-only, but is not a root.
// object is read-only, but is not a root. With static roots, however, this
// reference is always used for read-only objects, since they are created by
// loading a memory dump of the r/o space.
case kReadOnlyHeapRef: {
DCHECK(isolate()->heap()->deserialization_complete());
DCHECK(isolate()->heap()->deserialization_complete() ||
V8_STATIC_ROOTS_BOOL);
uint32_t chunk_index = source_.GetInt();
uint32_t chunk_offset = source_.GetInt();
@@ -964,6 +969,7 @@ int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
// Find an object in the read-only object cache and write a pointer to it
// to the current object.
case kReadOnlyObjectCache: {
DCHECK(!V8_STATIC_ROOTS_BOOL);
int cache_index = source_.GetInt();
// TODO(leszeks): Could we use the address of the cached_read_only_object
// entry as a Handle backing?

src/snapshot/mksnapshot.cc

@@ -18,6 +18,7 @@
#include "src/flags/flags.h"
#include "src/snapshot/embedded/embedded-file-writer.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/static-roots-gen.h"
namespace {
@@ -230,7 +231,7 @@ int main(int argc, char** argv) {
std::string usage = "Usage: " + std::string(argv[0]) +
" [--startup-src=file]" + " [--startup-blob=file]" +
" [--embedded-src=file]" + " [--embedded-variant=label]" +
" [--target-arch=arch]" +
" [--static-roots=file]" + " [--target-arch=arch]" +
" [--target-os=os] [extras]\n\n";
int result = i::FlagList::SetFlagsFromCommandLine(
&argc, argv, true, HelpOptions(HelpOptions::kExit, usage.c_str()));
@@ -289,6 +290,10 @@ int main(int argc, char** argv) {
// is still alive (we called DisableEmbeddedBlobRefcounting above).
// That's fine as far as the embedded file writer is concerned.
WriteEmbeddedFile(&embedded_writer);
if (i::v8_flags.static_roots) {
i::StaticRootsTableGen::write(i_isolate, i::v8_flags.static_roots);
}
}
if (warmup_script) {
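
With the new flag in place, regenerating the table amounts to building
mksnapshot with V8_STATIC_ROOTS_BOOL disabled and running it along these
lines (an illustrative invocation; the remaining flags are elided):

  mksnapshot --static-roots=src/roots/static-roots.h ...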

src/snapshot/read-only-deserializer.cc

@@ -5,10 +5,12 @@
#include "src/snapshot/read-only-deserializer.h"
#include "src/api/api.h"
#include "src/common/globals.h"
#include "src/execution/v8threads.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/read-only-heap.h"
#include "src/objects/slots.h"
#include "src/roots/static-roots.h"
namespace v8 {
namespace internal {
@@ -31,20 +33,25 @@ void ReadOnlyDeserializer::DeserializeIntoIsolate() {
{
ReadOnlyRoots roots(isolate());
if (V8_STATIC_ROOTS_BOOL) {
ro_heap->read_only_space()->InitFromMemoryDump(isolate(), source());
roots.InitFromStaticRootsTable(isolate()->cage_base());
ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
} else {
roots.Iterate(this);
roots.Iterate(this);
ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
// Deserialize the Read-only Object Cache.
for (;;) {
Object* object = ro_heap->ExtendReadOnlyObjectCache();
// During deserialization, the visitor populates the read-only object
// cache and eventually terminates the cache with undefined.
VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
FullObjectSlot(object));
if (object->IsUndefined(roots)) break;
// Deserialize the Read-only Object Cache.
for (;;) {
Object* object = ro_heap->ExtendReadOnlyObjectCache();
// During deserialization, the visitor populates the read-only object
// cache and eventually terminates the cache with undefined.
VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
FullObjectSlot(object));
if (object->IsUndefined(roots)) break;
}
DeserializeDeferredObjects();
}
DeserializeDeferredObjects();
#ifdef DEBUG
roots.VerifyNameForProtectors();
#endif
@@ -53,6 +60,29 @@ void ReadOnlyDeserializer::DeserializeIntoIsolate() {
if (should_rehash()) {
isolate()->heap()->InitializeHashSeed();
RehashReadOnly();
}
}
void ReadOnlyDeserializer::RehashReadOnly() {
DCHECK(should_rehash());
if (V8_STATIC_ROOTS_BOOL) {
// Since we are not deserializing individual objects we need to scan the
// heap and search for the ones that need rehashing.
ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
PtrComprCageBase cage_base(isolate());
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
auto instance_type = object.map(cage_base).instance_type();
if (InstanceTypeChecker::IsInternalizedString(instance_type)) {
auto str = String::cast(object);
str.set_raw_hash_field(Name::kEmptyHashField);
str.EnsureHash();
} else if (object.NeedsRehashing(instance_type)) {
object.RehashBasedOnMap(isolate());
}
}
} else {
Rehash();
}
}

src/snapshot/read-only-deserializer.h

@@ -22,6 +22,8 @@ class ReadOnlyDeserializer final : public Deserializer<Isolate> {
// Deserialize the snapshot into an empty heap.
void DeserializeIntoIsolate();
void RehashReadOnly();
};
} // namespace internal

src/snapshot/read-only-serializer.cc

@@ -33,6 +33,7 @@ ReadOnlySerializer::~ReadOnlySerializer() {
void ReadOnlySerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
CHECK(ReadOnlyHeap::Contains(*obj));
CHECK_IMPLIES(obj->IsString(), obj->IsInternalizedString());
DCHECK(!V8_STATIC_ROOTS_BOOL);
// There should be no references to the not_mapped_symbol except for the entry
// in the root table, so don't try to serialize a reference and rely on the
@@ -73,36 +74,56 @@ void ReadOnlySerializer::SerializeReadOnlyRoots() {
CHECK_IMPLIES(!allow_active_isolate_for_testing(),
isolate()->handle_scope_implementer()->blocks()->empty());
ReadOnlyRoots(isolate()).Iterate(this);
if (reconstruct_read_only_and_shared_object_caches_for_testing()) {
ReconstructReadOnlyObjectCacheForTesting();
if (!V8_STATIC_ROOTS_BOOL) {
ReadOnlyRoots(isolate()).Iterate(this);
if (reconstruct_read_only_and_shared_object_caches_for_testing()) {
ReconstructReadOnlyObjectCacheForTesting();
}
}
}
void ReadOnlySerializer::FinalizeSerialization() {
// This comes right after serialization of the other snapshots, where we
// add entries to the read-only object cache. Add one entry with 'undefined'
// to terminate the read-only object cache.
Object undefined = ReadOnlyRoots(isolate()).undefined_value();
VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
FullObjectSlot(&undefined));
SerializeDeferredObjects();
Pad();
if (V8_STATIC_ROOTS_BOOL) {
DCHECK(object_cache_empty());
DCHECK(deferred_objects_empty());
DCHECK_EQ(sink_.Position(), 0);
auto space = isolate()->read_only_heap()->read_only_space();
size_t num_pages = space->pages().size();
sink_.PutInt(num_pages, "num pages");
Tagged_t pos = V8HeapCompressionScheme::CompressTagged(
reinterpret_cast<Address>(space->pages()[0]));
sink_.PutInt(pos, "first page offset");
for (auto p : space->pages()) {
size_t page_size = p->area_size();
sink_.PutInt(page_size, "page size");
sink_.PutRaw(reinterpret_cast<const byte*>(p->area_start()),
static_cast<int>(p->area_size()), "page");
}
} else {
// This comes right after serialization of the other snapshots, where we
// add entries to the read-only object cache. Add one entry with 'undefined'
// to terminate the read-only object cache.
Object undefined = ReadOnlyRoots(isolate()).undefined_value();
VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
FullObjectSlot(&undefined));
SerializeDeferredObjects();
Pad();
#ifdef DEBUG
// Check that every object on read-only heap is reachable (and was
// serialized).
ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
if (IsNotMappedSymbol(object)) {
CHECK(did_serialize_not_mapped_symbol_);
} else {
CHECK_NOT_NULL(serialized_objects_.Find(object));
// Check that every object on read-only heap is reachable (and was
// serialized).
ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
if (IsNotMappedSymbol(object)) {
CHECK(did_serialize_not_mapped_symbol_);
} else {
CHECK_NOT_NULL(serialized_objects_.Find(object));
}
}
#endif // DEBUG
}
#endif
}
bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
@@ -122,13 +143,16 @@ bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
SnapshotByteSink* sink, Handle<HeapObject> obj) {
if (!ReadOnlyHeap::Contains(*obj)) return false;
// Get the cache index and serialize it into the read-only snapshot if
// necessary.
int cache_index = SerializeInObjectCache(obj);
// Writing out the cache entry into the calling serializer's sink.
sink->Put(kReadOnlyObjectCache, "ReadOnlyObjectCache");
sink->PutInt(cache_index, "read_only_object_cache_index");
if (V8_STATIC_ROOTS_BOOL) {
SerializeReadOnlyObjectReference(*obj, sink);
} else {
// Get the cache index and serialize it into the read-only snapshot if
// necessary.
int cache_index = SerializeInObjectCache(obj);
// Writing out the cache entry into the calling serializer's sink.
sink->Put(kReadOnlyObjectCache, "ReadOnlyObjectCache");
sink->PutInt(cache_index, "read_only_object_cache_index");
}
return true;
}
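
The two branches thus emit different byte sequences into the calling
serializer's sink; schematically:

  // Without static roots: kReadOnlyObjectCache, cache_index
  // With static roots:    kReadOnlyHeapRef, chunk_index, chunk_offset
  //                       (page number and offset within the page)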

src/snapshot/roots-serializer.h

@@ -46,6 +46,8 @@ class RootsSerializer : public Serializer {
// Serializes |object| if not previously seen and returns its cache index.
int SerializeInObjectCache(Handle<HeapObject> object);
bool object_cache_empty() { return object_cache_index_map_.size() == 0; }
private:
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override;

src/snapshot/serializer.cc

@@ -1370,5 +1370,28 @@ Handle<FixedArray> ObjectCacheIndexMap::Values(Isolate* isolate) {
return externals;
}
bool Serializer::SerializeReadOnlyObjectReference(HeapObject obj,
SnapshotByteSink* sink) {
if (!ReadOnlyHeap::Contains(obj)) return false;
// For objects on the read-only heap, never serialize the object, but instead
// create a back reference that encodes the page number as the chunk_index and
// the offset within the page as the chunk_offset.
Address address = obj.address();
BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
uint32_t chunk_index = 0;
ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
DCHECK(!read_only_space->writable());
for (ReadOnlyPage* page : read_only_space->pages()) {
if (chunk == page) break;
++chunk_index;
}
uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
sink->Put(kReadOnlyHeapRef, "ReadOnlyHeapRef");
sink->PutInt(chunk_index, "ReadOnlyHeapRefChunkIndex");
sink->PutInt(chunk_offset, "ReadOnlyHeapRefChunkOffset");
return true;
}
} // namespace internal
} // namespace v8

src/snapshot/serializer.h

@@ -266,6 +266,8 @@ class Serializer : public SerializerDeserializer {
return external_reference_encoder_.TryEncode(addr);
}
bool SerializeReadOnlyObjectReference(HeapObject obj, SnapshotByteSink* sink);
// GetInt reads 4 bytes at once, requiring padding at the end.
// Use padding_offset to specify the space you want to use after padding.
void Pad(int padding_offset = 0);
@@ -317,6 +319,8 @@ class Serializer : public SerializerDeserializer {
Snapshot::kReconstructReadOnlyAndSharedObjectCachesForTesting) != 0;
}
bool deferred_objects_empty() { return deferred_objects_.size() == 0; }
private:
// A circular queue of hot objects. This is added to in the same order as in
// Deserializer::HotObjectsList, but this stores the objects as an array of

src/snapshot/static-roots-gen.cc (new file, 58 lines)

@@ -0,0 +1,58 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/static-roots-gen.h"
#include <fstream>
#include "src/common/ptr-compr-inl.h"
#include "src/execution/isolate.h"
#include "src/roots/roots-inl.h"
#include "src/roots/roots.h"
namespace v8 {
namespace internal {
void StaticRootsTableGen::write(Isolate* isolate, const char* file) {
CHECK(file);
static_assert(static_cast<int>(RootIndex::kFirstReadOnlyRoot) == 0);
std::ofstream out(file);
const auto roots = isolate->roots_table();
const auto size = static_cast<int>(RootIndex::kReadOnlyRootsCount);
out << "// Copyright 2022 the V8 project authors. All rights reserved.\n"
<< "// Use of this source code is governed by a BSD-style license "
"that can be\n"
<< "// found in the LICENSE file.\n"
<< "\n"
<< "#ifndef V8_ROOTS_STATIC_ROOTS_H_\n"
<< "#define V8_ROOTS_STATIC_ROOTS_H_\n"
<< "\n"
<< "#include \"src/common/globals.h\"\n"
<< "#if V8_STATIC_ROOTS_BOOL\n"
<< "\n"
<< "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n"
<< "constexpr static std::array<Tagged_t, " << size
<< "> StaticReadOnlyRootsPointerTable = {\n";
auto pos = RootIndex::kFirstReadOnlyRoot;
for (; pos <= RootIndex::kLastReadOnlyRoot; ++pos) {
auto el = roots[pos];
auto n = roots.name(pos);
el = V8HeapCompressionScheme::CompressTagged(el);
out << " " << reinterpret_cast<void*>(el) << ", // " << n << "\n";
}
CHECK_EQ(static_cast<int>(pos), size);
out << "};\n"
<< "\n"
<< "} // namespace internal\n"
<< "} // namespace v8\n"
<< "#endif // V8_STATIC_ROOTS_BOOL\n"
<< "#endif // V8_ROOTS_STATIC_ROOTS_H_\n";
}
} // namespace internal
} // namespace v8
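
Once the table is populated, the generated file will look roughly like this
(the root names are real, but the entry count and values are hypothetical):

  // Excerpt of a generated src/roots/static-roots.h:
  constexpr static std::array<Tagged_t, 2> StaticReadOnlyRootsPointerTable = {
      0x139,  // free_space_map
      0x151,  // one_pointer_filler_map
  };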

src/snapshot/static-roots-gen.h (new file, 21 lines)

@@ -0,0 +1,21 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_STATIC_ROOTS_GEN_H_
#define V8_SNAPSHOT_STATIC_ROOTS_GEN_H_
namespace v8 {
namespace internal {
class Isolate;
class StaticRootsTableGen {
public:
static void write(Isolate* isolate, const char* file);
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_STATIC_ROOTS_GEN_H_

src/utils/allocation.cc

@@ -166,7 +166,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(hint, AlignedAddress(hint, alignment));
DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
if (v8_flags.randomize_all_allocations) {
if (!hint && v8_flags.randomize_all_allocations) {
hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
}
void* result = nullptr;