[snapshot] Remove the builtins snapshot

Now that lazy deserialization has been removed, we can roll back all
the mechanisms we introduced to support lazy single-builtin
deserialization.

This CL moves serialized builtin code objects (i.e., off-heap trampolines
in most cases) back into the startup snapshot. Support classes for builtin
serialization and deserialization, as well as the builtins snapshot
itself, are removed. Templatization on the allocator class is removed as
well.

Tbr: delphick@chromium.org
Bug: v8:6666, v8:7990
Change-Id: I2a910f8d3278b7e27b5f18ad408361ebd18871cc
Reviewed-on: https://chromium-review.googlesource.com/c/1304539
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57160}
Author: Jakob Gruber <jgruber@chromium.org>
Date: 2018-10-31 10:09:54 +01:00 (committed by Commit Bot)
Parent: c2f9da8246
Commit: 4ef0e79cba
40 changed files with 174 additions and 1258 deletions
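
The recurring mechanical change below is the removal of the allocator
template parameter from Serializer and Deserializer: every Serializer<>
spelling becomes plain Serializer, and Deserializer<AllocatorT> becomes
Deserializer. A minimal, self-contained sketch of the pattern being
retired (names mirror the V8 classes; bodies are illustrative stubs, not
V8 code):

#include <cstdint>

// Stub standing in for the single remaining allocator.
struct DefaultSerializerAllocator {
  uint32_t allocated_bytes = 0;
};

// Before this CL: the allocator was a compile-time parameter so the
// builtins snapshot could plug in its own BuiltinSerializerAllocator.
template <class AllocatorT = DefaultSerializerAllocator>
class TemplatizedSerializer {
 protected:
  AllocatorT allocator_;
};

// After this CL: exactly one allocator remains, so the template parameter
// (and the Serializer<> spelling at every use site) is dropped.
class Serializer {
 protected:
  DefaultSerializerAllocator allocator_;
};

class CodeSerializer : public Serializer {};  // was: public Serializer<>

int main() {
  TemplatizedSerializer<> before;
  CodeSerializer after;
  (void)before;
  (void)after;
  return 0;
}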


@@ -2476,14 +2476,6 @@ v8_source_set("v8_base") {
"src/simulator-base.cc",
"src/simulator-base.h",
"src/simulator.h",
"src/snapshot/builtin-deserializer-allocator.cc",
"src/snapshot/builtin-deserializer-allocator.h",
"src/snapshot/builtin-deserializer.cc",
"src/snapshot/builtin-deserializer.h",
"src/snapshot/builtin-serializer-allocator.cc",
"src/snapshot/builtin-serializer-allocator.h",
"src/snapshot/builtin-serializer.cc",
"src/snapshot/builtin-serializer.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-deserializer-allocator.cc",


@@ -78,7 +78,6 @@
#include "src/runtime-profiler.h"
#include "src/runtime/runtime.h"
#include "src/simulator.h"
#include "src/snapshot/builtin-serializer.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-serializer.h"
@@ -833,12 +832,6 @@ StartupData SnapshotCreator::CreateBlob(
context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
}
// Builtin serialization places additional objects into the partial snapshot
// cache and thus needs to happen before SerializeWeakReferencesAndDeferred
// is called below.
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
@@ -847,10 +840,9 @@ StartupData SnapshotCreator::CreateBlob(
i::SnapshotData read_only_snapshot(&read_only_serializer);
i::SnapshotData startup_snapshot(&startup_serializer);
i::BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
StartupData result = i::Snapshot::CreateSnapshotBlob(
&startup_snapshot, &builtin_snapshot, &read_only_snapshot,
context_snapshots, can_be_rehashed);
StartupData result =
i::Snapshot::CreateSnapshotBlob(&startup_snapshot, &read_only_snapshot,
context_snapshots, can_be_rehashed);
// Delete heap-allocated context snapshot instances.
for (const auto context_snapshot : context_snapshots) {
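
The updated call above implies that Snapshot::CreateSnapshotBlob loses its
builtin-snapshot parameter. A hedged sketch of the assumed new shape, with
parameter types inferred from the call site rather than copied from
src/snapshot/snapshot.h, and stub types standing in for the V8 ones:

#include <vector>

struct SnapshotData {};                              // stub for i::SnapshotData
struct StartupData { const char* data = nullptr; };  // stub for v8::StartupData

// Assumed post-CL signature: the builtin_snapshot argument is gone.
StartupData CreateSnapshotBlob(
    const SnapshotData* startup_snapshot,
    const SnapshotData* read_only_snapshot,
    const std::vector<SnapshotData*>& context_snapshots,
    bool can_be_rehashed) {
  (void)startup_snapshot;
  (void)read_only_snapshot;
  (void)context_snapshots;
  (void)can_be_rehashed;
  return StartupData{};
}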


@@ -82,7 +82,7 @@ class Builtins {
Handle<Code> NewFunctionContext(ScopeType scope_type);
Handle<Code> JSConstructStubGeneric();
// Used by BuiltinDeserializer and CreateOffHeapTrampolines in isolate.cc.
// Used by CreateOffHeapTrampolines in isolate.cc.
void set_builtin(int index, HeapObject* builtin);
Code* builtin(int index);


@@ -2930,7 +2930,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
@@ -3806,6 +3806,10 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
if (!isMinorGC) {
IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
// Currently we iterate the dispatch table to update pointers to possibly
// moved Code objects for bytecode handlers.
// TODO(v8:6666): Remove iteration once builtins are embedded (and thus
// immovable) in every build configuration.
isolate_->interpreter()->IterateDispatchTable(v);
v->Synchronize(VisitorSynchronization::kDispatchTable);
}


@@ -99,15 +99,13 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
}
void Interpreter::IterateDispatchTable(RootVisitor* v) {
Heap* heap = isolate_->heap();
for (int i = 0; i < kDispatchTableSize; i++) {
Address code_entry = dispatch_table_[i];
// If the handler is embedded, it is immovable.
if (InstructionStream::PcIsOffHeap(isolate_, code_entry)) continue;
Object* code = code_entry == kNullAddress
? nullptr
: Code::GetCodeFromTargetAddress(code_entry);
: heap->GcSafeFindCodeForInnerPointer(code_entry);
Object* old_code = code;
v->VisitRootPointer(Root::kDispatchTable, nullptr, ObjectSlot(&code));
if (code != old_code) {


@@ -19,7 +19,6 @@ namespace v8 {
namespace internal {
class Isolate;
class BuiltinDeserializerAllocator;
class Callable;
class UnoptimizedCompilationJob;
class FunctionLiteral;
@@ -82,7 +81,6 @@ class Interpreter {
private:
friend class SetupInterpreter;
friend class v8::internal::SetupIsolateDelegate;
friend class v8::internal::BuiltinDeserializerAllocator;
uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;


@@ -3044,8 +3044,7 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
// Note that references to the old, on-heap code objects may still exist on
// the heap. This is fine for the sake of serialization, as serialization
// will replace all of them with a builtin reference which is later
// deserialized to point to the object within the builtins table.
// will canonicalize all builtins in MaybeCanonicalizeBuiltin().
//
// From this point onwards, some builtin code objects may be unreachable and
// thus collected by the GC.


@@ -370,23 +370,17 @@ void Code::initialize_flags(Kind kind, bool has_unwinding_info,
}
inline bool Code::is_interpreter_trampoline_builtin() const {
Builtins* builtins = GetIsolate()->builtins();
Code* interpreter_entry_trampoline =
builtins->builtin(Builtins::kInterpreterEntryTrampoline);
bool is_interpreter_trampoline =
(builtin_index() == interpreter_entry_trampoline->builtin_index() ||
this == builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance) ||
this == builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch));
(builtin_index() == Builtins::kInterpreterEntryTrampoline ||
builtin_index() == Builtins::kInterpreterEnterBytecodeAdvance ||
builtin_index() == Builtins::kInterpreterEnterBytecodeDispatch);
return is_interpreter_trampoline;
}
inline bool Code::checks_optimization_marker() const {
Builtins* builtins = GetIsolate()->builtins();
Code* interpreter_entry_trampoline =
builtins->builtin(Builtins::kInterpreterEntryTrampoline);
bool checks_marker =
(this == builtins->builtin(Builtins::kCompileLazy) ||
builtin_index() == interpreter_entry_trampoline->builtin_index());
(builtin_index() == Builtins::kCompileLazy ||
builtin_index() == Builtins::kInterpreterEntryTrampoline);
return checks_marker ||
(kind() == OPTIMIZED_FUNCTION && marked_for_deoptimization());
}
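
The rewrite above compares builtin_index() against Builtins constants
instead of loading Code objects from the builtins table and testing object
identity. A self-contained sketch of why the index is the more robust key
(toy types, not V8's): once a builtins-table slot can hold an off-heap
trampoline, the trampoline is a distinct object from the code it stands
for, but both carry the same stable builtin index.

#include <cassert>

// Toy stand-in, not V8's Code.
struct Code {
  int builtin_index;  // -1 when not a builtin
};

constexpr int kInterpreterEntryTrampoline = 1;

// Identity test: breaks when the table entry has been replaced by a
// trampoline object distinct from the Code being asked about.
bool IsEntryTrampolineByIdentity(const Code* code, const Code* table_entry) {
  return code == table_entry;
}

// Index test: survives the table entry being swapped out.
bool IsEntryTrampolineByIndex(const Code* code) {
  return code->builtin_index == kInterpreterEntryTrampoline;
}

int main() {
  Code original{kInterpreterEntryTrampoline};
  Code trampoline{kInterpreterEntryTrampoline};  // replaces the table slot
  assert(!IsEntryTrampolineByIdentity(&original, &trampoline));
  assert(IsEntryTrampolineByIndex(&original));
  return 0;
}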


@@ -1,105 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
namespace internal {
using interpreter::Bytecodes;
using interpreter::Interpreter;
BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer)
: deserializer_(deserializer) {}
Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
int size) {
const int code_object_id = deserializer()->CurrentCodeObjectId();
DCHECK_NE(BuiltinDeserializer::kNoCodeObjectId, code_object_id);
DCHECK_EQ(CODE_SPACE, space);
DCHECK_EQ(deserializer()->ExtractCodeObjectSize(code_object_id), size);
#ifdef DEBUG
RegisterCodeObjectAllocation(code_object_id);
#endif
DCHECK(Builtins::IsBuiltinId(code_object_id));
Object* obj = isolate()->builtins()->builtin(code_object_id);
DCHECK(Internals::HasHeapObjectTag(reinterpret_cast<Address>(obj)));
return HeapObject::cast(obj)->address();
}
Heap::Reservation
BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
Heap::Reservation result;
// Reservations for builtins.
for (int i = 0; i < Builtins::builtin_count; i++) {
uint32_t builtin_size = deserializer()->ExtractCodeObjectSize(i);
DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
return result;
}
void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
const Heap::Chunk& chunk, int builtin_id) {
DCHECK_EQ(deserializer()->ExtractCodeObjectSize(builtin_id), chunk.size);
DCHECK_EQ(chunk.size, chunk.end - chunk.start);
SkipList::Update(chunk.start, chunk.size);
isolate()->builtins()->set_builtin(builtin_id,
HeapObject::FromAddress(chunk.start));
#ifdef DEBUG
RegisterCodeObjectReservation(builtin_id);
#endif
}
void BuiltinDeserializerAllocator::InitializeFromReservations(
const Heap::Reservation& reservation) {
DCHECK(!AllowHeapAllocation::IsAllowed());
// Initialize the builtins table.
for (int i = 0; i < Builtins::builtin_count; i++) {
InitializeBuiltinFromReservation(reservation[i], i);
}
}
#ifdef DEBUG
void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
int code_object_id) {
const auto result = unused_reservations_.emplace(code_object_id);
CHECK(result.second); // False, iff builtin_id was already present in set.
}
void BuiltinDeserializerAllocator::RegisterCodeObjectAllocation(
int code_object_id) {
const size_t removed_elems = unused_reservations_.erase(code_object_id);
CHECK_EQ(removed_elems, 1);
}
bool BuiltinDeserializerAllocator::ReservationsAreFullyUsed() const {
// Not 100% precise but should be good enough.
return unused_reservations_.empty();
}
#endif // DEBUG
Isolate* BuiltinDeserializerAllocator::isolate() const {
return deserializer()->isolate();
}
BuiltinDeserializer* BuiltinDeserializerAllocator::deserializer() const {
return static_cast<BuiltinDeserializer*>(deserializer_);
}
} // namespace internal
} // namespace v8


@@ -1,121 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
#include <unordered_set>
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/serializer-common.h"
namespace v8 {
namespace internal {
template <class AllocatorT>
class Deserializer;
class BuiltinDeserializer;
class BuiltinDeserializerAllocator final {
using Bytecode = interpreter::Bytecode;
using OperandScale = interpreter::OperandScale;
public:
BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer);
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
// Allocation works differently here than in other deserializers. Instead of
// a statically-known memory area determined at serialization-time, our
// memory requirements here are determined at runtime. Another major
// difference is that we create builtin Code objects up-front (before
// deserialization) in order to avoid having to patch builtin references
// later on. See also the kBuiltin case in deserializer.cc.
//
// When reserving / allocating space, required objects are requested from the
// GC prior to deserialization. Pre-allocated builtin code objects are written
// into the builtins table (this is to make deserialization of builtin
// references easier).
//
// Allocate simply returns the pre-allocated object prepared by
// InitializeFromReservations.
Address Allocate(AllocationSpace space, int size);
void MoveToNextChunk(AllocationSpace space) { UNREACHABLE(); }
void SetAlignment(AllocationAlignment alignment) { UNREACHABLE(); }
void set_next_reference_is_weak(bool next_reference_is_weak) {
next_reference_is_weak_ = next_reference_is_weak;
}
bool GetAndClearNextReferenceIsWeak() {
bool saved = next_reference_is_weak_;
next_reference_is_weak_ = false;
return saved;
}
#ifdef DEBUG
bool next_reference_is_weak() const { return next_reference_is_weak_; }
#endif
HeapObject* GetMap(uint32_t index) { UNREACHABLE(); }
HeapObject* GetLargeObject(uint32_t index) { UNREACHABLE(); }
HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
uint32_t chunk_offset) {
UNREACHABLE();
}
// ------- Reservation Methods -------
// Methods related to memory reservations (prior to deserialization).
// Builtin deserialization does not bake reservations into the snapshot, hence
// this is a nop.
void DecodeReservation(const std::vector<SerializedData::Reservation>& res) {}
// These methods are used to pre-allocate builtin objects prior to
// deserialization.
// TODO(jgruber): Refactor reservation/allocation logic in deserializers to
// make this less messy.
Heap::Reservation CreateReservationsForEagerBuiltins();
void InitializeFromReservations(const Heap::Reservation& reservation);
#ifdef DEBUG
bool ReservationsAreFullyUsed() const;
#endif
private:
Isolate* isolate() const;
BuiltinDeserializer* deserializer() const;
// Used after memory allocation prior to isolate initialization, to register
// the newly created object in code space and add it to the builtins table.
void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
int builtin_id);
#ifdef DEBUG
void RegisterCodeObjectReservation(int code_object_id);
void RegisterCodeObjectAllocation(int code_object_id);
std::unordered_set<int> unused_reservations_;
#endif
private:
// The current deserializer. Note that this always points to a
// BuiltinDeserializer instance, but we can't perform the cast during
// construction since that makes vtable-based checks fail.
Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
bool next_reference_is_weak_ = false;
DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_


@@ -1,135 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/builtin-deserializer.h"
#include "src/assembler-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
using interpreter::Bytecodes;
using interpreter::Interpreter;
// Tracks the code object currently being deserialized (required for
// allocation).
class DeserializingCodeObjectScope {
public:
DeserializingCodeObjectScope(BuiltinDeserializer* builtin_deserializer,
int code_object_id)
: builtin_deserializer_(builtin_deserializer) {
DCHECK_EQ(BuiltinDeserializer::kNoCodeObjectId,
builtin_deserializer->current_code_object_id_);
builtin_deserializer->current_code_object_id_ = code_object_id;
}
~DeserializingCodeObjectScope() {
builtin_deserializer_->current_code_object_id_ =
BuiltinDeserializer::kNoCodeObjectId;
}
private:
BuiltinDeserializer* builtin_deserializer_;
DISALLOW_COPY_AND_ASSIGN(DeserializingCodeObjectScope)
};
BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
const BuiltinSnapshotData* data)
: Deserializer(data, false) {
code_offsets_ = data->BuiltinOffsets();
DCHECK_EQ(Builtins::builtin_count, code_offsets_.length());
DCHECK(std::is_sorted(code_offsets_.begin(), code_offsets_.end()));
Initialize(isolate);
}
void BuiltinDeserializer::DeserializeEagerBuiltins() {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK_EQ(0, source()->position());
// Deserialize builtins.
Builtins* builtins = isolate()->builtins();
for (int i = 0; i < Builtins::builtin_count; i++) {
Code* code = DeserializeBuiltinRaw(i);
builtins->set_builtin(i, code);
}
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
for (int i = 0; i < Builtins::builtin_count; i++) {
Code* code = builtins->builtin(i);
const char* name = Builtins::name(i);
code->PrintBuiltinCode(isolate(), name);
}
}
#endif
}
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
DeserializingCodeObjectScope scope(this, builtin_id);
const int initial_position = source()->position();
source()->set_position(code_offsets_[builtin_id]);
Object* o = ReadDataSingle();
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
// Rewind.
source()->set_position(initial_position);
// Flush the instruction cache.
Code* code = Code::cast(o);
Assembler::FlushICache(code->raw_instruction_start(),
code->raw_instruction_size());
CodeEventListener::LogEventsAndTags code_tag;
switch (code->kind()) {
case Code::BUILTIN:
code_tag = CodeEventListener::BUILTIN_TAG;
break;
case Code::BYTECODE_HANDLER:
code_tag = CodeEventListener::BYTECODE_HANDLER_TAG;
break;
default:
UNREACHABLE();
}
PROFILE(isolate(), CodeCreateEvent(code_tag, AbstractCode::cast(code),
Builtins::name(builtin_id)));
LOG_CODE_EVENT(isolate(),
CodeLinePosInfoRecordEvent(
code->raw_instruction_start(),
ByteArray::cast(code->source_position_table())));
return code;
}
uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
DCHECK_LT(code_object_id, Builtins::builtin_count);
const int initial_position = source()->position();
// Grab the size of the code object.
source()->set_position(code_offsets_[code_object_id]);
byte data = source()->Get();
USE(data);
DCHECK_EQ(kNewObject | kPlain | kStartOfObject | CODE_SPACE, data);
const uint32_t result = source()->GetInt() << kObjectAlignmentBits;
// Rewind.
source()->set_position(initial_position);
return result;
}
} // namespace internal
} // namespace v8


@@ -1,74 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
namespace internal {
class BuiltinSnapshotData;
// Deserializes the builtins blob.
class BuiltinDeserializer final
: public Deserializer<BuiltinDeserializerAllocator> {
using Bytecode = interpreter::Bytecode;
using OperandScale = interpreter::OperandScale;
public:
BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
// Builtins deserialization is tightly integrated with deserialization of the
// startup blob. In particular, we need to ensure that no GC can occur
// between startup- and builtins deserialization, as all builtins have been
// pre-allocated and their pointers may not be invalidated.
//
// After this, the instruction cache must be flushed by the caller (we don't
// do it ourselves since the startup serializer batch-flushes all code pages).
void DeserializeEagerBuiltins();
private:
// Deserializes the single given builtin. Assumes that reservations have
// already been allocated.
Code* DeserializeBuiltinRaw(int builtin_id);
// Extracts the size of builtin Code objects (baked into the snapshot).
uint32_t ExtractCodeObjectSize(int builtin_id);
// BuiltinDeserializer implements its own builtin iteration logic. Make sure
// the RootVisitor API is not used accidentally.
void VisitRootPointers(Root root, const char* description, ObjectSlot start,
ObjectSlot end) override {
UNREACHABLE();
}
int CurrentCodeObjectId() const { return current_code_object_id_; }
private:
// Stores the code object currently being deserialized. The
// {current_code_object_id} stores the index of the currently-deserialized
// code object within the snapshot (and within {code_offsets_}). We need this
// to determine where to 'allocate' from during deserialization.
static const int kNoCodeObjectId = -1;
int current_code_object_id_ = kNoCodeObjectId;
// The offsets of each builtin within the serialized data. Equivalent to
// BuiltinSerializer::builtin_offsets_ but on the deserialization side.
Vector<const uint32_t> code_offsets_;
// For current_code_object_id_.
friend class DeserializingCodeObjectScope;
// For isolate(), CurrentCodeObjectId() and ExtractBuiltinSize().
friend class BuiltinDeserializerAllocator;
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_


@@ -1,61 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
SerializerReference BuiltinSerializerAllocator::Allocate(AllocationSpace space,
uint32_t size) {
DCHECK_EQ(space, CODE_SPACE);
DCHECK_GT(size, 0);
// Builtin serialization & deserialization does not use the reservation
// system. Instead of worrying about chunk indices and offsets, we simply
// need to generate unique offsets here.
const auto ref =
SerializerReference::BuiltinReference(next_builtin_reference_index_);
allocated_bytes_ += size;
next_builtin_reference_index_++;
return ref;
}
#ifdef DEBUG
bool BuiltinSerializerAllocator::BackReferenceIsAlreadyAllocated(
SerializerReference reference) const {
DCHECK(reference.is_builtin_reference());
return reference.builtin_index() < next_builtin_reference_index_;
}
#endif // DEBUG
std::vector<SerializedData::Reservation>
BuiltinSerializerAllocator::EncodeReservations() const {
return std::vector<SerializedData::Reservation>();
}
void BuiltinSerializerAllocator::OutputStatistics() {
DCHECK(FLAG_serialization_statistics);
PrintF(" Spaces (bytes):\n");
for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
uint32_t space_size = (space == CODE_SPACE) ? allocated_bytes_ : 0;
PrintF("%16d", space_size);
}
PrintF("\n");
}
} // namespace internal
} // namespace v8


@@ -1,50 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
#define V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
#include "src/snapshot/serializer-common.h"
namespace v8 {
namespace internal {
template <class AllocatorT>
class Serializer;
class BuiltinSerializerAllocator final {
public:
BuiltinSerializerAllocator(
Serializer<BuiltinSerializerAllocator>* serializer) {}
SerializerReference Allocate(AllocationSpace space, uint32_t size);
SerializerReference AllocateMap() { UNREACHABLE(); }
SerializerReference AllocateLargeObject(uint32_t size) { UNREACHABLE(); }
SerializerReference AllocateOffHeapBackingStore() { UNREACHABLE(); }
#ifdef DEBUG
bool BackReferenceIsAlreadyAllocated(
SerializerReference back_reference) const;
#endif
std::vector<SerializedData::Reservation> EncodeReservations() const;
void OutputStatistics();
private:
static constexpr int kNumberOfPreallocatedSpaces =
SerializerDeserializer::kNumberOfPreallocatedSpaces;
static constexpr int kNumberOfSpaces =
SerializerDeserializer::kNumberOfSpaces;
uint32_t allocated_bytes_ = 0;
uint32_t next_builtin_reference_index_ = 0;
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializerAllocator)
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_


@@ -1,104 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/builtin-serializer.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "src/snapshot/startup-serializer.h"
namespace v8 {
namespace internal {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::OperandScale;
BuiltinSerializer::BuiltinSerializer(Isolate* isolate,
StartupSerializer* startup_serializer)
: Serializer(isolate), startup_serializer_(startup_serializer) {}
BuiltinSerializer::~BuiltinSerializer() {
OutputStatistics("BuiltinSerializer");
}
void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
// Serialize builtins.
for (int i = 0; i < Builtins::builtin_count; i++) {
Code* code = isolate()->builtins()->builtin(i);
SetBuiltinOffset(i, sink_.Position());
SerializeBuiltin(code);
}
// Append the offset table. During deserialization, the offset table is
// extracted by BuiltinSnapshotData.
const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
int data_length = static_cast<int>(sizeof(code_offsets_));
// Pad with kNop since GetInt() might read too far.
Pad(data_length);
sink_.PutRaw(data, data_length, "BuiltinOffsets");
}
void BuiltinSerializer::VisitRootPointers(Root root, const char* description,
ObjectSlot start, ObjectSlot end) {
UNREACHABLE(); // We iterate manually in SerializeBuiltins.
}
void BuiltinSerializer::SerializeBuiltin(Code* code) {
DCHECK_GE(code->builtin_index(), 0);
// All builtins are serialized unconditionally when the respective builtin is
// reached while iterating the builtins list. A builtin seen at any other
// time (e.g. startup snapshot creation, or while iterating a builtin code
// object during builtin serialization) is serialized by reference - see
// BuiltinSerializer::SerializeObject below.
ObjectSerializer object_serializer(this, code, &sink_, kPlain,
kStartOfObject);
object_serializer.Serialize();
}
void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!o->IsSmi());
// Roots can simply be serialized as root references.
if (SerializeRoot(o, how_to_code, where_to_point, skip)) return;
// Builtins are serialized using a dedicated bytecode. We only reach this
// point if encountering a Builtin e.g. while iterating the body of another
// builtin.
if (SerializeBuiltinReference(o, how_to_code, where_to_point, skip)) return;
// Embedded objects are serialized as part of the read-only object and partial
// snapshot caches.
// Currently we expect to see:
// * Code: Jump targets.
// * ByteArrays: Relocation infos.
// * FixedArrays: Handler tables.
// * Strings: CSA_ASSERTs in debug builds, various other string constants.
// * HeapNumbers: Embedded constants.
// TODO(6624): Jump targets should never trigger content serialization, it
// should always result in a reference instead. Reloc infos and handler tables
// should not end up in the partial snapshot cache.
if (startup_serializer_->SerializeUsingReadOnlyObjectCache(
&sink_, o, how_to_code, where_to_point, skip)) {
return;
}
startup_serializer_->SerializeUsingPartialSnapshotCache(
&sink_, o, how_to_code, where_to_point, skip);
}
void BuiltinSerializer::SetBuiltinOffset(int builtin_id, uint32_t offset) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
code_offsets_[builtin_id] = offset;
}
} // namespace internal
} // namespace v8


@@ -1,59 +0,0 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#include "src/builtins/builtins.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/serializer.h"
namespace v8 {
namespace internal {
class StartupSerializer;
// Responsible for serializing builtin objects during startup snapshot creation
// into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;
void SerializeBuiltinsAndHandlers();
private:
void VisitRootPointers(Root root, const char* description, ObjectSlot start,
ObjectSlot end) override;
void SerializeBuiltin(Code* code);
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
void SetBuiltinOffset(int builtin_id, uint32_t offset);
void SetHandlerOffset(interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale,
uint32_t offset);
// The startup serializer is needed for access to the partial snapshot cache,
// which is used to serialize things like embedded constants.
StartupSerializer* startup_serializer_;
// Stores the starting offset, within the serialized data, of each code
// object. This is later packed into the builtin snapshot, and used by the
// builtin deserializer to deserialize individual builtins.
//
// Indices [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins[:
// Builtin offsets.
uint32_t code_offsets_[Builtins::builtin_count];
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_BUILTIN_SERIALIZER_H_


@@ -140,17 +140,13 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
case Code::NUMBER_OF_KINDS: // Pseudo enum value.
case Code::BYTECODE_HANDLER: // No direct references to handlers.
break; // hit UNREACHABLE below.
case Code::BUILTIN:
SerializeBuiltinReference(code_object, how_to_code, where_to_point, 0);
return;
case Code::STUB:
if (code_object->builtin_index() == -1) {
SerializeCodeStub(code_object, how_to_code, where_to_point);
return SerializeCodeStub(code_object, how_to_code, where_to_point);
} else {
SerializeBuiltinReference(code_object, how_to_code, where_to_point,
0);
return SerializeCodeObject(code_object, how_to_code, where_to_point);
}
return;
case Code::BUILTIN:
default:
return SerializeCodeObject(code_object, how_to_code, where_to_point);
}


@@ -42,7 +42,7 @@ class ScriptData {
DISALLOW_COPY_AND_ASSIGN(ScriptData);
};
class CodeSerializer : public Serializer<> {
class CodeSerializer : public Serializer {
public:
static ScriptCompiler::CachedData* Serialize(Handle<SharedFunctionInfo> info);


@@ -5,7 +5,6 @@
#include "src/snapshot/default-deserializer-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/deserializer.h"
#include "src/snapshot/startup-deserializer.h"
@@ -13,7 +12,7 @@ namespace v8 {
namespace internal {
DefaultDeserializerAllocator::DefaultDeserializerAllocator(
Deserializer<DefaultDeserializerAllocator>* deserializer)
Deserializer* deserializer)
: deserializer_(deserializer) {}
// We know the space requirements before deserialization and can
@@ -149,72 +148,6 @@ bool DefaultDeserializerAllocator::ReserveSpace() {
return true;
}
// static
bool DefaultDeserializerAllocator::ReserveSpace(
StartupDeserializer* startup_deserializer,
BuiltinDeserializer* builtin_deserializer) {
Isolate* isolate = startup_deserializer->isolate();
// Create a set of merged reservations to reserve space in one go. The
// BuiltinDeserializer's reservations are ignored. Instead, we manually
// determine the required code-space.
Heap::Reservation merged_reservations[kNumberOfSpaces];
for (int i = FIRST_SPACE; i < kNumberOfSpaces; i++) {
merged_reservations[i] =
startup_deserializer->allocator()->reservations_[i];
}
Heap::Reservation builtin_reservations =
builtin_deserializer->allocator()->CreateReservationsForEagerBuiltins();
DCHECK(!builtin_reservations.empty());
for (const auto& c : builtin_reservations) {
merged_reservations[CODE_SPACE].push_back(c);
}
if (!isolate->heap()->ReserveSpace(
merged_reservations,
&startup_deserializer->allocator()->allocated_maps_)) {
return false;
}
DisallowHeapAllocation no_allocation;
// Distribute the successful allocations between both deserializers.
// There's nothing to be done here except for code space.
{
const int num_builtin_reservations =
static_cast<int>(builtin_reservations.size());
for (int i = num_builtin_reservations - 1; i >= 0; i--) {
const auto& c = merged_reservations[CODE_SPACE].back();
DCHECK_EQ(c.size, builtin_reservations[i].size);
DCHECK_EQ(c.size, c.end - c.start);
builtin_reservations[i].start = c.start;
builtin_reservations[i].end = c.end;
merged_reservations[CODE_SPACE].pop_back();
}
builtin_deserializer->allocator()->InitializeFromReservations(
builtin_reservations);
}
// Write back startup reservations.
for (int i = FIRST_SPACE; i < kNumberOfSpaces; i++) {
startup_deserializer->allocator()->reservations_[i].swap(
merged_reservations[i]);
}
for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
startup_deserializer->allocator()->high_water_[i] =
startup_deserializer->allocator()->reservations_[i][0].start;
}
return true;
}
bool DefaultDeserializerAllocator::ReservationsAreFullyUsed() const {
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
const uint32_t chunk_index = current_chunk_[space];


@@ -12,16 +12,12 @@
namespace v8 {
namespace internal {
template <class AllocatorT>
class Deserializer;
class BuiltinDeserializer;
class StartupDeserializer;
class DefaultDeserializerAllocator final {
public:
DefaultDeserializerAllocator(
Deserializer<DefaultDeserializerAllocator>* deserializer);
explicit DefaultDeserializerAllocator(Deserializer* deserializer);
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
@@ -61,11 +57,6 @@ class DefaultDeserializerAllocator final {
void DecodeReservation(const std::vector<SerializedData::Reservation>& res);
bool ReserveSpace();
// Atomically reserves space for the two given deserializers. Guarantees
// reservation for both without garbage collection in-between.
static bool ReserveSpace(StartupDeserializer* startup_deserializer,
BuiltinDeserializer* builtin_deserializer);
bool ReservationsAreFullyUsed() const;
// ------- Misc Utility Methods -------
@@ -106,7 +97,7 @@ class DefaultDeserializerAllocator final {
std::vector<HeapObject*> deserialized_large_objects_;
// The current deserializer.
Deserializer<DefaultDeserializerAllocator>* const deserializer_;
Deserializer* const deserializer_;
DISALLOW_COPY_AND_ASSIGN(DefaultDeserializerAllocator)
};


@@ -12,8 +12,7 @@
namespace v8 {
namespace internal {
DefaultSerializerAllocator::DefaultSerializerAllocator(
Serializer<DefaultSerializerAllocator>* serializer)
DefaultSerializerAllocator::DefaultSerializerAllocator(Serializer* serializer)
: serializer_(serializer) {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
pending_chunk_[i] = 0;


@@ -10,13 +10,11 @@
namespace v8 {
namespace internal {
template <class AllocatorT>
class Serializer;
class DefaultSerializerAllocator final {
public:
DefaultSerializerAllocator(
Serializer<DefaultSerializerAllocator>* serializer);
explicit DefaultSerializerAllocator(Serializer* serializer);
SerializerReference Allocate(AllocationSpace space, uint32_t size);
SerializerReference AllocateMap();
@@ -68,7 +66,7 @@ class DefaultSerializerAllocator final {
uint32_t custom_chunk_size_ = 0;
// The current serializer.
Serializer<DefaultSerializerAllocator>* const serializer_;
Serializer* const serializer_;
DISALLOW_COPY_AND_ASSIGN(DefaultSerializerAllocator)
};


@@ -6,6 +6,7 @@
#include "src/assembler-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table.h"
@@ -14,7 +15,6 @@
#include "src/objects/maybe-object.h"
#include "src/objects/slots.h"
#include "src/objects/string.h"
#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
@@ -64,22 +64,17 @@ class UnalignedSlot {
Address ptr_;
};
template <class AllocatorT>
void Deserializer<AllocatorT>::UnalignedCopy(UnalignedSlot dest,
MaybeObject value) {
void Deserializer::UnalignedCopy(UnalignedSlot dest, MaybeObject value) {
DCHECK(!allocator()->next_reference_is_weak());
dest.Write(value.ptr());
}
template <class AllocatorT>
void Deserializer<AllocatorT>::UnalignedCopy(UnalignedSlot dest,
Address value) {
void Deserializer::UnalignedCopy(UnalignedSlot dest, Address value) {
DCHECK(!allocator()->next_reference_is_weak());
dest.Write(value);
}
template <class AllocatorT>
void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
void Deserializer::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
@@ -98,14 +93,12 @@ void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
SerializedData::ComputeMagicNumber(external_reference_table_));
}
template <class AllocatorT>
void Deserializer<AllocatorT>::Rehash() {
void Deserializer::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
for (const auto& item : to_rehash_) item->RehashBasedOnMap(isolate());
}
template <class AllocatorT>
Deserializer<AllocatorT>::~Deserializer() {
Deserializer::~Deserializer() {
#ifdef DEBUG
// Do not perform checks if we aborted deserialization.
if (source_.position() == 0) return;
@@ -118,28 +111,19 @@ Deserializer<AllocatorT>::~Deserializer() {
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
template <class AllocatorT>
void Deserializer<AllocatorT>::VisitRootPointers(Root root,
const char* description,
ObjectSlot start,
ObjectSlot end) {
// Builtins are deserialized in a separate pass by the BuiltinDeserializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
void Deserializer::VisitRootPointers(Root root, const char* description,
ObjectSlot start, ObjectSlot end) {
// The space must be new space. Any other space would cause ReadChunk to try
// to update the remembered set using nullptr as the address.
ReadData(UnalignedSlot(start), UnalignedSlot(end), NEW_SPACE, kNullAddress);
}
template <class AllocatorT>
void Deserializer<AllocatorT>::Synchronize(
VisitorSynchronization::SyncTag tag) {
void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
static const byte expected = kSynchronize;
CHECK_EQ(expected, source_.Get());
}
template <class AllocatorT>
void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
void Deserializer::DeserializeDeferredObjects() {
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
switch (code) {
case kAlignmentPrefix:
@@ -190,9 +174,7 @@ uint32_t StringTableInsertionKey::ComputeHashField(String* string) {
return string->hash_field();
}
template <class AllocatorT>
HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
int space) {
HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
if (obj->IsString()) {
// Uninitialize hash field as we need to recompute the hash.
@@ -315,8 +297,7 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
return obj;
}
template <class AllocatorT>
HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
HeapObject* Deserializer::GetBackReferencedObject(int space) {
HeapObject* obj;
switch (space) {
case LO_SPACE:
@@ -364,10 +345,8 @@ HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
template <class AllocatorT>
void Deserializer<AllocatorT>::ReadObject(
int space_number, UnalignedSlot write_back,
HeapObjectReferenceType reference_type) {
void Deserializer::ReadObject(int space_number, UnalignedSlot write_back,
HeapObjectReferenceType reference_type) {
const int size = source_.GetInt() << kObjectAlignmentBits;
Address address =
@@ -396,18 +375,6 @@ void Deserializer<AllocatorT>::ReadObject(
#endif // DEBUG
}
template <class AllocatorT>
Object* Deserializer<AllocatorT>::ReadDataSingle() {
MaybeObject o;
UnalignedSlot start(&o);
UnalignedSlot end(start.address() + kPointerSize);
int source_space = NEW_SPACE;
Address current_object = kNullAddress;
CHECK(ReadData(start, end, source_space, current_object));
return o->GetHeapObjectAssumeStrong();
}
static void NoExternalReferencesCallback() {
// The following check will trigger if a function or object template
// with references to native functions have been deserialized from
@@ -416,10 +383,8 @@ static void NoExternalReferencesCallback() {
CHECK_WITH_MSG(false, "No external references provided via API");
}
template <class AllocatorT>
bool Deserializer<AllocatorT>::ReadData(UnalignedSlot current,
UnalignedSlot limit, int source_space,
Address current_object_address) {
bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
int source_space, Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
@@ -526,10 +491,6 @@ bool Deserializer<AllocatorT>::ReadData(UnalignedSlot current,
SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
SINGLE_CASE(kAttachedReference, kFromCode, kStartOfObject, 0)
SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
// Find a builtin and write a pointer to it to the current object.
SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
SINGLE_CASE(kBuiltin, kFromCode, kStartOfObject, 0)
SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)
#undef CASE_STATEMENT
#undef CASE_BODY
@@ -792,8 +753,7 @@ bool Deserializer<AllocatorT>::ReadData(UnalignedSlot current,
return true;
}
template <class AllocatorT>
UnalignedSlot Deserializer<AllocatorT>::ReadExternalReferenceCase(
UnalignedSlot Deserializer::ReadExternalReferenceCase(
HowToCode how, UnalignedSlot current, Address current_object_address) {
int skip = source_.GetInt();
current.Advance(skip);
@@ -815,11 +775,11 @@ UnalignedSlot Deserializer<AllocatorT>::ReadExternalReferenceCase(
return current;
}
template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
UnalignedSlot Deserializer<AllocatorT>::ReadDataCase(
Isolate* isolate, UnalignedSlot current, Address current_object_address,
byte data, bool write_barrier_needed) {
UnalignedSlot Deserializer::ReadDataCase(Isolate* isolate,
UnalignedSlot current,
Address current_object_address,
byte data, bool write_barrier_needed) {
bool emit_write_barrier = false;
bool current_was_incremented = false;
int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
@@ -859,24 +819,15 @@ UnalignedSlot Deserializer<AllocatorT>::ReadDataCase(
int cache_index = source_.GetInt();
new_object = isolate->partial_snapshot_cache()->at(cache_index);
emit_write_barrier = Heap::InNewSpace(new_object);
} else if (where == kAttachedReference) {
} else {
DCHECK_EQ(where, kAttachedReference);
int index = source_.GetInt();
new_object = *attached_objects_[index];
emit_write_barrier = Heap::InNewSpace(new_object);
} else {
DCHECK_EQ(where, kBuiltin);
int builtin_id = source_.GetInt();
new_object = isolate->builtins()->builtin(builtin_id);
emit_write_barrier = false;
}
if (within == kInnerPointer) {
DCHECK_EQ(how, kFromCode);
if (where == kBuiltin) {
// At this point, new_object may still be uninitialized, thus the
// unchecked Code cast.
new_object = reinterpret_cast<Object*>(
reinterpret_cast<Code*>(new_object)->raw_instruction_start());
} else if (new_object->IsCode()) {
if (new_object->IsCode()) {
new_object = reinterpret_cast<Object*>(
Code::cast(new_object)->raw_instruction_start());
} else {
@@ -915,9 +866,5 @@ UnalignedSlot Deserializer<AllocatorT>::ReadDataCase(
return current;
}
// Explicit instantiation.
template class Deserializer<BuiltinDeserializerAllocator>;
template class Deserializer<DefaultDeserializerAllocator>;
} // namespace internal
} // namespace v8


@@ -31,7 +31,6 @@ class UnalignedSlot;
#endif
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
template <class AllocatorT = DefaultDeserializerAllocator>
class Deserializer : public SerializerDeserializer {
public:
~Deserializer() override;
@@ -58,9 +57,6 @@ class Deserializer : public SerializerDeserializer {
void Initialize(Isolate* isolate);
void DeserializeDeferredObjects();
// Deserializes into a single pointer and returns the resulting object.
Object* ReadDataSingle();
// This returns the address of an object that has been described in the
// snapshot by chunk index and offset.
HeapObject* GetBackReferencedObject(int space);
@@ -92,7 +88,7 @@ class Deserializer : public SerializerDeserializer {
return new_scripts_;
}
AllocatorT* allocator() { return &allocator_; }
DefaultDeserializerAllocator* allocator() { return &allocator_; }
bool deserializing_user_code() const { return deserializing_user_code_; }
bool can_rehash() const { return can_rehash_; }
@@ -152,7 +148,7 @@ class Deserializer : public SerializerDeserializer {
std::vector<Handle<Script>> new_scripts_;
std::vector<byte*> off_heap_backing_stores_;
AllocatorT allocator_;
DefaultDeserializerAllocator allocator_;
const bool deserializing_user_code_;
// TODO(6593): generalize rehashing, and remove this flag.


@@ -14,7 +14,7 @@ class SerializedCodeData;
class SharedFunctionInfo;
// Deserializes the object graph rooted at a given object.
class ObjectDeserializer final : public Deserializer<> {
class ObjectDeserializer final : public Deserializer {
public:
static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source);


@@ -15,7 +15,7 @@ class Context;
// Deserializes the context-dependent object graph rooted at a given object.
// The PartialDeserializer is not expected to deserialize any code objects.
class PartialDeserializer final : public Deserializer<> {
class PartialDeserializer final : public Deserializer {
public:
static MaybeHandle<Context> DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,


@@ -55,9 +55,6 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip)) {
return;
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
if (SerializeRoot(obj, how_to_code, where_to_point, skip)) return;


@@ -13,7 +13,7 @@ namespace internal {
class StartupSerializer;
class PartialSerializer : public Serializer<> {
class PartialSerializer : public Serializer {
public:
PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer,
v8::SerializeEmbedderFieldsCallback callback);


@@ -13,7 +13,7 @@ namespace internal {
// Deserializes the read-only blob, creating the read-only roots and the
// Read-only object cache used by the other deserializers.
class ReadOnlyDeserializer final : public Deserializer<> {
class ReadOnlyDeserializer final : public Deserializer {
public:
explicit ReadOnlyDeserializer(const SnapshotData* data)
: Deserializer(data, false) {}


@@ -20,7 +20,7 @@ enum class RootIndex : uint16_t;
// Base class for serializers that iterate over roots. Also maintains a cache
// that can be used to share non-root objects with other serializers.
class RootsSerializer : public Serializer<> {
class RootsSerializer : public Serializer {
public:
// The serializer expects that all roots before |first_root_to_be_serialized|
// are already serialized.


@@ -123,8 +123,11 @@ class SerializerDeserializer : public RootVisitor {
const std::vector<CallHandlerInfo*>& call_handler_infos);
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
V(0x0e) \
V(0x2e) \
V(0x3e) \
V(0x3f) \
V(0x4e) \
V(0x58) \
V(0x59) \
V(0x5a) \
@@ -134,6 +137,7 @@ class SerializerDeserializer : public RootVisitor {
V(0x5e) \
V(0x5f) \
V(0x67) \
V(0x6e) \
V(0x76) \
V(0x79) \
V(0x7a) \
@@ -160,8 +164,6 @@ class SerializerDeserializer : public RootVisitor {
// 0x07 External reference referenced by id.
kExternalReference = 0x07,
// 0x0e Builtin code referenced by index.
kBuiltin = 0x0e,
// 0x16 Root array item.
kRootArray = 0x16,
// 0x17 Object provided in the attached list.
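
With kBuiltin retired, its byte value and the variant spellings for each
(how_to_code, where_to_point) combination become free, which is presumably
why 0x0e, 0x2e, 0x4e and 0x6e all join UNUSED_SERIALIZER_BYTE_CODES above.
A small sketch of the encoding arithmetic; the 0x20 spacing of the variant
flags is inferred from those four values, not quoted from
serializer-common.h:

#include <cstdio>

// Base value of the removed bytecode, per the enum entry above.
constexpr int kBuiltin = 0x0e;
// Assumed spacing between the four variant spellings of one bytecode.
constexpr int kVariantStride = 0x20;

int main() {
  // Prints 0x0e, 0x2e, 0x4e, 0x6e: the values freed by removing kBuiltin.
  for (int variant = 0; variant < 4; variant++) {
    std::printf("0x%02x\n", kBuiltin + variant * kVariantStride);
  }
  return 0;
}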


@@ -12,15 +12,13 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/map.h"
#include "src/objects/slots-inl.h"
#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
template <class AllocatorT>
Serializer<AllocatorT>::Serializer(Isolate* isolate)
Serializer::Serializer(Isolate* isolate)
: isolate_(isolate),
external_reference_encoder_(isolate),
root_index_map_(isolate),
@@ -44,8 +42,7 @@ Serializer<AllocatorT>::Serializer(Isolate* isolate)
#endif // OBJECT_PRINT
}
template <class AllocatorT>
Serializer<AllocatorT>::~Serializer() {
Serializer::~Serializer() {
if (code_address_map_ != nullptr) delete code_address_map_;
#ifdef OBJECT_PRINT
for (int space = 0; space < LAST_SPACE; ++space) {
@@ -58,17 +55,14 @@ Serializer<AllocatorT>::~Serializer() {
}
#ifdef OBJECT_PRINT
template <class AllocatorT>
void Serializer<AllocatorT>::CountInstanceType(Map* map, int size,
AllocationSpace space) {
void Serializer::CountInstanceType(Map* map, int size, AllocationSpace space) {
int instance_type = map->instance_type();
instance_type_count_[space][instance_type]++;
instance_type_size_[space][instance_type] += size;
}
#endif // OBJECT_PRINT
template <class AllocatorT>
void Serializer<AllocatorT>::OutputStatistics(const char* name) {
void Serializer::OutputStatistics(const char* name) {
if (!FLAG_serialization_statistics) return;
PrintF("%s:\n", name);
@@ -92,8 +86,7 @@ void Serializer<AllocatorT>::OutputStatistics(const char* name) {
#endif // OBJECT_PRINT
}
template <class AllocatorT>
void Serializer<AllocatorT>::SerializeDeferredObjects() {
void Serializer::SerializeDeferredObjects() {
while (!deferred_objects_.empty()) {
HeapObject* obj = deferred_objects_.back();
deferred_objects_.pop_back();
@@ -103,26 +96,16 @@ void Serializer<AllocatorT>::SerializeDeferredObjects() {
sink_.Put(kSynchronize, "Finished with deferred objects");
}
template <class AllocatorT>
bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
return false;
}
template <class AllocatorT>
void Serializer<AllocatorT>::VisitRootPointers(Root root,
const char* description,
ObjectSlot start,
ObjectSlot end) {
// Builtins are serialized in a separate pass by the BuiltinSerializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
bool Serializer::MustBeDeferred(HeapObject* object) { return false; }
void Serializer::VisitRootPointers(Root root, const char* description,
ObjectSlot start, ObjectSlot end) {
for (ObjectSlot current = start; current < end; ++current) {
SerializeRootObject(*current);
}
}
template <class AllocatorT>
void Serializer<AllocatorT>::SerializeRootObject(Object* object) {
void Serializer::SerializeRootObject(Object* object) {
if (object->IsSmi()) {
PutSmi(Smi::cast(object));
} else {
@@ -131,8 +114,7 @@ void Serializer<AllocatorT>::SerializeRootObject(Object* object) {
}
#ifdef DEBUG
template <class AllocatorT>
void Serializer<AllocatorT>::PrintStack() {
void Serializer::PrintStack() {
for (const auto o : stack_) {
o->Print();
PrintF("\n");
@@ -140,11 +122,8 @@ void Serializer<AllocatorT>::PrintStack() {
}
#endif // DEBUG
template <class AllocatorT>
bool Serializer<AllocatorT>::SerializeRoot(HeapObject* obj,
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip) {
bool Serializer::SerializeRoot(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
RootIndex root_index;
// Derived serializers are responsible for determining if the root has
// actually been serialized before calling this.
@@ -155,11 +134,8 @@ bool Serializer<AllocatorT>::SerializeRoot(HeapObject* obj,
return false;
}
template <class AllocatorT>
bool Serializer<AllocatorT>::SerializeHotObject(HeapObject* obj,
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip) {
bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
// Encode a reference to a hot object by its index in the working set.
int index = hot_objects_.Find(obj);
@@ -179,11 +155,8 @@ bool Serializer<AllocatorT>::SerializeHotObject(HeapObject* obj,
return true;
}
template <class AllocatorT>
bool Serializer<AllocatorT>::SerializeBackReference(HeapObject* obj,
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip) {
bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
SerializerReference reference = reference_map_.LookupReference(obj);
if (!reference.is_valid()) return false;
// Encode the location of an already deserialized object in order to write
@@ -219,44 +192,15 @@ bool Serializer<AllocatorT>::SerializeBackReference(HeapObject* obj,
return true;
}
template <class AllocatorT>
bool Serializer<AllocatorT>::SerializeBuiltinReference(
HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
int skip) {
if (!obj->IsCode()) return false;
Code* code = Code::cast(obj);
int builtin_index = code->builtin_index();
if (builtin_index < 0) return false;
DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
(how_to_code == kFromCode));
DCHECK_LT(builtin_index, Builtins::builtin_count);
DCHECK_LE(0, builtin_index);
if (FLAG_trace_serializer) {
PrintF(" Encoding builtin reference: %s\n",
isolate()->builtins()->name(builtin_index));
}
FlushSkip(skip);
sink_.Put(kBuiltin + how_to_code + where_to_point, "Builtin");
sink_.PutInt(builtin_index, "builtin_index");
return true;
}
template <class AllocatorT>
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
bool Serializer::ObjectIsBytecodeHandler(HeapObject* obj) const {
if (!obj->IsCode()) return false;
return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
}
template <class AllocatorT>
void Serializer<AllocatorT>::PutRoot(
RootIndex root, HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
SerializerDeserializer::WhereToPoint where_to_point, int skip) {
void Serializer::PutRoot(RootIndex root, HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
SerializerDeserializer::WhereToPoint where_to_point,
int skip) {
int root_index = static_cast<int>(root);
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
@@ -285,16 +229,14 @@ void Serializer<AllocatorT>::PutRoot(
}
}
template <class AllocatorT>
void Serializer<AllocatorT>::PutSmi(Smi* smi) {
void Serializer::PutSmi(Smi* smi) {
sink_.Put(kOnePointerRawData, "Smi");
byte* bytes = reinterpret_cast<byte*>(&smi);
for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
}
template <class AllocatorT>
void Serializer<AllocatorT>::PutBackReference(HeapObject* object,
SerializerReference reference) {
void Serializer::PutBackReference(HeapObject* object,
SerializerReference reference) {
DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
switch (reference.space()) {
case MAP_SPACE:
@@ -314,10 +256,9 @@ void Serializer<AllocatorT>::PutBackReference(HeapObject* object,
hot_objects_.Add(object);
}
template <class AllocatorT>
void Serializer<AllocatorT>::PutAttachedReference(SerializerReference reference,
HowToCode how_to_code,
WhereToPoint where_to_point) {
void Serializer::PutAttachedReference(SerializerReference reference,
HowToCode how_to_code,
WhereToPoint where_to_point) {
DCHECK(reference.is_attached_reference());
DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
(how_to_code == kFromCode && where_to_point == kStartOfObject) ||
@@ -326,8 +267,7 @@ void Serializer<AllocatorT>::PutAttachedReference(SerializerReference reference,
sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}
template <class AllocatorT>
int Serializer<AllocatorT>::PutAlignmentPrefix(HeapObject* object) {
int Serializer::PutAlignmentPrefix(HeapObject* object) {
AllocationAlignment alignment = HeapObject::RequiredAlignment(object->map());
if (alignment != kWordAligned) {
DCHECK(1 <= alignment && alignment <= 3);
@ -338,14 +278,12 @@ int Serializer<AllocatorT>::PutAlignmentPrefix(HeapObject* object) {
return 0;
}
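The alignment prefix can be modelled separately: word-aligned objects cost nothing, anything else emits a small code the deserializer replays before allocating. A sketch with invented names and codes, not V8's actual constants:

#include <cassert>

// Toy alignment prefix: word-aligned objects need no prefix; others emit a
// small code (1..3) that the deserializer replays before allocation.
enum Alignment { kWordAligned = 0, kDoubleAligned = 1, kDoubleUnaligned = 2 };

int AlignmentPrefixFor(Alignment alignment) {
  if (alignment == kWordAligned) return 0;  // nothing emitted
  int code = static_cast<int>(alignment);
  assert(1 <= code && code <= 3);
  return code;  // the serializer would emit a prefix opcode plus this code
}

int main() {
  assert(AlignmentPrefixFor(kWordAligned) == 0);
  assert(AlignmentPrefixFor(kDoubleAligned) == 1);
}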
template <class AllocatorT>
void Serializer<AllocatorT>::PutNextChunk(int space) {
void Serializer::PutNextChunk(int space) {
sink_.Put(kNextChunk, "NextChunk");
sink_.Put(space, "NextChunkSpace");
}
template <class AllocatorT>
void Serializer<AllocatorT>::Pad(int padding_offset) {
void Serializer::Pad(int padding_offset) {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
@ -357,14 +295,12 @@ void Serializer<AllocatorT>::Pad(int padding_offset) {
}
}
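The comment above is the whole story: a branch-free integer decode always loads a full word, so the stream needs trailing pad bytes. A small illustration of the invariant; the reader shown here is hypothetical, not V8's GetInt:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Branch-free decode: unconditionally load 4 bytes, then mask. Safe only
// because the stream is padded with sizeof(int32_t) - 1 trailing bytes.
uint32_t DecodeByteBranchFree(const uint8_t* p) {
  uint32_t word;
  std::memcpy(&word, p, sizeof(word));  // may touch the pad bytes
  return word & 0xFF;
}

int main() {
  std::vector<uint8_t> stream = {0x2A};             // last meaningful byte
  for (int i = 0; i < 3; i++) stream.push_back(0);  // Pad()-style fill
  assert(DecodeByteBranchFree(&stream[0]) == 0x2A);
}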
template <class AllocatorT>
void Serializer<AllocatorT>::InitializeCodeAddressMap() {
void Serializer::InitializeCodeAddressMap() {
isolate_->InitializeLoggingAndCounters();
code_address_map_ = new CodeAddressMap(isolate_);
}
template <class AllocatorT>
Code* Serializer<AllocatorT>::CopyCode(Code* code) {
Code* Serializer::CopyCode(Code* code) {
code_buffer_.clear(); // Clear buffer without deleting backing store.
int size = code->CodeSize();
code_buffer_.insert(code_buffer_.end(),
@ -374,9 +310,8 @@ Code* Serializer<AllocatorT>::CopyCode(Code* code) {
reinterpret_cast<Address>(&code_buffer_.front())));
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializePrologue(
AllocationSpace space, int size, Map* map) {
void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
int size, Map* map) {
if (serializer_->code_address_map_) {
const char* code_name =
serializer_->code_address_map_->Lookup(object_->address());
@ -421,8 +356,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializePrologue(
serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}
template <class AllocatorT>
int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
int32_t Serializer::ObjectSerializer::SerializeBackingStore(
void* backing_store, int32_t byte_length) {
SerializerReference reference =
serializer_->reference_map()->LookupReference(backing_store);
@ -441,8 +375,7 @@ int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
return static_cast<int32_t>(reference.off_heap_backing_store_index());
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
void Serializer::ObjectSerializer::SerializeJSTypedArray() {
JSTypedArray* typed_array = JSTypedArray::cast(object_);
FixedTypedArrayBase* elements =
FixedTypedArrayBase::cast(typed_array->elements());
@ -479,8 +412,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
SerializeObject();
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
JSArrayBuffer* buffer = JSArrayBuffer::cast(object_);
void* backing_store = buffer->backing_store();
// We cannot store byte_length larger than Smi range in the snapshot.
@ -496,8 +428,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
buffer->set_backing_store(backing_store);
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
void Serializer::ObjectSerializer::SerializeExternalString() {
Heap* heap = serializer_->isolate()->heap();
// For external strings with known resources, we replace the resource field
// with the encoded external reference, which we restore upon deserialize.
@ -531,9 +462,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
}
}
template <class AllocatorT>
void Serializer<
AllocatorT>::ObjectSerializer::SerializeExternalStringAsSequentialString() {
void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Instead of serializing this as an external string, we serialize
// an imaginary sequential string with the same content.
ReadOnlyRoots roots(serializer_->isolate());
@ -617,8 +546,7 @@ class UnlinkWeakNextScope {
DISALLOW_HEAP_ALLOCATION(no_gc_);
};
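UnlinkWeakNextScope is a standard RAII pattern: clear a field for the duration of serialization, restore it on scope exit. A self-contained sketch with hypothetical types (V8's version additionally forbids GC while the field is unlinked):

#include <cassert>

struct Node { Node* weak_next = nullptr; };

// Null out a field for the duration of a scope; restore it on exit.
class UnlinkScope {
 public:
  explicit UnlinkScope(Node* n) : node_(n), saved_(n->weak_next) {
    node_->weak_next = nullptr;  // the serialized image sees no weak link
  }
  ~UnlinkScope() { node_->weak_next = saved_; }

 private:
  Node* node_;
  Node* saved_;
};

int main() {
  Node a, b;
  a.weak_next = &b;
  { UnlinkScope scope(&a); assert(a.weak_next == nullptr); }
  assert(a.weak_next == &b);  // restored after the scope
}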
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
void Serializer::ObjectSerializer::Serialize() {
if (FLAG_trace_serializer) {
PrintF(" Encoding heap object: ");
object_->ShortPrint();
@ -660,8 +588,7 @@ void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
SerializeObject();
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeObject() {
void Serializer::ObjectSerializer::SerializeObject() {
int size = object_->Size();
Map* map = object_->map();
AllocationSpace space =
@ -686,8 +613,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeObject() {
SerializeContent(map, size);
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeDeferred() {
void Serializer::ObjectSerializer::SerializeDeferred() {
if (FLAG_trace_serializer) {
PrintF(" Encoding deferred heap object: ");
object_->ShortPrint();
@ -712,9 +638,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeDeferred() {
SerializeContent(map, size);
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeContent(Map* map,
int size) {
void Serializer::ObjectSerializer::SerializeContent(Map* map, int size) {
UnlinkWeakNextScope unlink_weak_next(serializer_->isolate()->heap(), object_);
if (object_->IsCode()) {
// For code objects, output raw bytes first.
@ -731,16 +655,15 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeContent(Map* map,
}
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(HeapObject* host,
ObjectSlot start,
ObjectSlot end) {
void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
ObjectSlot start,
ObjectSlot end) {
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
HeapObject* host, MaybeObjectSlot start, MaybeObjectSlot end) {
void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
MaybeObjectSlot start,
MaybeObjectSlot end) {
MaybeObjectSlot current = start;
while (current < end) {
while (current < end && ((*current)->IsSmi() || (*current)->IsCleared())) {
@ -789,9 +712,8 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
}
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitEmbeddedPointer(
Code* host, RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code* host,
RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
Object* object = rinfo->target_object();
@ -800,9 +722,8 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitEmbeddedPointer(
bytes_processed_so_far_ += rinfo->target_address_size();
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
Foreign* host, Address* p) {
void Serializer::ObjectSerializer::VisitExternalReference(Foreign* host,
Address* p) {
int skip = SkipTo(reinterpret_cast<Address>(p));
Address target = *p;
auto encoded_reference = serializer_->EncodeExternalReference(target);
@ -816,9 +737,8 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
bytes_processed_so_far_ += kPointerSize;
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
Code* host, RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitExternalReference(Code* host,
RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
Address target = rinfo->target_external_reference();
auto encoded_reference = serializer_->EncodeExternalReference(target);
@ -836,9 +756,8 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
bytes_processed_so_far_ += rinfo->target_address_size();
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitInternalReference(
Code* host, RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitInternalReference(Code* host,
RelocInfo* rinfo) {
// We do not use skip from last patched pc to find the pc to patch, since
// target_address_address may not return addresses in ascending order when
// used for internal references. External references may be stored at the
@ -860,9 +779,8 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitInternalReference(
sink_->PutInt(target_offset, "internal ref value");
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitRuntimeEntry(
Code* host, RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitRuntimeEntry(Code* host,
RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
Address target = rinfo->target_address();
@ -874,9 +792,8 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitRuntimeEntry(
bytes_processed_so_far_ += rinfo->target_address_size();
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitOffHeapTarget(
Code* host, RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitOffHeapTarget(Code* host,
RelocInfo* rinfo) {
DCHECK(FLAG_embedded_builtins);
{
STATIC_ASSERT(EmbeddedData::kTableSize == Builtins::builtin_count);
@ -919,9 +836,7 @@ class CompareRelocInfo {
};
} // namespace
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitRelocInfo(
RelocIterator* it) {
void Serializer::ObjectSerializer::VisitRelocInfo(RelocIterator* it) {
std::priority_queue<RelocInfo, std::vector<RelocInfo>, CompareRelocInfo>
reloc_queue;
for (; !it->done(); it->next()) {
@ -934,17 +849,15 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitRelocInfo(
}
}
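VisitRelocInfo drains a min-heap so relocation entries are serialized in ascending address order no matter how the iterator yields them. The same pattern in miniature, with a stand-in Reloc type:

#include <cstdio>
#include <queue>
#include <vector>

struct Reloc { int address; };

// A greater-than comparator turns std::priority_queue into a min-heap,
// so entries pop in ascending address order.
struct CompareReloc {
  bool operator()(const Reloc& a, const Reloc& b) const {
    return a.address > b.address;
  }
};

int main() {
  std::priority_queue<Reloc, std::vector<Reloc>, CompareReloc> queue;
  for (int address : {40, 10, 30}) queue.push(Reloc{address});
  while (!queue.empty()) {  // prints 10, 30, 40
    std::printf("%d\n", queue.top().address);
    queue.pop();
  }
}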
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitCodeTarget(
Code* host, RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitCodeTarget(Code* host,
RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
Address object_start = object_->address();
int base = bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
@ -990,8 +903,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
}
}
template <class AllocatorT>
int Serializer<AllocatorT>::ObjectSerializer::SkipTo(Address to) {
int Serializer::ObjectSerializer::SkipTo(Address to) {
Address object_start = object_->address();
int up_to_offset = static_cast<int>(to - object_start);
int to_skip = up_to_offset - bytes_processed_so_far_;
@ -1003,8 +915,7 @@ int Serializer<AllocatorT>::ObjectSerializer::SkipTo(Address to) {
return to_skip;
}
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
void Serializer::ObjectSerializer::OutputCode(int size) {
DCHECK_EQ(kPointerSize, bytes_processed_so_far_);
Code* code = Code::cast(object_);
// To make snapshots reproducible, we make a copy of the code object
@ -1039,9 +950,5 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
sink_->PutRaw(reinterpret_cast<byte*>(start), bytes_to_output, "Code");
}
// Explicit instantiation.
template class Serializer<BuiltinSerializerAllocator>;
template class Serializer<DefaultSerializerAllocator>;
} // namespace internal
} // namespace v8
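Worth noting from OutputCode above: snapshot reproducibility comes from copying the code object and wiping fields that vary between runs before emitting raw bytes. A toy version of that wipe-then-emit step, with invented field names:

#include <cstdint>
#include <cstring>

// Copy the object, zero run-to-run-varying state, then emit raw bytes.
struct FakeCode {
  uint8_t instructions[16];
  void* scratch_pointer;  // pointer value differs on every run
};

void OutputCodeRaw(const FakeCode& code, uint8_t* sink) {
  FakeCode copy;
  std::memcpy(&copy, &code, sizeof(copy));
  copy.scratch_pointer = nullptr;  // wipe non-deterministic state
  std::memcpy(sink, &copy, sizeof(copy));
}

int main() {
  FakeCode code = {};
  uint8_t sink[sizeof(FakeCode)];
  OutputCodeRaw(code, sink);
}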

View File

@ -154,7 +154,6 @@ class ObjectCacheIndexMap {
DISALLOW_COPY_AND_ASSIGN(ObjectCacheIndexMap);
};
template <class AllocatorT = DefaultSerializerAllocator>
class Serializer : public SerializerDeserializer {
public:
explicit Serializer(Isolate* isolate);
@ -221,11 +220,6 @@ class Serializer : public SerializerDeserializer {
bool SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
// Returns true if the object was successfully serialized as a builtin
// reference.
bool SerializeBuiltinReference(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
// Returns true if the given heap object is a bytecode handler code object.
bool ObjectIsBytecodeHandler(HeapObject* obj) const;
@ -271,7 +265,7 @@ class Serializer : public SerializerDeserializer {
SerializerReferenceMap* reference_map() { return &reference_map_; }
const RootIndexMap* root_index_map() const { return &root_index_map_; }
AllocatorT* allocator() { return &allocator_; }
DefaultSerializerAllocator* allocator() { return &allocator_; }
SnapshotByteSink sink_; // Used directly by subclasses.
@ -284,7 +278,7 @@ class Serializer : public SerializerDeserializer {
std::vector<byte> code_buffer_;
std::vector<HeapObject*> deferred_objects_; // To handle stack overflow.
int recursion_depth_ = 0;
AllocatorT allocator_;
DefaultSerializerAllocator allocator_;
#ifdef OBJECT_PRINT
static const int kInstanceTypes = LAST_TYPE + 1;
@ -303,8 +297,7 @@ class Serializer : public SerializerDeserializer {
class RelocInfoIterator;
template <class AllocatorT>
class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
class Serializer::ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer, HeapObject* obj,
SnapshotByteSink* sink, HowToCode how_to_code,

View File

@ -11,8 +11,6 @@
#include "src/callable.h"
#include "src/interface-descriptors.h"
#include "src/objects-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/builtin-serializer.h"
#include "src/snapshot/partial-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
#include "src/snapshot/startup-deserializer.h"
@ -47,12 +45,10 @@ bool Snapshot::Initialize(Isolate* isolate) {
CHECK(VerifyChecksum(blob));
Vector<const byte> startup_data = ExtractStartupData(blob);
SnapshotData startup_snapshot_data(startup_data);
Vector<const byte> builtin_data = ExtractBuiltinData(blob);
BuiltinSnapshotData builtin_snapshot_data(builtin_data);
Vector<const byte> read_only_data = ExtractReadOnlyData(blob);
SnapshotData read_only_snapshot_data(read_only_data);
StartupDeserializer deserializer(
&startup_snapshot_data, &builtin_snapshot_data, &read_only_snapshot_data);
StartupDeserializer deserializer(&startup_snapshot_data,
&read_only_snapshot_data);
deserializer.SetRehashability(ExtractRehashability(blob));
bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
@ -94,7 +90,7 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
void ProfileDeserialization(
const SnapshotData* read_only_snapshot,
const SnapshotData* startup_snapshot, const SnapshotData* builtin_snapshot,
const SnapshotData* startup_snapshot,
const std::vector<SnapshotData*>& context_snapshots) {
if (FLAG_profile_deserialization) {
int startup_total = 0;
@ -105,9 +101,6 @@ void ProfileDeserialization(
for (const auto& reservation : startup_snapshot->Reservations()) {
startup_total += reservation.chunk_size();
}
for (const auto& reservation : builtin_snapshot->Reservations()) {
startup_total += reservation.chunk_size();
}
PrintF("%10d bytes per isolate\n", startup_total);
for (size_t i = 0; i < context_snapshots.size(); i++) {
int context_total = 0;
@ -121,7 +114,6 @@ void ProfileDeserialization(
v8::StartupData Snapshot::CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
const BuiltinSnapshotData* builtin_snapshot,
const SnapshotData* read_only_snapshot,
const std::vector<SnapshotData*>& context_snapshots, bool can_be_rehashed) {
uint32_t num_contexts = static_cast<uint32_t>(context_snapshots.size());
@ -130,8 +122,6 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(startup_snapshot->RawData().length());
DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(builtin_snapshot->RawData().length());
DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(read_only_snapshot->RawData().length());
DCHECK(IsAligned(total_length, kPointerAlignment));
for (const auto context_snapshot : context_snapshots) {
@ -139,7 +129,7 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
DCHECK(IsAligned(total_length, kPointerAlignment));
}
ProfileDeserialization(read_only_snapshot, startup_snapshot, builtin_snapshot,
ProfileDeserialization(read_only_snapshot, startup_snapshot,
context_snapshots);
char* data = new char[total_length];
@ -168,17 +158,6 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
}
payload_offset += payload_length;
// Builtins.
SetHeaderValue(data, kBuiltinOffsetOffset, payload_offset);
payload_length = builtin_snapshot->RawData().length();
CopyBytes(data + payload_offset,
reinterpret_cast<const char*>(builtin_snapshot->RawData().start()),
payload_length);
if (FLAG_profile_deserialization) {
PrintF("%10d bytes for builtins\n", payload_length);
}
payload_offset += payload_length;
// Read-only.
SetHeaderValue(data, kReadOnlyOffsetOffset, payload_offset);
payload_length = read_only_snapshot->RawData().length();
@ -517,13 +496,6 @@ Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
uint32_t num_contexts = ExtractNumContexts(data);
return ExtractData(data, StartupSnapshotOffset(num_contexts),
GetHeaderValue(data, kBuiltinOffsetOffset));
}
Vector<const byte> Snapshot::ExtractBuiltinData(const v8::StartupData* data) {
DCHECK(SnapshotIsValid(data));
return ExtractData(data, GetHeaderValue(data, kBuiltinOffsetOffset),
GetHeaderValue(data, kReadOnlyOffsetOffset));
}
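Each Extract*Data helper slices the blob between two recorded header offsets, so deleting the builtins section simply removes one cut point: startup data now runs straight to the read-only offset. A toy model of offset-based slicing, with a hypothetical two-offset layout:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// A blob with recorded section offsets; a section is the byte range
// [its offset, next offset).
struct Blob {
  std::vector<uint8_t> data;
  uint32_t startup_offset;
  uint32_t read_only_offset;
};

std::pair<const uint8_t*, size_t> ExtractStartup(const Blob& blob) {
  return {blob.data.data() + blob.startup_offset,
          blob.read_only_offset - blob.startup_offset};
}

int main() {
  Blob blob{std::vector<uint8_t>(100), 20, 60};
  auto section = ExtractStartup(blob);
  assert(section.second == 40);
  assert(section.first == blob.data.data() + 20);
}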
@ -573,8 +545,7 @@ void Snapshot::CheckVersion(const v8::StartupData* data) {
}
}
template <class AllocatorT>
SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
SnapshotData::SnapshotData(const Serializer* serializer) {
DisallowHeapAllocation no_gc;
std::vector<Reservation> reservations = serializer->EncodeReservations();
const std::vector<byte>* payload = serializer->Payload();
@ -608,10 +579,6 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
static_cast<size_t>(payload->size()));
}
// Explicit instantiation.
template SnapshotData::SnapshotData(
const Serializer<DefaultSerializerAllocator>* serializer);
std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
uint32_t size = GetHeaderValue(kNumReservationsOffset);
std::vector<SerializedData::Reservation> reservations(size);
@ -631,27 +598,5 @@ Vector<const byte> SnapshotData::Payload() const {
return Vector<const byte>(payload, length);
}
BuiltinSnapshotData::BuiltinSnapshotData(const BuiltinSerializer* serializer)
: SnapshotData(serializer) {}
Vector<const byte> BuiltinSnapshotData::Payload() const {
Vector<const byte> payload = SnapshotData::Payload();
const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
DCHECK_EQ(data_ + size_, payload.start() + payload.size());
DCHECK_GT(payload.size(), builtin_offsets_size);
return Vector<const byte>(payload.start(),
payload.size() - builtin_offsets_size);
}
Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
Vector<const byte> payload = SnapshotData::Payload();
const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
DCHECK_EQ(data_ + size_, payload.start() + payload.size());
DCHECK_GT(payload.size(), builtin_offsets_size);
const uint32_t* data = reinterpret_cast<const uint32_t*>(
payload.start() + payload.size() - builtin_offsets_size);
return Vector<const uint32_t>(data, Builtins::builtin_count);
}
} // namespace internal
} // namespace v8

View File

@ -15,7 +15,6 @@ namespace internal {
// Forward declarations.
class Isolate;
class BuiltinSerializer;
class PartialSerializer;
class StartupSerializer;
@ -23,8 +22,7 @@ class StartupSerializer;
class SnapshotData : public SerializedData {
public:
// Used when producing.
template <class AllocatorT>
explicit SnapshotData(const Serializer<AllocatorT>* serializer);
explicit SnapshotData(const Serializer* serializer);
// Used when consuming.
explicit SnapshotData(const Vector<const byte> snapshot)
@ -52,33 +50,6 @@ class SnapshotData : public SerializedData {
static const uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;
};
class BuiltinSnapshotData final : public SnapshotData {
public:
// Used when producing.
// This simply forwards to the SnapshotData constructor.
// The BuiltinSerializer appends the builtin offset table to the payload.
explicit BuiltinSnapshotData(const BuiltinSerializer* serializer);
// Used when consuming.
explicit BuiltinSnapshotData(const Vector<const byte> snapshot)
: SnapshotData(snapshot) {}
// Returns the serialized payload without the builtin offsets table.
Vector<const byte> Payload() const override;
// Returns only the builtin offsets table.
Vector<const uint32_t> BuiltinOffsets() const;
private:
// In addition to the format specified in SnapshotData, BuiltinSnapshotData
// includes a list of builtin offsets at the end of the serialized payload:
//
// ...
// ... serialized payload
// ... list of builtin offsets
};
class EmbeddedData final {
public:
static EmbeddedData FromIsolate(Isolate* isolate);
@ -182,7 +153,6 @@ class Snapshot : public AllStatic {
static v8::StartupData CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
const BuiltinSnapshotData* builtin_snapshot,
const SnapshotData* read_only_snapshot,
const std::vector<SnapshotData*>& context_snapshots,
bool can_be_rehashed);
@ -198,7 +168,6 @@ class Snapshot : public AllStatic {
static bool ExtractRehashability(const v8::StartupData* data);
static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
static Vector<const byte> ExtractReadOnlyData(const v8::StartupData* data);
static Vector<const byte> ExtractBuiltinData(const v8::StartupData* data);
static Vector<const byte> ExtractContextData(const v8::StartupData* data,
uint32_t index);
@ -218,14 +187,12 @@ class Snapshot : public AllStatic {
// [2] checksum part A
// [3] checksum part B
// [4] (64 bytes) version string
// [5] offset to builtins
// [6] offset to readonly
// [7] offset to context 0
// [8] offset to context 1
// [5] offset to readonly
// [6] offset to context 0
// [7] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
// ... builtin snapshot data
// ... read-only snapshot data
// ... context 0 snapshot data
// ... context 1 snapshot data
@ -241,10 +208,8 @@ class Snapshot : public AllStatic {
static const uint32_t kVersionStringOffset =
kChecksumPartBOffset + kUInt32Size;
static const uint32_t kVersionStringLength = 64;
static const uint32_t kBuiltinOffsetOffset =
kVersionStringOffset + kVersionStringLength;
static const uint32_t kReadOnlyOffsetOffset =
kBuiltinOffsetOffset + kUInt32Size;
kVersionStringOffset + kVersionStringLength;
static const uint32_t kFirstContextOffsetOffset =
kReadOnlyOffsetOffset + kUInt32Size;
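With the builtins slot gone, the read-only offset now follows the version string directly. A quick arithmetic check of the constants above, assuming 4-byte header slots and the 64-byte version string:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kUInt32Size = 4;
  const uint32_t kVersionStringOffset = 4 * kUInt32Size;  // after slots [0..3]
  const uint32_t kVersionStringLength = 64;
  const uint32_t kReadOnlyOffsetOffset =
      kVersionStringOffset + kVersionStringLength;  // builtins slot removed
  const uint32_t kFirstContextOffsetOffset =
      kReadOnlyOffsetOffset + kUInt32Size;
  assert(kReadOnlyOffsetOffset == 80);
  assert(kFirstContextOffsetOffset == 84);
}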

View File

@ -9,7 +9,6 @@
#include "src/code-stubs.h"
#include "src/code-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/snapshot.h"
@ -22,10 +21,8 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
ReadOnlyDeserializer read_only_deserializer(read_only_data_);
read_only_deserializer.SetRehashability(can_rehash());
read_only_deserializer.DeserializeInto(isolate);
BuiltinDeserializer builtin_deserializer(isolate, builtin_data_);
if (!DefaultDeserializerAllocator::ReserveSpace(this,
&builtin_deserializer)) {
if (!allocator()->ReserveSpace()) {
V8::FatalProcessOutOfMemory(isolate, "StartupDeserializer");
}
@ -47,10 +44,6 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
RestoreExternalReferenceRedirectors(accessor_infos());
RestoreExternalReferenceRedirectors(call_handler_infos());
// Deserialize eager builtins from the builtin snapshot. Note that deferred
// objects must have been deserialized prior to this.
builtin_deserializer.DeserializeEagerBuiltins();
// Flush the instruction cache for the entire code-space. Must happen after
// all code objects have been deserialized.
FlushICacheForNewIsolate();

View File

@ -12,14 +12,11 @@ namespace v8 {
namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
class StartupDeserializer final : public Deserializer<> {
class StartupDeserializer final : public Deserializer {
public:
StartupDeserializer(const SnapshotData* startup_data,
const BuiltinSnapshotData* builtin_data,
const SnapshotData* read_only_data)
: Deserializer(startup_data, false),
read_only_data_(read_only_data),
builtin_data_(builtin_data) {}
: Deserializer(startup_data, false), read_only_data_(read_only_data) {}
// Deserialize the snapshot into an empty heap.
void DeserializeInto(Isolate* isolate);
@ -29,7 +26,6 @@ class StartupDeserializer final : public Deserializer<> {
void PrintDisassembledCodeObjects();
const SnapshotData* read_only_data_;
const BuiltinSnapshotData* builtin_data_;
};
} // namespace internal

View File

@ -28,14 +28,32 @@ StartupSerializer::~StartupSerializer() {
OutputStatistics("StartupSerializer");
}
namespace {
// Due to how we currently create the embedded blob, we may encounter both
// off-heap trampolines and old, outdated full Code objects during
// serialization. This ensures that we only serialize the canonical version of
// each builtin.
// See also CreateOffHeapTrampolines().
HeapObject* MaybeCanonicalizeBuiltin(Isolate* isolate, HeapObject* obj) {
if (!obj->IsCode()) return obj;
const int builtin_index = Code::cast(obj)->builtin_index();
if (!Builtins::IsBuiltinId(builtin_index)) return obj;
return isolate->builtins()->builtin(builtin_index);
}
} // namespace
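The helper's contract is easy to model outside V8: map any alias of a builtin to the single object stored in the builtins table, and pass everything else through. A sketch with hypothetical types:

#include <cassert>
#include <unordered_map>

struct CodeObj { int builtin_index; };

// Resolve any Code-like alias to the canonical entry for its builtin id;
// objects with no table entry pass through unchanged.
CodeObj* Canonicalize(const std::unordered_map<int, CodeObj*>& builtins,
                      CodeObj* obj) {
  auto it = builtins.find(obj->builtin_index);
  return it == builtins.end() ? obj : it->second;
}

int main() {
  CodeObj canonical{7};
  CodeObj stale_copy{7};  // e.g. an outdated full Code object
  std::unordered_map<int, CodeObj*> builtins = {{7, &canonical}};
  assert(Canonicalize(builtins, &stale_copy) == &canonical);
}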
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
DCHECK(!obj->IsJSFunction());
if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip)) {
return;
}
// TODO(jgruber): Remove canonicalization once off-heap trampoline creation
// moves to Isolate::Init().
obj = MaybeCanonicalizeBuiltin(isolate(), obj);
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
if (IsRootAndHasBeenSerialized(obj) &&
SerializeRoot(obj, how_to_code, where_to_point, skip))

View File

@ -38,13 +38,12 @@
#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/heap/spaces.h"
#include "src/interpreter/interpreter.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/builtin-serializer.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-deserializer.h"
@ -72,12 +71,10 @@ void DisableAlwaysOpt() {
// deserialize an isolate.
struct StartupBlobs {
Vector<const byte> startup;
Vector<const byte> builtin;
Vector<const byte> read_only;
void Dispose() {
startup.Dispose();
builtin.Dispose();
read_only.Dispose();
}
};
@ -99,9 +96,7 @@ class TestSerializer {
static v8::Isolate* NewIsolateFromBlob(StartupBlobs& blobs) {
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
BuiltinSnapshotData builtin_snapshot(blobs.builtin);
StartupDeserializer deserializer(&startup_snapshot, &builtin_snapshot,
&read_only_snapshot);
StartupDeserializer deserializer(&startup_snapshot, &read_only_snapshot);
const bool kEnableSerializer = false;
const bool kGenerateHeap = false;
v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer, kGenerateHeap);
@ -261,16 +256,11 @@ static StartupBlobs Serialize(v8::Isolate* isolate) {
StartupSerializer ser(internal_isolate, &read_only_serializer);
ser.SerializeStrongReferences();
i::BuiltinSerializer builtin_serializer(internal_isolate, &ser);
builtin_serializer.SerializeBuiltinsAndHandlers();
ser.SerializeWeakReferencesAndDeferred();
read_only_serializer.FinalizeSerialization();
SnapshotData startup_snapshot(&ser);
SnapshotData read_only_snapshot(&read_only_serializer);
BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
return {WritePayload(startup_snapshot.RawData()),
WritePayload(builtin_snapshot.RawData()),
WritePayload(read_only_snapshot.RawData())};
}
@ -483,7 +473,6 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
}
static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
Vector<const byte>* builtin_blob_out,
Vector<const byte>* read_only_blob_out,
Vector<const byte>* partial_blob_out) {
v8::Isolate* v8_isolate = TestSerializer::NewIsolateInitialized();
@ -529,20 +518,15 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
v8::SerializeInternalFieldsCallback());
partial_serializer.Serialize(&raw_context, false);
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
read_only_serializer.FinalizeSerialization();
SnapshotData read_only_snapshot(&read_only_serializer);
SnapshotData startup_snapshot(&startup_serializer);
BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
*builtin_blob_out = WritePayload(builtin_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
@ -552,13 +536,11 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
UNINITIALIZED_TEST(PartialSerializerContext) {
DisableAlwaysOpt();
Vector<const byte> startup_blob;
Vector<const byte> builtin_blob;
Vector<const byte> read_only_blob;
Vector<const byte> partial_blob;
PartiallySerializeContext(&startup_blob, &builtin_blob, &read_only_blob,
&partial_blob);
PartiallySerializeContext(&startup_blob, &read_only_blob, &partial_blob);
StartupBlobs blobs = {startup_blob, builtin_blob, read_only_blob};
StartupBlobs blobs = {startup_blob, read_only_blob};
v8::Isolate* v8_isolate = TestSerializer::NewIsolateFromBlob(blobs);
CHECK(v8_isolate);
{
@ -597,7 +579,7 @@ UNINITIALIZED_TEST(PartialSerializerContext) {
}
static void PartiallySerializeCustomContext(
Vector<const byte>* startup_blob_out, Vector<const byte>* builtin_blob_out,
Vector<const byte>* startup_blob_out,
Vector<const byte>* read_only_blob_out,
Vector<const byte>* partial_blob_out) {
v8::Isolate* v8_isolate = TestSerializer::NewIsolateInitialized();
@ -663,20 +645,15 @@ static void PartiallySerializeCustomContext(
v8::SerializeInternalFieldsCallback());
partial_serializer.Serialize(&raw_context, false);
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
read_only_serializer.FinalizeSerialization();
SnapshotData read_only_snapshot(&read_only_serializer);
SnapshotData startup_snapshot(&startup_serializer);
BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
*builtin_blob_out = WritePayload(builtin_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
@ -686,13 +663,12 @@ static void PartiallySerializeCustomContext(
UNINITIALIZED_TEST(PartialSerializerCustomContext) {
DisableAlwaysOpt();
Vector<const byte> startup_blob;
Vector<const byte> builtin_blob;
Vector<const byte> read_only_blob;
Vector<const byte> partial_blob;
PartiallySerializeCustomContext(&startup_blob, &builtin_blob, &read_only_blob,
PartiallySerializeCustomContext(&startup_blob, &read_only_blob,
&partial_blob);
StartupBlobs blobs = {startup_blob, builtin_blob, read_only_blob};
StartupBlobs blobs = {startup_blob, read_only_blob};
v8::Isolate* v8_isolate = TestSerializer::NewIsolateFromBlob(blobs);
CHECK(v8_isolate);
{

View File

@ -19,10 +19,6 @@
"name": "SnapshotSizeStartup",
"results_regexp": "(\\d+) bytes in \\d+ chunks for startup$"
},
{
"name": "SnapshotSizeBuiltins",
"results_regexp": "(\\d+) bytes for builtins$"
},
{
"name": "SnapshotSizeReadOnly",
"results_regexp": "(\\d+) bytes for read-only$"