[snapshot] Refactor Serializer

This CL refactors allocation & reservation logic into a new
DefaultSerializerAllocator class.  In upcoming work, this will be
further extended by a custom allocator for builtin serialization.

Additionally, this cleans up a number of cosmetic issues (encapsulation
and other nits).

Bug: v8:6624
Change-Id: Ibcf12a525c8fcb26d9c16b7a12fd598c37a0e10a
Reviewed-on: https://chromium-review.googlesource.com/650357
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48077}
Jakob Gruber 2017-09-19 12:01:52 +09:00 committed by Commit Bot
parent 5b127a9796
commit 59e4b75187
17 changed files with 423 additions and 271 deletions
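
The heart of the change: Serializer becomes a class template parameterized on
its allocator, with DefaultSerializerAllocator as the default argument. A
minimal self-contained sketch of the pattern (illustrative names only, not
V8's actual API):

#include <cstdint>

// Stand-in for DefaultSerializerAllocator: bump-allocates offsets.
class DefaultAllocator {
 public:
  uint32_t Allocate(uint32_t size) {
    uint32_t offset = next_;
    next_ += size;
    return offset;
  }

 private:
  uint32_t next_ = 0;
};

// The serializer owns its allocator by value and exposes it to the
// allocation paths that used to be inlined into Serializer itself.
template <class AllocatorT = DefaultAllocator>
class Serializer {
 public:
  AllocatorT* allocator() { return &allocator_; }

 private:
  AllocatorT allocator_;
};

// Existing serializers keep the default policy; note the required <>.
class CodeSerializerLike : public Serializer<> {};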

View File

@@ -1953,6 +1953,8 @@ v8_source_set("v8_base") {
"src/snapshot/builtin-serializer.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-serializer-allocator.cc",
"src/snapshot/default-serializer-allocator.h",
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
"src/snapshot/natives-common.cc",

View File

@@ -4063,7 +4063,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
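
The only change at this call site is the pair of empty angle brackets: once
Serializer is a class template, its static members can only be named through a
concrete specialization, and Serializer<> denotes the one built from the
default template argument. A tiny illustration (hypothetical names):

template <class AllocatorT = int>
struct SerializerLike {
  static const int kNumberOfSpaces = 4;
};

// int n = SerializerLike::kNumberOfSpaces;  // error: missing template arguments
int n = SerializerLike<>::kNumberOfSpaces;   // OK: the default argument applies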

View File

@@ -20,7 +20,7 @@ BuiltinSerializer::~BuiltinSerializer() {
void BuiltinSerializer::SerializeBuiltins() {
for (int i = 0; i < Builtins::builtin_count; i++) {
builtin_offsets_[i] = sink()->Position();
builtin_offsets_[i] = sink_.Position();
SerializeBuiltin(isolate()->builtins()->builtin(i));
}
Pad(); // Pad with kNop since GetInt() might read too far.
@@ -55,7 +55,7 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
DCHECK(!o->IsSmi());
// Roots can simply be serialized as root references.
int root_index = root_index_map_.Lookup(o);
int root_index = root_index_map()->Lookup(o);
if (root_index != RootIndexMap::kInvalidRootIndex) {
DCHECK(startup_serializer_->root_has_been_serialized(root_index));
PutRoot(root_index, o, how_to_code, where_to_point, skip);
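
Two of the encapsulation cleanups are visible in this hunk: sink_ becomes a
protected member that subclasses use directly (the const sink() accessor goes
away), while root_index_map_ moves behind an accessor. A sketch of the
resulting surface, with trivial stand-in types rather than V8's:

struct SnapshotByteSink { int Position() const { return 0; } };
struct RootIndexMap { int Lookup(const void*) const { return -1; } };

class SerializerLike {
 protected:
  // Accessor style: subclasses call root_index_map()->Lookup(obj).
  RootIndexMap* root_index_map() { return &root_index_map_; }
  // Direct-member style: subclasses write sink_.Position().
  SnapshotByteSink sink_;

 private:
  RootIndexMap root_index_map_;
};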

View File

@@ -15,7 +15,7 @@ class StartupSerializer;
// Responsible for serializing all builtin objects during startup snapshot
// creation. Builtins are serialized into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
class BuiltinSerializer : public Serializer {
class BuiltinSerializer : public Serializer<> {
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;

View File

@@ -55,7 +55,7 @@ ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
SerializeDeferredObjects();
Pad();
SerializedCodeData data(sink()->data(), this);
SerializedCodeData data(sink_.data(), this);
return data.GetScriptData();
}
@@ -64,7 +64,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
int root_index = root_index_map_.Lookup(obj);
int root_index = root_index_map()->Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
@@ -317,9 +317,7 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
DisallowHeapAllocation no_gc;
const std::vector<uint32_t>* stub_keys = cs->stub_keys();
std::vector<Reservation> reservations;
cs->EncodeReservations(&reservations);
std::vector<Reservation> reservations = cs->EncodeReservations();
// Calculate sizes.
uint32_t reservation_size =
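
EncodeReservations also switches from an out-parameter to a by-value return;
with move semantics and return-value optimization the vector is constructed in
place at the caller, so the cleaner signature costs nothing. A sketch under
that assumption (hypothetical Reservation type):

#include <cstdint>
#include <vector>

struct Reservation {
  explicit Reservation(uint32_t size) : size(size) {}
  uint32_t size;
};

std::vector<Reservation> EncodeReservations() {
  std::vector<Reservation> out;
  out.emplace_back(128);  // example chunk size
  return out;             // elided or moved, never copied
}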

View File

@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
class CodeSerializer : public Serializer {
class CodeSerializer : public Serializer<> {
public:
static ScriptData* Serialize(Isolate* isolate,
Handle<SharedFunctionInfo> info,

View File

@@ -0,0 +1,153 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/default-serializer-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/serializer.h"
#include "src/snapshot/snapshot-source-sink.h"
namespace v8 {
namespace internal {
DefaultSerializerAllocator::DefaultSerializerAllocator(
Serializer<DefaultSerializerAllocator>* serializer)
: serializer_(serializer) {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
pending_chunk_[i] = 0;
}
}
SerializerReference DefaultSerializerAllocator::Allocate(AllocationSpace space,
uint32_t size) {
DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
DCHECK(size > 0 && size <= MaxChunkSizeInSpace(space));
// Maps are allocated through AllocateMap.
DCHECK_NE(MAP_SPACE, space);
uint32_t new_chunk_size = pending_chunk_[space] + size;
if (new_chunk_size > MaxChunkSizeInSpace(space)) {
// The new chunk size would not fit onto a single page. Complete the
// current chunk and start a new one.
serializer_->PutNextChunk(space);
completed_chunks_[space].push_back(pending_chunk_[space]);
pending_chunk_[space] = 0;
new_chunk_size = size;
}
uint32_t offset = pending_chunk_[space];
pending_chunk_[space] = new_chunk_size;
return SerializerReference::BackReference(
space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
}
SerializerReference DefaultSerializerAllocator::AllocateMap() {
// Maps are allocated one-by-one when deserializing.
return SerializerReference::MapReference(num_maps_++);
}
SerializerReference DefaultSerializerAllocator::AllocateLargeObject(
uint32_t size) {
// Large objects are allocated one-by-one when deserializing. We do not
// have to keep track of multiple chunks.
large_objects_total_size_ += size;
return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
}
SerializerReference DefaultSerializerAllocator::AllocateOffHeapBackingStore() {
DCHECK_NE(0, seen_backing_stores_index_);
return SerializerReference::OffHeapBackingStoreReference(
seen_backing_stores_index_++);
}
#ifdef DEBUG
bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
SerializerReference reference) const {
DCHECK(reference.is_back_reference());
AllocationSpace space = reference.space();
if (space == LO_SPACE) {
return reference.large_object_index() < seen_large_objects_index_;
} else if (space == MAP_SPACE) {
return reference.map_index() < num_maps_;
} else {
size_t chunk_index = reference.chunk_index();
if (chunk_index == completed_chunks_[space].size()) {
return reference.chunk_offset() < pending_chunk_[space];
} else {
return chunk_index < completed_chunks_[space].size() &&
reference.chunk_offset() < completed_chunks_[space][chunk_index];
}
}
}
bool DefaultSerializerAllocator::HasNotExceededFirstPageOfEachSpace() const {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
if (!completed_chunks_[i].empty()) return false;
}
return true;
}
#endif
std::vector<SerializedData::Reservation>
DefaultSerializerAllocator::EncodeReservations() const {
std::vector<SerializedData::Reservation> out;
STATIC_ASSERT(NEW_SPACE == 0);
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
out.emplace_back(completed_chunks_[i][j]);
}
if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
out.emplace_back(pending_chunk_[i]);
}
out.back().mark_as_last();
}
STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
out.emplace_back(num_maps_ * Map::kSize);
out.back().mark_as_last();
STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
out.emplace_back(large_objects_total_size_);
out.back().mark_as_last();
return out;
}
void DefaultSerializerAllocator::OutputStatistics() {
DCHECK(FLAG_serialization_statistics);
PrintF(" Spaces (bytes):\n");
STATIC_ASSERT(NEW_SPACE == 0);
for (int space = 0; space < kNumberOfSpaces; space++) {
PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
STATIC_ASSERT(NEW_SPACE == 0);
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
size_t s = pending_chunk_[space];
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
PrintF("%16" PRIuS, s);
}
STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
PrintF("%16d", num_maps_ * Map::kSize);
STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
PrintF("%16d\n", large_objects_total_size_);
}
// static
uint32_t DefaultSerializerAllocator::MaxChunkSizeInSpace(int space) {
DCHECK(0 <= space && space < kNumberOfPreallocatedSpaces);
return static_cast<uint32_t>(
MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
}
} // namespace internal
} // namespace v8
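
The chunk bookkeeping in Allocate() is easiest to see with concrete numbers.
A self-contained sketch of the same logic, with an invented page limit (the
real bound comes from MemoryAllocator::PageAreaSize):

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

constexpr uint32_t kMaxChunkSize = 100;  // stand-in for the page area size

uint32_t pending_chunk = 0;
std::vector<uint32_t> completed_chunks;

// Returns {chunk index, offset within chunk} -- the essence of a
// back-reference. The real Allocate() additionally has the serializer
// emit a kNextChunk marker via PutNextChunk() when it rolls over.
std::pair<size_t, uint32_t> Allocate(uint32_t size) {
  if (pending_chunk + size > kMaxChunkSize) {
    completed_chunks.push_back(pending_chunk);  // complete the current chunk
    pending_chunk = 0;                          // ... and start a new one
  }
  uint32_t offset = pending_chunk;
  pending_chunk += size;
  return {completed_chunks.size(), offset};
}

int main() {
  for (uint32_t size : {60u, 30u, 40u}) {  // the third allocation overflows
    std::pair<size_t, uint32_t> ref = Allocate(size);
    std::cout << "chunk " << ref.first << ", offset " << ref.second << "\n";
  }
  // Prints: chunk 0, offset 0 / chunk 0, offset 60 / chunk 1, offset 0
}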

View File

@@ -0,0 +1,74 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
#define V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
#include "src/snapshot/serializer-common.h"
namespace v8 {
namespace internal {
template <class AllocatorT>
class Serializer;
class DefaultSerializerAllocator final {
public:
DefaultSerializerAllocator(
Serializer<DefaultSerializerAllocator>* serializer);
SerializerReference Allocate(AllocationSpace space, uint32_t size);
SerializerReference AllocateMap();
SerializerReference AllocateLargeObject(uint32_t size);
SerializerReference AllocateOffHeapBackingStore();
#ifdef DEBUG
bool BackReferenceIsAlreadyAllocated(
SerializerReference back_reference) const;
bool HasNotExceededFirstPageOfEachSpace() const;
#endif
std::vector<SerializedData::Reservation> EncodeReservations() const;
void OutputStatistics();
private:
static constexpr int kNumberOfPreallocatedSpaces =
SerializerDeserializer::kNumberOfPreallocatedSpaces;
static constexpr int kNumberOfSpaces =
SerializerDeserializer::kNumberOfSpaces;
static uint32_t MaxChunkSizeInSpace(int space);
// Objects from the same space are put into chunks for bulk-allocation
// when deserializing. We have to make sure that each chunk fits into a
// page. So we track the chunk size in pending_chunk_ of a space, but
// when it exceeds a page, we complete the current chunk and start a new one.
uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
// Number of maps that we need to allocate.
uint32_t num_maps_ = 0;
// We map serialized large objects to indexes for back-referencing.
uint32_t large_objects_total_size_ = 0;
uint32_t seen_large_objects_index_ = 0;
// Used to keep track of the off-heap backing stores used by TypedArrays/
// ArrayBuffers. Note that the index begins at 1 and not 0, because when a
// TypedArray has an on-heap backing store, the backing_store pointer in the
// corresponding ArrayBuffer will be null, which makes it indistinguishable
// from index 0.
uint32_t seen_backing_stores_index_ = 1;
// The current serializer.
Serializer<DefaultSerializerAllocator>* const serializer_;
DISALLOW_COPY_AND_ASSIGN(DefaultSerializerAllocator);
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
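
The seen_backing_stores_index_ comment deserves emphasis: an off-heap
reference of 0 would decode identically to the null backing_store pointer of
an on-heap TypedArray, so real indices must start at 1. A minimal sketch of
the invariant:

#include <cassert>
#include <cstdint>

constexpr uint32_t kOnHeapBackingStore = 0;  // what a null pointer decodes to
uint32_t seen_backing_stores_index = 1;      // real stores start at 1

uint32_t RegisterOffHeapBackingStore() { return seen_backing_stores_index++; }

int main() {
  // An off-heap store always gets a nonzero index, so it can never be
  // mistaken for the on-heap (null backing_store) case.
  assert(RegisterOffHeapBackingStore() != kOnHeapBackingStore);
  return 0;
}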

View File

@@ -36,11 +36,12 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
// and its next context pointer may point to the code-stub context. Clear
// it before serializing, it will get re-added to the context list
// explicitly when it's loaded.
context->set(Context::NEXT_CONTEXT_LINK, isolate_->heap()->undefined_value());
context->set(Context::NEXT_CONTEXT_LINK,
isolate()->heap()->undefined_value());
DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
// Reset math random cache to get fresh random numbers.
context->set_math_random_index(Smi::kZero);
context->set_math_random_cache(isolate_->heap()->undefined_value());
context->set_math_random_cache(isolate()->heap()->undefined_value());
DCHECK_NULL(rehashable_global_dictionary_);
rehashable_global_dictionary_ = context->global_object()->global_dictionary();
@@ -66,7 +67,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
int root_index = root_index_map_.Lookup(obj);
int root_index = root_index_map()->Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
@@ -87,7 +88,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Pointers from the partial snapshot to the objects in the startup snapshot
// should go through the root array or through the partial snapshot cache.
// If this is not the case you may have to add something to the root array.
DCHECK(!startup_serializer_->reference_map()->Lookup(obj).is_valid());
DCHECK(!startup_serializer_->ReferenceMapContains(obj));
// All the internalized strings that the partial snapshot needs should be
// either in the root table or in the partial snapshot cache.
DCHECK(!obj->IsInternalizedString());
@@ -97,7 +98,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
// Clear literal boilerplates and feedback.
if (obj->IsFeedbackVector()) FeedbackVector::cast(obj)->ClearSlots(isolate_);
if (obj->IsFeedbackVector()) FeedbackVector::cast(obj)->ClearSlots(isolate());
if (obj->IsJSObject()) {
JSObject* jsobj = JSObject::cast(obj);
@@ -138,7 +139,7 @@ void PartialSerializer::SerializeEmbedderFields() {
HandleScope scope(isolate());
Handle<JSObject> obj(embedder_field_holders_.back(), isolate());
embedder_field_holders_.pop_back();
SerializerReference reference = reference_map_.Lookup(*obj);
SerializerReference reference = reference_map()->Lookup(*obj);
DCHECK(reference.is_back_reference());
int embedder_fields_count = obj->GetEmbedderFieldCount();
for (int i = 0; i < embedder_fields_count; i++) {

View File

@@ -13,7 +13,7 @@ namespace internal {
class StartupSerializer;
class PartialSerializer : public Serializer {
class PartialSerializer : public Serializer<> {
public:
PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer,
v8::SerializeEmbedderFieldsCallback callback);

View File

@@ -5,32 +5,18 @@
#include "src/snapshot/serializer.h"
#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
#include "src/heap/heap-inl.h"
#include "src/macro-assembler.h"
#include "src/objects/map.h"
#include "src/snapshot/natives.h"
namespace v8 {
namespace internal {
Serializer::Serializer(Isolate* isolate)
template <class AllocatorT>
Serializer<AllocatorT>::Serializer(Isolate* isolate)
: isolate_(isolate),
external_reference_encoder_(isolate),
root_index_map_(isolate),
recursion_depth_(0),
code_address_map_(NULL),
num_maps_(0),
large_objects_total_size_(0),
seen_large_objects_index_(0),
seen_backing_stores_index_(1) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
pending_chunk_[i] = 0;
max_chunk_size_[i] = static_cast<uint32_t>(
MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
}
allocator_(this) {
#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
instance_type_count_ = NewArray<int>(kInstanceTypes);
@@ -46,7 +32,8 @@ Serializer::Serializer(Isolate* isolate)
#endif // OBJECT_PRINT
}
Serializer::~Serializer() {
template <class AllocatorT>
Serializer<AllocatorT>::~Serializer() {
if (code_address_map_ != NULL) delete code_address_map_;
#ifdef OBJECT_PRINT
if (instance_type_count_ != NULL) {
@@ -57,28 +44,21 @@ Serializer::~Serializer() {
}
#ifdef OBJECT_PRINT
void Serializer::CountInstanceType(Map* map, int size) {
template <class AllocatorT>
void Serializer<AllocatorT>::CountInstanceType(Map* map, int size) {
int instance_type = map->instance_type();
instance_type_count_[instance_type]++;
instance_type_size_[instance_type] += size;
}
#endif // OBJECT_PRINT
void Serializer::OutputStatistics(const char* name) {
template <class AllocatorT>
void Serializer<AllocatorT>::OutputStatistics(const char* name) {
if (!FLAG_serialization_statistics) return;
PrintF("%s:\n", name);
PrintF(" Spaces (bytes):\n");
for (int space = 0; space < kNumberOfSpaces; space++) {
PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
size_t s = pending_chunk_[space];
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
PrintF("%16" PRIuS, s);
}
PrintF("%16d", num_maps_ * Map::kSize);
PrintF("%16d\n", large_objects_total_size_);
allocator()->OutputStatistics();
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name) \
@@ -92,7 +72,8 @@ void Serializer::OutputStatistics(const char* name) {
#endif // OBJECT_PRINT
}
void Serializer::SerializeDeferredObjects() {
template <class AllocatorT>
void Serializer<AllocatorT>::SerializeDeferredObjects() {
while (!deferred_objects_.empty()) {
HeapObject* obj = deferred_objects_.back();
deferred_objects_.pop_back();
@@ -102,9 +83,14 @@ void Serializer::SerializeDeferredObjects() {
sink_.Put(kSynchronize, "Finished with deferred objects");
}
bool Serializer::MustBeDeferred(HeapObject* object) { return false; }
template <class AllocatorT>
bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
return false;
}
void Serializer::VisitRootPointers(Root root, Object** start, Object** end) {
template <class AllocatorT>
void Serializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
PutSmi(Smi::cast(*current));
@@ -114,45 +100,9 @@ void Serializer::VisitRootPointers(Root root, Object** start, Object** end) {
}
}
void Serializer::EncodeReservations(
std::vector<SerializedData::Reservation>* out) const {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
out->push_back(SerializedData::Reservation(completed_chunks_[i][j]));
}
if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
out->push_back(SerializedData::Reservation(pending_chunk_[i]));
}
out->back().mark_as_last();
}
out->push_back(SerializedData::Reservation(num_maps_ * Map::kSize));
out->back().mark_as_last();
out->push_back(SerializedData::Reservation(large_objects_total_size_));
out->back().mark_as_last();
}
#ifdef DEBUG
bool Serializer::BackReferenceIsAlreadyAllocated(
SerializerReference reference) {
DCHECK(reference.is_back_reference());
AllocationSpace space = reference.space();
if (space == LO_SPACE) {
return reference.large_object_index() < seen_large_objects_index_;
} else if (space == MAP_SPACE) {
return reference.map_index() < num_maps_;
} else {
size_t chunk_index = reference.chunk_index();
if (chunk_index == completed_chunks_[space].size()) {
return reference.chunk_offset() < pending_chunk_[space];
} else {
return chunk_index < completed_chunks_[space].size() &&
reference.chunk_offset() < completed_chunks_[space][chunk_index];
}
}
}
void Serializer::PrintStack() {
template <class AllocatorT>
void Serializer<AllocatorT>::PrintStack() {
for (const auto o : stack_) {
o->Print();
PrintF("\n");
@@ -160,8 +110,11 @@ void Serializer::PrintStack() {
}
#endif // DEBUG
bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
template <class AllocatorT>
bool Serializer<AllocatorT>::SerializeHotObject(HeapObject* obj,
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip) {
if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
// Encode a reference to a hot object by its index in the working set.
int index = hot_objects_.Find(obj);
@@ -180,8 +133,12 @@ bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
}
return true;
}
bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
template <class AllocatorT>
bool Serializer<AllocatorT>::SerializeBackReference(HeapObject* obj,
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip) {
SerializerReference reference = reference_map_.Lookup(obj);
if (!reference.is_valid()) return false;
// Encode the location of an already deserialized object in order to write
@@ -217,7 +174,8 @@ bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
return true;
}
bool Serializer::SerializeBuiltinReference(
template <class AllocatorT>
bool Serializer<AllocatorT>::SerializeBuiltinReference(
HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
int skip, BuiltinReferenceSerializationMode mode) {
if (!obj->IsCode()) return false;
@@ -248,10 +206,11 @@ bool Serializer::SerializeBuiltinReference(
return true;
}
void Serializer::PutRoot(int root_index, HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
SerializerDeserializer::WhereToPoint where_to_point,
int skip) {
template <class AllocatorT>
void Serializer<AllocatorT>::PutRoot(
int root_index, HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
SerializerDeserializer::WhereToPoint where_to_point, int skip) {
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
object->ShortPrint();
@@ -280,22 +239,25 @@ void Serializer::PutRoot(int root_index, HeapObject* object,
}
}
void Serializer::PutSmi(Smi* smi) {
template <class AllocatorT>
void Serializer<AllocatorT>::PutSmi(Smi* smi) {
sink_.Put(kOnePointerRawData, "Smi");
byte* bytes = reinterpret_cast<byte*>(&smi);
for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
}
void Serializer::PutBackReference(HeapObject* object,
SerializerReference reference) {
DCHECK(BackReferenceIsAlreadyAllocated(reference));
template <class AllocatorT>
void Serializer<AllocatorT>::PutBackReference(HeapObject* object,
SerializerReference reference) {
DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
sink_.PutInt(reference.back_reference(), "BackRefValue");
hot_objects_.Add(object);
}
void Serializer::PutAttachedReference(SerializerReference reference,
HowToCode how_to_code,
WhereToPoint where_to_point) {
template <class AllocatorT>
void Serializer<AllocatorT>::PutAttachedReference(SerializerReference reference,
HowToCode how_to_code,
WhereToPoint where_to_point) {
DCHECK(reference.is_attached_reference());
DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
(how_to_code == kFromCode && where_to_point == kStartOfObject) ||
@@ -304,7 +266,8 @@ void Serializer::PutAttachedReference(SerializerReference reference,
sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}
int Serializer::PutAlignmentPrefix(HeapObject* object) {
template <class AllocatorT>
int Serializer<AllocatorT>::PutAlignmentPrefix(HeapObject* object) {
AllocationAlignment alignment = object->RequiredAlignment();
if (alignment != kWordAligned) {
DCHECK(1 <= alignment && alignment <= 3);
@@ -315,44 +278,14 @@ int Serializer::PutAlignmentPrefix(HeapObject* object) {
return 0;
}
SerializerReference Serializer::AllocateOffHeapBackingStore() {
DCHECK_NE(0, seen_backing_stores_index_);
return SerializerReference::OffHeapBackingStoreReference(
seen_backing_stores_index_++);
template <class AllocatorT>
void Serializer<AllocatorT>::PutNextChunk(int space) {
sink_.Put(kNextChunk, "NextChunk");
sink_.Put(space, "NextChunkSpace");
}
SerializerReference Serializer::AllocateLargeObject(int size) {
// Large objects are allocated one-by-one when deserializing. We do not
// have to keep track of multiple chunks.
large_objects_total_size_ += size;
return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
}
SerializerReference Serializer::AllocateMap() {
// Maps are allocated one-by-one when deserializing.
return SerializerReference::MapReference(num_maps_++);
}
SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
uint32_t new_chunk_size = pending_chunk_[space] + size;
if (new_chunk_size > max_chunk_size(space)) {
// The new chunk size would not fit onto a single page. Complete the
// current chunk and start a new one.
sink_.Put(kNextChunk, "NextChunk");
sink_.Put(space, "NextChunkSpace");
completed_chunks_[space].push_back(pending_chunk_[space]);
pending_chunk_[space] = 0;
new_chunk_size = size;
}
uint32_t offset = pending_chunk_[space];
pending_chunk_[space] = new_chunk_size;
return SerializerReference::BackReference(
space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
}
void Serializer::Pad() {
template <class AllocatorT>
void Serializer<AllocatorT>::Pad() {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
@@ -364,12 +297,14 @@ void Serializer::Pad() {
}
}
void Serializer::InitializeCodeAddressMap() {
template <class AllocatorT>
void Serializer<AllocatorT>::InitializeCodeAddressMap() {
isolate_->InitializeLoggingAndCounters();
code_address_map_ = new CodeAddressMap(isolate_);
}
Code* Serializer::CopyCode(Code* code) {
template <class AllocatorT>
Code* Serializer<AllocatorT>::CopyCode(Code* code) {
code_buffer_.clear(); // Clear buffer without deleting backing store.
int size = code->CodeSize();
code_buffer_.insert(code_buffer_.end(), code->address(),
@@ -377,15 +312,9 @@ Code* Serializer::CopyCode(Code* code) {
return Code::cast(HeapObject::FromAddress(&code_buffer_.front()));
}
bool Serializer::HasNotExceededFirstPageOfEachSpace() {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
if (!completed_chunks_[i].empty()) return false;
}
return true;
}
void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
int size, Map* map) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializePrologue(
AllocationSpace space, int size, Map* map) {
if (serializer_->code_address_map_) {
const char* code_name =
serializer_->code_address_map_->Lookup(object_->address());
@@ -403,16 +332,16 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
} else {
sink_->Put(NOT_EXECUTABLE, "not executable large object");
}
back_reference = serializer_->AllocateLargeObject(size);
back_reference = serializer_->allocator()->AllocateLargeObject(size);
} else if (space == MAP_SPACE) {
DCHECK_EQ(Map::kSize, size);
back_reference = serializer_->AllocateMap();
back_reference = serializer_->allocator()->AllocateMap();
sink_->Put(kNewObject + reference_representation_ + space, "NewMap");
// This is redundant, but we include it anyways.
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
} else {
int fill = serializer_->PutAlignmentPrefix(object_);
back_reference = serializer_->Allocate(space, size + fill);
back_reference = serializer_->allocator()->Allocate(space, size + fill);
sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
}
@@ -430,7 +359,8 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}
int32_t Serializer::ObjectSerializer::SerializeBackingStore(
template <class AllocatorT>
int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
void* backing_store, int32_t byte_length) {
SerializerReference reference =
serializer_->reference_map()->Lookup(backing_store);
@@ -441,7 +371,7 @@ int32_t Serializer::ObjectSerializer::SerializeBackingStore(
sink_->PutInt(byte_length, "length");
sink_->PutRaw(static_cast<byte*>(backing_store), byte_length,
"BackingStore");
reference = serializer_->AllocateOffHeapBackingStore();
reference = serializer_->allocator()->AllocateOffHeapBackingStore();
// Mark this backing store as already serialized.
serializer_->reference_map()->Add(backing_store, reference);
}
@@ -453,7 +383,8 @@ int32_t Serializer::ObjectSerializer::SerializeBackingStore(
// same backing store does not know anything about it. This fixup step finds
// neutered TypedArrays and clears the values in the FixedTypedArray so that
// we don't try to serialize the now invalid backing store.
void Serializer::ObjectSerializer::FixupIfNeutered() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::FixupIfNeutered() {
JSTypedArray* array = JSTypedArray::cast(object_);
if (!array->WasNeutered()) return;
@@ -463,7 +394,8 @@ void Serializer::ObjectSerializer::FixupIfNeutered() {
fta->set_length(0);
}
void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
JSArrayBuffer* buffer = JSArrayBuffer::cast(object_);
void* backing_store = buffer->backing_store();
// We cannot store byte_length larger than Smi range in the snapshot.
@@ -479,7 +411,8 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
SerializeObject();
}
void Serializer::ObjectSerializer::SerializeFixedTypedArray() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeFixedTypedArray() {
FixedTypedArrayBase* fta = FixedTypedArrayBase::cast(object_);
void* backing_store = fta->DataPtr();
// We cannot store byte_length larger than Smi range in the snapshot.
@@ -498,7 +431,8 @@ void Serializer::ObjectSerializer::SerializeFixedTypedArray() {
SerializeObject();
}
void Serializer::ObjectSerializer::SerializeExternalString() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
Heap* heap = serializer_->isolate()->heap();
if (object_->map() != heap->native_source_string_map()) {
// Usually we cannot recreate resources for external strings. To work
@@ -521,7 +455,9 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
}
}
void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
template <class AllocatorT>
void Serializer<
AllocatorT>::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Instead of serializing this as an external string, we serialize
// an imaginary sequential string with the same content.
Isolate* isolate = serializer_->isolate();
@@ -604,7 +540,8 @@ class UnlinkWeakNextScope {
DisallowHeapAllocation no_gc_;
};
void Serializer::ObjectSerializer::Serialize() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
if (FLAG_trace_serializer) {
PrintF(" Encoding heap object: ");
object_->ShortPrint();
@@ -645,7 +582,8 @@ void Serializer::ObjectSerializer::Serialize() {
SerializeObject();
}
void Serializer::ObjectSerializer::SerializeObject() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeObject() {
int size = object_->Size();
Map* map = object_->map();
AllocationSpace space =
@@ -669,7 +607,8 @@ void Serializer::ObjectSerializer::SerializeObject() {
SerializeContent(map, size);
}
void Serializer::ObjectSerializer::SerializeDeferred() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeDeferred() {
if (FLAG_trace_serializer) {
PrintF(" Encoding deferred heap object: ");
object_->ShortPrint();
@@ -694,7 +633,9 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
SerializeContent(map, size);
}
void Serializer::ObjectSerializer::SerializeContent(Map* map, int size) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeContent(Map* map,
int size) {
UnlinkWeakNextScope unlink_weak_next(object_);
if (object_->IsCode()) {
// For code objects, output raw bytes first.
@@ -711,8 +652,10 @@ void Serializer::ObjectSerializer::SerializeContent(Map* map, int size) {
}
}
void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
Object** start, Object** end) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(HeapObject* host,
Object** start,
Object** end) {
Object** current = start;
while (current < end) {
while (current < end && (*current)->IsSmi()) current++;
@@ -750,8 +693,9 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
}
}
void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code* host,
RelocInfo* rinfo) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitEmbeddedPointer(
Code* host, RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
Object* object = rinfo->target_object();
@@ -760,8 +704,9 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code* host,
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitExternalReference(Foreign* host,
Address* p) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
Foreign* host, Address* p) {
int skip = SkipTo(reinterpret_cast<Address>(p));
Address target = *p;
auto encoded_reference = serializer_->EncodeExternalReference(target);
@@ -775,8 +720,9 @@ void Serializer::ObjectSerializer::VisitExternalReference(Foreign* host,
bytes_processed_so_far_ += kPointerSize;
}
void Serializer::ObjectSerializer::VisitExternalReference(Code* host,
RelocInfo* rinfo) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
Code* host, RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
Address target = rinfo->target_external_reference();
auto encoded_reference = serializer_->EncodeExternalReference(target);
@@ -794,8 +740,9 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code* host,
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitInternalReference(Code* host,
RelocInfo* rinfo) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitInternalReference(
Code* host, RelocInfo* rinfo) {
// We do not use skip from last patched pc to find the pc to patch, since
// target_address_address may not return addresses in ascending order when
// used for internal references. External references may be stored at the
@@ -817,8 +764,9 @@ void Serializer::ObjectSerializer::VisitInternalReference(Code* host,
sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
}
void Serializer::ObjectSerializer::VisitRuntimeEntry(Code* host,
RelocInfo* rinfo) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitRuntimeEntry(
Code* host, RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
Address target = rinfo->target_address();
@@ -830,15 +778,17 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(Code* host,
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeTarget(Code* host,
RelocInfo* rinfo) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitCodeTarget(
Code* host, RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
Address object_start = object_->address();
int base = bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
@@ -864,7 +814,8 @@
}
}
int Serializer::ObjectSerializer::SkipTo(Address to) {
template <class AllocatorT>
int Serializer<AllocatorT>::ObjectSerializer::SkipTo(Address to) {
Address object_start = object_->address();
int up_to_offset = static_cast<int>(to - object_start);
int to_skip = up_to_offset - bytes_processed_so_far_;
@@ -875,7 +826,8 @@ int Serializer::ObjectSerializer::SkipTo(Address to) {
return to_skip;
}
void Serializer::ObjectSerializer::OutputCode(int size) {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
DCHECK_EQ(kPointerSize, bytes_processed_so_far_);
Code* code = Code::cast(object_);
if (FLAG_predictable) {
@@ -910,5 +862,8 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
sink_->PutRaw(start, bytes_to_output, "Code");
}
// Explicit instantiation.
template class Serializer<DefaultSerializerAllocator>;
} // namespace internal
} // namespace v8
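
Since the member definitions of the Serializer template stay in this .cc file,
the explicit instantiation at the end is what actually emits code for
Serializer<DefaultSerializerAllocator>; other translation units see only
declarations and would otherwise fail to link. The same pattern in miniature
(illustrative names, logical file boundaries shown as comments):

// widget.h
template <class T>
struct Widget {
  T Twice(T v);  // declared here, defined out of line in widget.cc
};

// widget.cc
template <class T>
T Widget<T>::Twice(T v) { return v + v; }

// Emits Widget<int>'s members into this translation unit. Using
// Widget<double> elsewhere would be a link error, since nothing emits it.
template struct Widget<int>;

// main.cc
int main() { return Widget<int>().Twice(21) == 42 ? 0 : 1; }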

View File

@@ -10,6 +10,7 @@
#include "src/isolate.h"
#include "src/log.h"
#include "src/objects.h"
#include "src/snapshot/default-serializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -119,25 +120,24 @@ class CodeAddressMap : public CodeEventLogger {
Isolate* isolate_;
};
// There can be only one serializer per V8 process.
template <class AllocatorT = DefaultSerializerAllocator>
class Serializer : public SerializerDeserializer {
public:
explicit Serializer(Isolate* isolate);
~Serializer() override;
void EncodeReservations(std::vector<SerializedData::Reservation>* out) const;
std::vector<SerializedData::Reservation> EncodeReservations() const {
return allocator_.EncodeReservations();
}
void SerializeDeferredObjects();
const std::vector<byte>* Payload() const { return sink_.data(); }
bool ReferenceMapContains(HeapObject* o) {
return reference_map()->Lookup(o).is_valid();
}
Isolate* isolate() const { return isolate_; }
SerializerReferenceMap* reference_map() { return &reference_map_; }
RootIndexMap* root_index_map() { return &root_index_map_; }
#ifdef OBJECT_PRINT
void CountInstanceType(Map* map, int size);
#endif // OBJECT_PRINT
protected:
class ObjectSerializer;
class RecursionScope {
@@ -155,6 +155,7 @@ class Serializer : public SerializerDeserializer {
Serializer* serializer_;
};
void SerializeDeferredObjects();
virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) = 0;
@@ -164,16 +165,13 @@ class Serializer : public SerializerDeserializer {
void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
int skip);
void PutSmi(Smi* smi);
void PutBackReference(HeapObject* object, SerializerReference reference);
void PutAttachedReference(SerializerReference reference,
HowToCode how_to_code, WhereToPoint where_to_point);
// Emit alignment prefix if necessary, return required padding space in bytes.
int PutAlignmentPrefix(HeapObject* object);
void PutNextChunk(int space);
// Returns true if the object was successfully serialized as hot object.
bool SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
@@ -202,17 +200,10 @@ class Serializer : public SerializerDeserializer {
}
}
// This will return the space for an object.
SerializerReference AllocateOffHeapBackingStore();
SerializerReference AllocateLargeObject(int size);
SerializerReference AllocateMap();
SerializerReference Allocate(AllocationSpace space, int size);
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
}
bool HasNotExceededFirstPageOfEachSpace();
// GetInt reads 4 bytes at once, requiring padding at the end.
void Pad();
@@ -222,14 +213,6 @@ class Serializer : public SerializerDeserializer {
Code* CopyCode(Code* code);
inline uint32_t max_chunk_size(int space) const {
DCHECK_LE(0, space);
DCHECK_LT(space, kNumberOfSpaces);
return max_chunk_size_[space];
}
const SnapshotByteSink* sink() const { return &sink_; }
void QueueDeferredObject(HeapObject* obj) {
DCHECK(reference_map_.Lookup(obj).is_back_reference());
deferred_objects_.push_back(obj);
@@ -237,56 +220,32 @@ class Serializer : public SerializerDeserializer {
void OutputStatistics(const char* name);
#ifdef OBJECT_PRINT
void CountInstanceType(Map* map, int size);
#endif // OBJECT_PRINT
#ifdef DEBUG
void PushStack(HeapObject* o) { stack_.push_back(o); }
void PopStack() { stack_.pop_back(); }
void PrintStack();
bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);
#endif // DEBUG
Isolate* isolate_;
SerializerReferenceMap* reference_map() { return &reference_map_; }
RootIndexMap* root_index_map() { return &root_index_map_; }
AllocatorT* allocator() { return &allocator_; }
SnapshotByteSink sink_;
ExternalReferenceEncoder external_reference_encoder_;
SerializerReferenceMap reference_map_;
RootIndexMap root_index_map_;
int recursion_depth_;
friend class Deserializer;
friend class ObjectSerializer;
friend class RecursionScope;
friend class SnapshotData;
SnapshotByteSink sink_; // Used directly by subclasses.
private:
CodeAddressMap* code_address_map_;
// Objects from the same space are put into chunks for bulk-allocation
// when deserializing. We have to make sure that each chunk fits into a
// page. So we track the chunk size in pending_chunk_ of a space, but
// when it exceeds a page, we complete the current chunk and start a new one.
uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
// Number of maps that we need to allocate.
uint32_t num_maps_;
// We map serialized large objects to indexes for back-referencing.
uint32_t large_objects_total_size_;
uint32_t seen_large_objects_index_;
// Used to keep track of the off-heap backing stores used by TypedArrays/
// ArrayBuffers. Note that the index begins at 1 and not 0, because when a
// TypedArray has an on-heap backing store, the backing_store pointer in the
// corresponding ArrayBuffer will be null, which makes it indistinguishable
// from index 0.
uint32_t seen_backing_stores_index_;
Isolate* isolate_;
SerializerReferenceMap reference_map_;
ExternalReferenceEncoder external_reference_encoder_;
RootIndexMap root_index_map_;
CodeAddressMap* code_address_map_ = nullptr;
std::vector<byte> code_buffer_;
// To handle stack overflow.
std::vector<HeapObject*> deferred_objects_;
std::vector<HeapObject*> deferred_objects_; // To handle stack overflow.
int recursion_depth_ = 0;
AllocatorT allocator_;
#ifdef OBJECT_PRINT
static const int kInstanceTypes = 256;
@@ -298,10 +257,13 @@ class Serializer : public SerializerDeserializer {
std::vector<HeapObject*> stack_;
#endif // DEBUG
friend class DefaultSerializerAllocator;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
class Serializer::ObjectSerializer : public ObjectVisitor {
template <class AllocatorT>
class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer, HeapObject* obj,
SnapshotByteSink* sink, HowToCode how_to_code,
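
The AllocatorT parameter is duck-typed: any type providing the member
functions the serializer calls (Allocate, AllocateMap, AllocateLargeObject,
AllocateOffHeapBackingStore, EncodeReservations, OutputStatistics, and a
constructor taking the serializer back pointer) can be plugged in, which is
how the planned builtin-serialization allocator will hook in. A hypothetical
sketch of such a swap, reduced to a single method:

#include <cstdint>
#include <iostream>

template <class AllocatorT>
class Serializer;  // forward declaration, as in this header

// A custom policy with the same surface the serializer expects.
class SlotAllocator {
 public:
  explicit SlotAllocator(Serializer<SlotAllocator>* serializer)
      : serializer_(serializer) {}
  uint32_t Allocate(uint32_t /*size*/) { return kSlotSize * index_++; }

 private:
  static constexpr uint32_t kSlotSize = 32;
  uint32_t index_ = 0;
  Serializer<SlotAllocator>* const serializer_;  // back pointer, as above
};

template <class AllocatorT>
class Serializer {
 public:
  Serializer() : allocator_(this) {}
  AllocatorT* allocator() { return &allocator_; }

 private:
  AllocatorT allocator_;
};

class BuiltinLikeSerializer : public Serializer<SlotAllocator> {};

int main() {
  BuiltinLikeSerializer s;
  std::cout << s.allocator()->Allocate(8) << "\n";  // 0
  std::cout << s.allocator()->Allocate(8) << "\n";  // 32
}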

View File

@@ -269,11 +269,11 @@ Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
return Vector<const byte>(context_data, context_length);
}
SnapshotData::SnapshotData(const Serializer* serializer) {
template <class AllocatorT>
SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
DisallowHeapAllocation no_gc;
std::vector<Reservation> reservations;
serializer->EncodeReservations(&reservations);
const std::vector<byte>* payload = serializer->sink()->data();
std::vector<Reservation> reservations = serializer->EncodeReservations();
const std::vector<byte>* payload = serializer->Payload();
// Calculate sizes.
uint32_t reservation_size =
@@ -299,6 +299,10 @@ SnapshotData::SnapshotData(const Serializer* serializer) {
static_cast<size_t>(payload->size()));
}
// Explicit instantiation.
template SnapshotData::SnapshotData(
const Serializer<DefaultSerializerAllocator>* serializer);
bool SnapshotData::IsSane() {
return GetHeaderValue(kVersionHashOffset) == Version::Hash();
}

View File

@@ -23,7 +23,8 @@ class StartupSerializer;
class SnapshotData : public SerializedData {
public:
// Used when producing.
explicit SnapshotData(const Serializer* serializer);
template <class AllocatorT>
explicit SnapshotData(const Serializer<AllocatorT>* serializer);
// Used when consuming.
explicit SnapshotData(const Vector<const byte> snapshot)

View File

@@ -44,7 +44,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
int root_index = root_index_map_.Lookup(obj);
int root_index = root_index_map()->Lookup(obj);
// We can only encode roots as such if they have already been serialized.
// That applies to root indices below the wave front.
if (root_index != RootIndexMap::kInvalidRootIndex) {
@@ -58,7 +58,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
if (isolate_->external_reference_redirector() && obj->IsAccessorInfo()) {
if (isolate()->external_reference_redirector() && obj->IsAccessorInfo()) {
// Wipe external reference redirects in the accessor info.
AccessorInfo* info = AccessorInfo::cast(obj);
Address original_address = Foreign::cast(info->getter())->foreign_address();
@@ -66,7 +66,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
accessor_infos_.push_back(info);
} else if (obj->IsScript() && Script::cast(obj)->IsUserJavaScript()) {
Script::cast(obj)->set_context_data(
isolate_->heap()->uninitialized_symbol());
isolate()->heap()->uninitialized_symbol());
} else if (obj->IsSharedFunctionInfo()) {
// Clear inferred name for native functions.
SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
@@ -125,7 +125,7 @@ void StartupSerializer::SerializeStrongReferences() {
serializing_immortal_immovables_roots_ = true;
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
// Check that immortal immovable roots are allocated on the first page.
CHECK(HasNotExceededFirstPageOfEachSpace());
DCHECK(allocator()->HasNotExceededFirstPageOfEachSpace());
serializing_immortal_immovables_roots_ = false;
// Visit the rest of the strong roots.
// Clear the stack limits to make the snapshot reproducible.
@@ -185,11 +185,11 @@ void StartupSerializer::CheckRehashability(HeapObject* table) {
// We can only correctly rehash if the hash tables below are the only
// ones that we deserialize.
if (table->IsUnseededNumberDictionary()) return;
if (table == isolate_->heap()->empty_ordered_hash_table()) return;
if (table == isolate_->heap()->empty_slow_element_dictionary()) return;
if (table == isolate_->heap()->empty_property_dictionary()) return;
if (table == isolate_->heap()->weak_object_to_code_table()) return;
if (table == isolate_->heap()->string_table()) return;
if (table == isolate()->heap()->empty_ordered_hash_table()) return;
if (table == isolate()->heap()->empty_slow_element_dictionary()) return;
if (table == isolate()->heap()->empty_property_dictionary()) return;
if (table == isolate()->heap()->weak_object_to_code_table()) return;
if (table == isolate()->heap()->string_table()) return;
can_be_rehashed_ = false;
}

View File

@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
class StartupSerializer : public Serializer {
class StartupSerializer : public Serializer<> {
public:
StartupSerializer(
Isolate* isolate,

View File

@@ -1339,6 +1339,8 @@
'snapshot/builtin-serializer.h',
'snapshot/code-serializer.cc',
'snapshot/code-serializer.h',
'snapshot/default-serializer-allocator.cc',
'snapshot/default-serializer-allocator.h',
'snapshot/deserializer.cc',
'snapshot/deserializer.h',
'snapshot/natives-common.cc',