[snapshot] Refactor deserializer allocations

A continuation of the work in 59e4b751, this extracts logic around
memory reservation and allocations out of the Deserializer class.

Follow-up work is planned to create a specialized allocator for
builtin deserialization.

Bug: v8:6624
Change-Id: I7081cdc557ab8fb2571aadb816399e136ea2cdbb
Reviewed-on: https://chromium-review.googlesource.com/716036
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48634}

parent 8411f8f939
commit 4450f7ca51
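
The shape of the refactor is worth keeping in mind while reading the diff
below: Deserializer becomes a class template, Deserializer<AllocatorT>, whose
allocator parameter (defaulting to the new DefaultDeserializerAllocator) owns
all reservation and bump-pointer state, and the deserializer only forwards
through allocator(). The following is a minimal, self-contained sketch of that
delegation pattern, added here for illustration only; the toy types and names
are invented and are not V8 API.

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Toy stand-in for a reserved block of heap memory.
    struct Chunk {
      std::vector<uint8_t> bytes;
      std::size_t top = 0;
    };

    // Policy object: owns the reservation and hands out bump-allocated space.
    class BumpAllocator {
     public:
      bool ReserveSpace(std::size_t n) {
        chunk_.bytes.resize(n);
        return true;
      }
      uint8_t* Allocate(std::size_t size) {
        // Bump the high-water mark (no bounds checking in this sketch).
        uint8_t* p = chunk_.bytes.data() + chunk_.top;
        chunk_.top += size;
        return p;
      }
     private:
      Chunk chunk_;
    };

    // The "deserializer" keeps no allocation state of its own; it forwards to
    // its allocator, mirroring Deserializer<AllocatorT> in the diff.
    template <class AllocatorT = BumpAllocator>
    class ToyDeserializer {
     public:
      bool Init(std::size_t reservation) {
        return allocator()->ReserveSpace(reservation);
      }
      uint8_t* ReadObject(std::size_t size) { return allocator()->Allocate(size); }
      AllocatorT* allocator() { return &allocator_; }
     private:
      AllocatorT allocator_;
    };

    int main() {
      ToyDeserializer<> d;  // <> picks the default allocator, like Deserializer<>
      d.Init(1024);
      std::cout << (d.ReadObject(32) != nullptr) << "\n";  // prints 1
    }

The same substitution appears throughout the CL: ReserveSpace() becomes
allocator()->ReserveSpace(), SetAlignment() becomes allocator()->SetAlignment(),
and so on.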
BUILD.gn (+2 lines)

@@ -1968,6 +1968,8 @@ v8_source_set("v8_base") {
    "src/snapshot/builtin-serializer.h",
    "src/snapshot/code-serializer.cc",
    "src/snapshot/code-serializer.h",
    "src/snapshot/default-deserializer-allocator.cc",
    "src/snapshot/default-deserializer-allocator.h",
    "src/snapshot/default-serializer-allocator.cc",
    "src/snapshot/default-serializer-allocator.h",
    "src/snapshot/deserializer.cc",
src/snapshot/builtin-deserializer.cc

@@ -38,7 +38,7 @@ BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
    : Deserializer(data, false) {
  // We may have to relax this at some point to pack reloc infos and handler
  // tables into the builtin blob (instead of the partial snapshot cache).
  DCHECK(ReservesOnlyCodeSpace());
  DCHECK(allocator()->ReservesOnlyCodeSpace());

  builtin_offsets_ = data->BuiltinOffsets();
  DCHECK_EQ(Builtins::builtin_count, builtin_offsets_.length());
@@ -136,7 +136,7 @@ uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
}

Heap::Reservation BuiltinDeserializer::CreateReservationsForEagerBuiltins() {
  DCHECK(ReservesOnlyCodeSpace());
  DCHECK(allocator()->ReservesOnlyCodeSpace());

  Heap::Reservation result;

@@ -236,7 +236,8 @@ Address BuiltinDeserializer::Allocate(int space_index, int size) {
  DCHECK_EQ(ExtractBuiltinSize(current_builtin_id_), size);
  Object* obj = isolate()->builtins()->builtin(current_builtin_id_);
  DCHECK(Internals::HasHeapObjectTag(obj));
  return HeapObject::cast(obj)->address();
  HeapObject* heap_obj = HeapObject::cast(obj);
  return heap_obj->address();
}

}  // namespace internal
src/snapshot/builtin-deserializer.h

@@ -14,7 +14,7 @@ namespace internal {
class BuiltinSnapshotData;

// Deserializes the builtins blob.
class BuiltinDeserializer final : public Deserializer {
class BuiltinDeserializer final : public Deserializer<> {
 public:
  BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);

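A recurring one-character change in this and the following headers is the base
class: Deserializer becomes Deserializer<>. The empty angle brackets simply
select the default template argument (DefaultDeserializerAllocator, declared in
the new header below). A tiny standalone illustration of that C++ mechanism,
using hypothetical names unrelated to V8:

    #include <iostream>
    #include <typeinfo>

    struct DefaultPolicy {};

    // Class template with a defaulted parameter, analogous to
    // template <class AllocatorT = DefaultDeserializerAllocator> class Deserializer.
    template <class PolicyT = DefaultPolicy>
    class Base {
     public:
      const char* policy_name() const { return typeid(PolicyT).name(); }
    };

    // "Derived final : public Base<>" is shorthand for Base<DefaultPolicy>.
    class Derived final : public Base<> {};

    int main() {
      Derived d;
      std::cout << d.policy_name() << "\n";  // mangled name of DefaultPolicy
    }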
src/snapshot/default-deserializer-allocator.cc (new file, 255 lines)

@@ -0,0 +1,255 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/default-deserializer-allocator.h"

#include "src/heap/heap-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/deserializer.h"
#include "src/snapshot/startup-deserializer.h"

namespace v8 {
namespace internal {

DefaultDeserializerAllocator::DefaultDeserializerAllocator(
    Deserializer<DefaultDeserializerAllocator>* deserializer)
    : deserializer_(deserializer) {}

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address DefaultDeserializerAllocator::AllocateRaw(AllocationSpace space,
                                                  int size) {
  if (space == LO_SPACE) {
    AlwaysAllocateScope scope(isolate());
    LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
    // TODO(jgruber): May be cleaner to pass in executability as an argument.
    Executability exec =
        static_cast<Executability>(deserializer_->source()->Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = result.ToObjectChecked();
    deserialized_large_objects_.push_back(obj);
    return obj->address();
  } else if (space == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    return allocated_maps_[next_map_index_++];
  } else {
    DCHECK(space < kNumberOfPreallocatedSpaces);
    Address address = high_water_[space];
    DCHECK_NOT_NULL(address);
    high_water_[space] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space];
    int chunk_index = current_chunk_[space];
    DCHECK_LE(high_water_[space], reservation[chunk_index].end);
#endif
    if (space == CODE_SPACE) SkipList::Update(address, size);
    return address;
  }
}

Address DefaultDeserializerAllocator::Allocate(AllocationSpace space,
                                               int size) {
  Address address;
  HeapObject* obj;

  if (next_alignment_ != kWordAligned) {
    const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = AllocateRaw(space, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate()->heap();
    DCHECK(heap->free_space_map()->IsMap());
    DCHECK(heap->one_pointer_filler_map()->IsMap());
    DCHECK(heap->two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
    return address;
  } else {
    return AllocateRaw(space, size);
  }
}

void DefaultDeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
  DCHECK(space < kNumberOfPreallocatedSpaces);
  uint32_t chunk_index = current_chunk_[space];
  const Heap::Reservation& reservation = reservations_[space];
  // Make sure the current chunk is indeed exhausted.
  CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
  // Move to next reserved chunk.
  chunk_index = ++current_chunk_[space];
  CHECK_LT(chunk_index, reservation.size());
  high_water_[space] = reservation[chunk_index].start;
}

HeapObject* DefaultDeserializerAllocator::GetMap(uint32_t index) {
  DCHECK_LT(index, next_map_index_);
  return HeapObject::FromAddress(allocated_maps_[index]);
}

HeapObject* DefaultDeserializerAllocator::GetLargeObject(uint32_t index) {
  DCHECK_LT(index, deserialized_large_objects_.size());
  return deserialized_large_objects_[index];
}

HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
                                                    uint32_t chunk_index,
                                                    uint32_t chunk_offset) {
  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
  DCHECK_LE(chunk_index, current_chunk_[space]);
  Address address = reservations_[space][chunk_index].start + chunk_offset;
  if (next_alignment_ != kWordAligned) {
    int padding = Heap::GetFillToAlign(address, next_alignment_);
    next_alignment_ = kWordAligned;
    DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
    address += padding;
  }
  return HeapObject::FromAddress(address);
}

void DefaultDeserializerAllocator::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].size());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
  for (auto& r : res) {
    reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

bool DefaultDeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    DCHECK(reservations_[i].size() > 0);
  }
#endif  // DEBUG
  DCHECK(allocated_maps_.empty());
  if (!isolate()->heap()->ReserveSpace(reservations_, &allocated_maps_)) {
    return false;
  }
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

// static
bool DefaultDeserializerAllocator::ReserveSpace(
    StartupDeserializer* startup_deserializer,
    BuiltinDeserializer* builtin_deserializer) {
  const int first_space = NEW_SPACE;
  const int last_space = SerializerDeserializer::kNumberOfSpaces;
  Isolate* isolate = startup_deserializer->isolate();

  // Create a set of merged reservations to reserve space in one go.
  // The BuiltinDeserializer's reservations are ignored, since our actual
  // requirements vary based on whether lazy deserialization is enabled.
  // Instead, we manually determine the required code-space.

  Heap::Reservation merged_reservations[kNumberOfSpaces];
  for (int i = first_space; i < last_space; i++) {
    merged_reservations[i] =
        startup_deserializer->allocator()->reservations_[i];
  }

  Heap::Reservation builtin_reservations =
      builtin_deserializer->CreateReservationsForEagerBuiltins();
  DCHECK(!builtin_reservations.empty());

  for (const auto& c : builtin_reservations) {
    merged_reservations[CODE_SPACE].push_back(c);
  }

  if (!isolate->heap()->ReserveSpace(
          merged_reservations,
          &startup_deserializer->allocator()->allocated_maps_)) {
    return false;
  }

  DisallowHeapAllocation no_allocation;

  // Distribute the successful allocations between both deserializers.
  // There's nothing to be done here except for code space.

  {
    const int num_builtin_reservations =
        static_cast<int>(builtin_reservations.size());
    for (int i = num_builtin_reservations - 1; i >= 0; i--) {
      const auto& c = merged_reservations[CODE_SPACE].back();
      DCHECK_EQ(c.size, builtin_reservations[i].size);
      DCHECK_EQ(c.size, c.end - c.start);
      builtin_reservations[i].start = c.start;
      builtin_reservations[i].end = c.end;
      merged_reservations[CODE_SPACE].pop_back();
    }

    builtin_deserializer->InitializeBuiltinsTable(builtin_reservations);
  }

  // Write back startup reservations.

  for (int i = first_space; i < last_space; i++) {
    startup_deserializer->allocator()->reservations_[i].swap(
        merged_reservations[i]);
  }

  for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
    startup_deserializer->allocator()->high_water_[i] =
        startup_deserializer->allocator()->reservations_[i][0].start;
    builtin_deserializer->allocator()->high_water_[i] = nullptr;
  }

  return true;
}

bool DefaultDeserializerAllocator::ReservesOnlyCodeSpace() const {
  for (int space = NEW_SPACE; space < kNumberOfSpaces; space++) {
    if (space == CODE_SPACE) continue;
    const auto& r = reservations_[space];
    for (const Heap::Chunk& c : r)
      if (c.size != 0) return false;
  }
  return true;
}

bool DefaultDeserializerAllocator::ReservationsAreFullyUsed() const {
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    const uint32_t chunk_index = current_chunk_[space];
    if (reservations_[space].size() != chunk_index + 1) {
      return false;
    }
    if (reservations_[space][chunk_index].end != high_water_[space]) {
      return false;
    }
  }
  return (allocated_maps_.size() == next_map_index_);
}

void DefaultDeserializerAllocator::
    RegisterDeserializedObjectsForBlackAllocation() {
  isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
      reservations_, deserialized_large_objects_, allocated_maps_);
}

Isolate* DefaultDeserializerAllocator::isolate() const {
  return deserializer_->isolate();
}

}  // namespace internal
}  // namespace v8
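
The static ReserveSpace overload above reserves memory for the startup and
builtin deserializers with a single heap call: it appends the builtin
code-space chunks to the startup reservations, reserves everything at once, and
then peels the granted tail chunks back off for the builtin deserializer. Below
is a simplified, self-contained sketch of that merge/reserve/distribute idea;
plain integers stand in for Heap::Chunk and the "heap" always succeeds, so none
of this is V8 API.

    #include <cassert>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // A request is just a size; a "reservation" is the list of granted sizes.
    using Reservation = std::vector<int>;

    // Pretend heap call that grants every chunk or fails as a whole.
    bool ReserveAll(const Reservation& merged, Reservation* granted) {
      *granted = merged;  // always succeeds in this sketch
      return true;
    }

    // Reserve for two clients atomically: merge, reserve once, split the result.
    bool ReserveForBoth(Reservation* startup, Reservation* builtin) {
      Reservation merged = *startup;
      merged.insert(merged.end(), builtin->begin(), builtin->end());

      Reservation granted;
      if (!ReserveAll(merged, &granted)) return false;  // neither side keeps anything

      // The builtin chunks were appended last, so take them from the tail.
      const std::size_t n_builtin = builtin->size();
      builtin->assign(granted.end() - n_builtin, granted.end());
      granted.resize(granted.size() - n_builtin);
      startup->swap(granted);
      return true;
    }

    int main() {
      Reservation startup = {16, 32}, builtin = {64};
      assert(ReserveForBoth(&startup, &builtin));
      std::cout << startup.size() << " startup chunks, "
                << builtin.size() << " builtin chunk\n";
    }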
src/snapshot/default-deserializer-allocator.h (new file, 108 lines)

@@ -0,0 +1,108 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
#define V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_

#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/snapshot/serializer-common.h"

namespace v8 {
namespace internal {

template <class AllocatorT>
class Deserializer;

class BuiltinDeserializer;
class StartupDeserializer;

class DefaultDeserializerAllocator final {
 public:
  DefaultDeserializerAllocator(
      Deserializer<DefaultDeserializerAllocator>* deserializer);

  // ------- Allocation Methods -------
  // Methods related to memory allocation during deserialization.

  Address Allocate(AllocationSpace space, int size);

  void MoveToNextChunk(AllocationSpace space);
  void SetAlignment(AllocationAlignment alignment) {
    DCHECK_EQ(kWordAligned, next_alignment_);
    DCHECK_LE(kWordAligned, alignment);
    DCHECK_LE(alignment, kDoubleUnaligned);
    next_alignment_ = static_cast<AllocationAlignment>(alignment);
  }

  HeapObject* GetMap(uint32_t index);
  HeapObject* GetLargeObject(uint32_t index);
  HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
                        uint32_t chunk_offset);

  // ------- Reservation Methods -------
  // Methods related to memory reservations (prior to deserialization).

  void DecodeReservation(Vector<const SerializedData::Reservation> res);
  bool ReserveSpace();

  // Atomically reserves space for the two given deserializers. Guarantees
  // reservation for both without garbage collection in-between.
  static bool ReserveSpace(StartupDeserializer* startup_deserializer,
                           BuiltinDeserializer* builtin_deserializer);

  bool ReservesOnlyCodeSpace() const;
  bool ReservationsAreFullyUsed() const;

  // ------- Misc Utility Methods -------

  void RegisterDeserializedObjectsForBlackAllocation();

  // For SortMapDescriptors();
  const std::vector<Address>& GetAllocatedMaps() const {
    return allocated_maps_;
  }

 private:
  Isolate* isolate() const;

  // Raw allocation without considering alignment.
  Address AllocateRaw(AllocationSpace space, int size);

 private:
  static constexpr int kNumberOfPreallocatedSpaces =
      SerializerDeserializer::kNumberOfPreallocatedSpaces;
  static constexpr int kNumberOfSpaces =
      SerializerDeserializer::kNumberOfSpaces;

  // The address of the next object that will be allocated in each space.
  // Each space has a number of chunks reserved by the GC, with each chunk
  // fitting into a page. Deserialized objects are allocated into the
  // current chunk of the target space by bumping up high water mark.
  Heap::Reservation reservations_[kNumberOfSpaces];
  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
  Address high_water_[kNumberOfPreallocatedSpaces];

  // The alignment of the next allocation.
  AllocationAlignment next_alignment_ = kWordAligned;

  // All required maps are pre-allocated during reservation. {next_map_index_}
  // stores the index of the next map to return from allocation.
  uint32_t next_map_index_ = 0;
  std::vector<Address> allocated_maps_;

  // Allocated large objects are kept in this map and may be fetched later as
  // back-references.
  std::vector<HeapObject*> deserialized_large_objects_;

  // The current deserializer.
  Deserializer<DefaultDeserializerAllocator>* const deserializer_;

  DISALLOW_COPY_AND_ASSIGN(DefaultDeserializerAllocator);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
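
Most of the state in this header is three arrays indexed by space:
reservations_, current_chunk_, and high_water_. Allocation bumps high_water_
inside the current chunk, and moving on to the next chunk only happens at an
explicit MoveToNextChunk, which the serialized stream signals with an opcode
(kNextChunk in deserializer.cc below). A compact standalone model of that
chunked bump allocation follows; the class and method names are invented for
illustration and error handling is reduced to asserts.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Chunk {
      std::vector<uint8_t> bytes;
      std::size_t high_water = 0;  // offset of the next free byte
    };

    class ChunkedBumpAllocator {
     public:
      void Reserve(const std::vector<std::size_t>& chunk_sizes) {
        for (std::size_t n : chunk_sizes) {
          Chunk c;
          c.bytes.resize(n);
          chunks_.push_back(std::move(c));
        }
      }
      uint8_t* Allocate(std::size_t size) {
        Chunk& c = chunks_[current_];
        assert(c.high_water + size <= c.bytes.size());  // chunk must still fit it
        uint8_t* p = c.bytes.data() + c.high_water;
        c.high_water += size;
        return p;
      }
      void MoveToNextChunk() {
        // The serializer emits the "next chunk" opcode exactly when the current
        // chunk is exhausted, so the exhaustion check can be strict.
        assert(chunks_[current_].high_water == chunks_[current_].bytes.size());
        ++current_;
        assert(current_ < chunks_.size());
      }
      bool ReservationsAreFullyUsed() const {
        return current_ + 1 == chunks_.size() &&
               chunks_[current_].high_water == chunks_[current_].bytes.size();
      }

     private:
      std::vector<Chunk> chunks_;
      std::size_t current_ = 0;
    };

    int main() {
      ChunkedBumpAllocator a;
      a.Reserve({8, 16});
      a.Allocate(8);        // fills chunk 0
      a.MoveToNextChunk();  // analogous to the kNextChunk opcode
      a.Allocate(16);
      std::cout << std::boolalpha << a.ReservationsAreFullyUsed() << "\n";  // true
    }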
src/snapshot/deserializer.cc

@@ -4,134 +4,16 @@

#include "src/snapshot/deserializer.h"

#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
#include "src/external-reference-table.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/objects/string.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/v8.h"
#include "src/v8threads.h"

namespace v8 {
namespace internal {

void Deserializer::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].size());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
  for (auto& r : res) {
    reservations_[current_space].push_back({r.chunk_size(), nullptr, nullptr});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

void Deserializer::RegisterDeserializedObjectsForBlackAllocation() {
  isolate_->heap()->RegisterDeserializedObjectsForBlackAllocation(
      reservations_, deserialized_large_objects_, allocated_maps_);
}

bool Deserializer::ReserveSpace() {
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    DCHECK_GT(reservations_[i].size(), 0);
  }
#endif  // DEBUG
  DCHECK(allocated_maps_.empty());
  if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_))
    return false;
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

// static
bool Deserializer::ReserveSpace(StartupDeserializer* startup_deserializer,
                                BuiltinDeserializer* builtin_deserializer) {
  const int first_space = NEW_SPACE;
  const int last_space = SerializerDeserializer::kNumberOfSpaces;
  Isolate* isolate = startup_deserializer->isolate();

  // Create a set of merged reservations to reserve space in one go.
  // The BuiltinDeserializer's reservations are ignored, since our actual
  // requirements vary based on whether lazy deserialization is enabled.
  // Instead, we manually determine the required code-space.

  DCHECK(builtin_deserializer->ReservesOnlyCodeSpace());
  Heap::Reservation merged_reservations[kNumberOfSpaces];
  for (int i = first_space; i < last_space; i++) {
    merged_reservations[i] = startup_deserializer->reservations_[i];
  }

  Heap::Reservation builtin_reservations =
      builtin_deserializer->CreateReservationsForEagerBuiltins();
  DCHECK(!builtin_reservations.empty());

  for (const auto& c : builtin_reservations) {
    merged_reservations[CODE_SPACE].push_back(c);
  }

  if (!isolate->heap()->ReserveSpace(merged_reservations,
                                     &startup_deserializer->allocated_maps_)) {
    return false;
  }

  DisallowHeapAllocation no_allocation;

  // Distribute the successful allocations between both deserializers.
  // There's nothing to be done here except for code space.

  {
    const int num_builtin_reservations =
        static_cast<int>(builtin_reservations.size());
    for (int i = num_builtin_reservations - 1; i >= 0; i--) {
      const auto& c = merged_reservations[CODE_SPACE].back();
      DCHECK_EQ(c.size, builtin_reservations[i].size);
      DCHECK_EQ(c.size, c.end - c.start);
      builtin_reservations[i].start = c.start;
      builtin_reservations[i].end = c.end;
      merged_reservations[CODE_SPACE].pop_back();
    }

    builtin_deserializer->InitializeBuiltinsTable(builtin_reservations);
  }

  // Write back startup reservations.

  for (int i = first_space; i < last_space; i++) {
    startup_deserializer->reservations_[i].swap(merged_reservations[i]);
  }

  for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
    startup_deserializer->high_water_[i] =
        startup_deserializer->reservations_[i][0].start;
    builtin_deserializer->high_water_[i] = nullptr;
  }

  return true;
}

bool Deserializer::ReservesOnlyCodeSpace() const {
  for (int space = NEW_SPACE; space < kNumberOfSpaces; space++) {
    if (space == CODE_SPACE) continue;
    const auto& r = reservations_[space];
    for (const Heap::Chunk& c : r)
      if (c.size != 0) return false;
  }
  return true;
}

void Deserializer::Initialize(Isolate* isolate) {
template <class AllocatorT>
void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
  isolate_ = isolate;
@@ -150,56 +32,52 @@ void Deserializer::Initialize(Isolate* isolate) {
      SerializedData::ComputeMagicNumber(external_reference_table_));
}

void Deserializer::SortMapDescriptors() {
  for (const auto& address : allocated_maps_) {
    Map* map = Map::cast(HeapObject::FromAddress(address));
    if (map->instance_descriptors()->number_of_descriptors() > 1) {
      map->instance_descriptors()->Sort();
    }
  }
}

bool Deserializer::IsLazyDeserializationEnabled() const {
template <class AllocatorT>
bool Deserializer<AllocatorT>::IsLazyDeserializationEnabled() const {
  return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
}

Deserializer::~Deserializer() {
template <class AllocatorT>
Deserializer<AllocatorT>::~Deserializer() {
#ifdef DEBUG
  // Do not perform checks if we aborted deserialization.
  if (source_.position() == 0) return;
  // Check that we only have padding bytes remaining.
  while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    int chunk_index = current_chunk_[space];
    DCHECK_EQ(reservations_[space].size(), chunk_index + 1);
    DCHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
  }
  DCHECK_EQ(allocated_maps_.size(), next_map_index_);
  // Check that we've fully used all reserved space.
  DCHECK(allocator()->ReservationsAreFullyUsed());
#endif  // DEBUG
}

// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitRootPointers(Root root, Object** start, Object** end) {
template <class AllocatorT>
void Deserializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
                                                 Object** end) {
  // The space must be new space. Any other space would cause ReadChunk to try
  // to update the remembered using nullptr as the address.
  ReadData(start, end, NEW_SPACE, nullptr);
}

void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
template <class AllocatorT>
void Deserializer<AllocatorT>::Synchronize(
    VisitorSynchronization::SyncTag tag) {
  static const byte expected = kSynchronize;
  CHECK_EQ(expected, source_.Get());
  deserializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
}

void Deserializer::DeserializeDeferredObjects() {
template <class AllocatorT>
void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    switch (code) {
      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(code);
      case kAlignmentPrefix + 2: {
        int alignment = code - (SerializerDeserializer::kAlignmentPrefix - 1);
        allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
        break;
      }
      default: {
        int space = code & kSpaceMask;
        DCHECK_LE(space, kNumberOfSpaces);
@@ -241,7 +119,9 @@ uint32_t StringTableInsertionKey::ComputeHashField(String* string) {
  return string->hash_field();
}

HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
template <class AllocatorT>
HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
                                                           int space) {
  if (deserializing_user_code()) {
    if (obj->IsString()) {
      String* string = String::cast(obj);
@@ -334,7 +214,8 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
  return obj;
}

int Deserializer::MaybeReplaceWithDeserializeLazy(int builtin_id) {
template <class AllocatorT>
int Deserializer<AllocatorT>::MaybeReplaceWithDeserializeLazy(int builtin_id) {
  DCHECK(Builtins::IsBuiltinId(builtin_id));
  return (IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id) &&
          !deserializing_builtins_)
@@ -342,66 +223,56 @@ int Deserializer::MaybeReplaceWithDeserializeLazy(int builtin_id) {
             : builtin_id;
}

HeapObject* Deserializer::GetBackReferencedObject(int space) {
template <class AllocatorT>
HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
  HeapObject* obj;
  SerializerReference back_reference =
      SerializerReference::FromBitfield(source_.GetInt());
  if (space == LO_SPACE) {
    uint32_t index = back_reference.large_object_index();
    obj = deserialized_large_objects_[index];
  } else if (space == MAP_SPACE) {
    int index = back_reference.map_index();
    DCHECK(index < next_map_index_);
    obj = HeapObject::FromAddress(allocated_maps_[index]);
  } else {
    DCHECK_LT(space, kNumberOfPreallocatedSpaces);
    uint32_t chunk_index = back_reference.chunk_index();
    DCHECK_LE(chunk_index, current_chunk_[space]);
    uint32_t chunk_offset = back_reference.chunk_offset();
    Address address = reservations_[space][chunk_index].start + chunk_offset;
    if (next_alignment_ != kWordAligned) {
      int padding = Heap::GetFillToAlign(address, next_alignment_);
      next_alignment_ = kWordAligned;
      DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
      address += padding;
    }
    obj = HeapObject::FromAddress(address);

  switch (space) {
    case LO_SPACE:
      obj = allocator()->GetLargeObject(back_reference.large_object_index());
      break;
    case MAP_SPACE:
      obj = allocator()->GetMap(back_reference.map_index());
      break;
    default:
      obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
                                   back_reference.chunk_index(),
                                   back_reference.chunk_offset());
      break;
  }

  if (deserializing_user_code() && obj->IsInternalizedString()) {
    obj = String::cast(obj)->GetForwardedInternalizedString();
  }

  hot_objects_.Add(obj);
  return obj;
}

template <class AllocatorT>
void Deserializer<AllocatorT>::SortMapDescriptors() {
  for (const auto& address : allocator()->GetAllocatedMaps()) {
    Map* map = Map::cast(HeapObject::FromAddress(address));
    if (map->instance_descriptors()->number_of_descriptors() > 1) {
      map->instance_descriptors()->Sort();
    }
  }
}

// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
void Deserializer::ReadObject(int space_number, Object** write_back) {
  Address address;
  HeapObject* obj;
  int size = source_.GetInt() << kObjectAlignmentBits;
template <class AllocatorT>
void Deserializer<AllocatorT>::ReadObject(int space_number,
                                          Object** write_back) {
  const int size = source_.GetInt() << kObjectAlignmentBits;

  if (next_alignment_ != kWordAligned) {
    int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = Allocate(space_number, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate_->heap();
    DCHECK(heap->free_space_map()->IsMap());
    DCHECK(heap->one_pointer_filler_map()->IsMap());
    DCHECK(heap->two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
  } else {
    address = Allocate(space_number, size);
    obj = HeapObject::FromAddress(address);
  }
  Address address = Allocate(space_number, size);
  HeapObject* obj = HeapObject::FromAddress(address);

  isolate_->heap()->OnAllocationEvent(obj, size);
  Object** current = reinterpret_cast<Object**>(address);
@@ -423,46 +294,15 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
#endif  // DEBUG
}

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
  if (space_index == LO_SPACE) {
    AlwaysAllocateScope scope(isolate_);
    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
    Executability exec = static_cast<Executability>(source_.Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = result.ToObjectChecked();
    deserialized_large_objects_.push_back(obj);
    return obj->address();
  } else if (space_index == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    return allocated_maps_[next_map_index_++];
  } else {
    DCHECK_LT(space_index, kNumberOfPreallocatedSpaces);
    Address address = high_water_[space_index];
    DCHECK_NOT_NULL(address);
    high_water_[space_index] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space_index];
    int chunk_index = current_chunk_[space_index];
    DCHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
    if (space_index == CODE_SPACE) SkipList::Update(address, size);
    return address;
  }
template <class AllocatorT>
Address Deserializer<AllocatorT>::Allocate(int space_index, int size) {
  // TODO(jgruber): Remove this indirection once we have a
  // BuiltinDeserializerAllocator.
  return allocator()->Allocate(static_cast<AllocationSpace>(space_index), size);
}

Object* Deserializer::ReadDataSingle() {
template <class AllocatorT>
Object* Deserializer<AllocatorT>::ReadDataSingle() {
  Object* o;
  Object** start = &o;
  Object** end = start + 1;
@@ -474,8 +314,10 @@ Object* Deserializer::ReadDataSingle() {
  return o;
}

bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                            Address current_object_address) {
template <class AllocatorT>
bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
                                        int source_space,
                                        Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time. In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
@@ -618,15 +460,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,

      case kNextChunk: {
        int space = source_.Get();
        DCHECK_LT(space, kNumberOfPreallocatedSpaces);
        int chunk_index = current_chunk_[space];
        const Heap::Reservation& reservation = reservations_[space];
        // Make sure the current chunk is indeed exhausted.
        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
        // Move to next reserved chunk.
        chunk_index = ++current_chunk_[space];
        CHECK_LT(chunk_index, reservation.size());
        high_water_[space] = reservation[chunk_index].start;
        allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
        break;
      }

@@ -702,9 +536,11 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,

      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(data);
      case kAlignmentPrefix + 2: {
        int alignment = data - (SerializerDeserializer::kAlignmentPrefix - 1);
        allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
        break;
      }

      STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
@@ -783,10 +619,13 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
  return true;
}

template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
                                    Address current_object_address, byte data,
                                    bool write_barrier_needed) {
Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
                                                Object** current,
                                                Address current_object_address,
                                                byte data,
                                                bool write_barrier_needed) {
  bool emit_write_barrier = false;
  bool current_was_incremented = false;
  int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
@@ -877,5 +716,8 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
  return current;
}

// Explicit instantiation.
template class Deserializer<DefaultDeserializerAllocator>;

}  // namespace internal
}  // namespace v8
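
deserializer.cc now ends with "template class Deserializer<DefaultDeserializerAllocator>;".
Since the member functions of the Deserializer template are defined in this .cc
file rather than in the header, an explicit instantiation is needed so that
their code is actually emitted for the one allocator type in use. A
self-contained illustration of the technique with a hypothetical Box template
(collapsed into a single file here; in practice the declaration would live in a
header and the definitions plus the explicit instantiation in the .cc):

    #include <iostream>

    // --- what would live in the header: declaration only ---
    template <class T>
    class Box {
     public:
      explicit Box(T value);
      T value() const;
     private:
      T value_;
    };

    // --- what would live in the .cc: out-of-line definitions ---
    template <class T>
    Box<T>::Box(T value) : value_(value) {}

    template <class T>
    T Box<T>::value() const {
      return value_;
    }

    // Explicit instantiation: forces the compiler to emit Box<int>'s members in
    // this translation unit, playing the same role as
    //   template class Deserializer<DefaultDeserializerAllocator>;
    template class Box<int>;

    int main() {
      Box<int> b(42);
      std::cout << b.value() << "\n";
    }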
src/snapshot/deserializer.h

@@ -7,14 +7,16 @@

#include <vector>

#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/snapshot/default-deserializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"

namespace v8 {
namespace internal {

class HeapObject;
class Object;

// Used for platforms with embedded constant pools to trigger deserialization
// of objects found in code.
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
@@ -25,21 +27,12 @@ namespace internal {
#define V8_CODE_EMBEDS_OBJECT_POINTER 0
#endif

class BuiltinDeserializer;
class Heap;
class StartupDeserializer;

// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
template <class AllocatorT = DefaultDeserializerAllocator>
class Deserializer : public SerializerDeserializer {
 public:
  ~Deserializer() override;

  // Add an object to back an attached reference. The order to add objects must
  // mirror the order they are added in the serializer.
  void AddAttachedObject(Handle<HeapObject> attached_object) {
    attached_objects_.push_back(attached_object);
  }

  void SetRehashability(bool v) { can_rehash_ = v; }

 protected:
@@ -49,29 +42,18 @@ class Deserializer : public SerializerDeserializer {
      : isolate_(nullptr),
        source_(data->Payload()),
        magic_number_(data->GetMagicNumber()),
        next_map_index_(0),
        external_reference_table_(nullptr),
        deserialized_large_objects_(0),
        allocator_(this),
        deserializing_user_code_(deserializing_user_code),
        next_alignment_(kWordAligned),
        can_rehash_(false) {
    DecodeReservation(data->Reservations());
    // We start the indicies here at 1, so that we can distinguish between an
    allocator()->DecodeReservation(data->Reservations());
    // We start the indices here at 1, so that we can distinguish between an
    // actual index and a nullptr in a deserialized object requiring fix-up.
    off_heap_backing_stores_.push_back(nullptr);
  }

  bool ReserveSpace();

  // Atomically reserves space for the two given deserializers. Guarantees
  // reservation for both without garbage collection in-between.
  static bool ReserveSpace(StartupDeserializer* startup_deserializer,
                           BuiltinDeserializer* builtin_deserializer);
  bool ReservesOnlyCodeSpace() const;

  void Initialize(Isolate* isolate);
  void DeserializeDeferredObjects();
  void RegisterDeserializedObjectsForBlackAllocation();

  virtual Address Allocate(int space_index, int size);

@@ -82,6 +64,12 @@ class Deserializer : public SerializerDeserializer {
  // snapshot by chunk index and offset.
  HeapObject* GetBackReferencedObject(int space);

  // Add an object to back an attached reference. The order to add objects must
  // mirror the order they are added in the serializer.
  void AddAttachedObject(Handle<HeapObject> attached_object) {
    attached_objects_.push_back(attached_object);
  }

  // Sort descriptors of deserialized maps using new string hashes.
  void SortMapDescriptors();

@@ -102,6 +90,8 @@ class Deserializer : public SerializerDeserializer {
  const std::vector<TransitionArray*>& transition_arrays() const {
    return transition_arrays_;
  }

  AllocatorT* allocator() { return &allocator_; }
  bool deserializing_user_code() const { return deserializing_user_code_; }
  bool can_rehash() const { return can_rehash_; }

@@ -112,20 +102,10 @@ class Deserializer : public SerializerDeserializer {

  void Synchronize(VisitorSynchronization::SyncTag tag) override;

  void DecodeReservation(Vector<const SerializedData::Reservation> res);

  void UnalignedCopy(Object** dest, Object** src) {
    memcpy(dest, src, sizeof(*src));
  }

  void SetAlignment(byte data) {
    DCHECK_EQ(kWordAligned, next_alignment_);
    int alignment = data - (kAlignmentPrefix - 1);
    DCHECK_LE(kWordAligned, alignment);
    DCHECK_LE(alignment, kDoubleUnaligned);
    next_alignment_ = static_cast<AllocationAlignment>(alignment);
  }

  // Fills in some heap data in an area from start to end (non-inclusive). The
  // space id is used for the write barrier. The object_address is the address
  // of the object we are writing into, or nullptr if we are not writing into an
@@ -159,19 +139,8 @@ class Deserializer : public SerializerDeserializer {
  SnapshotByteSource source_;
  uint32_t magic_number_;

  // The address of the next object that will be allocated in each space.
  // Each space has a number of chunks reserved by the GC, with each chunk
  // fitting into a page. Deserialized objects are allocated into the
  // current chunk of the target space by bumping up high water mark.
  Heap::Reservation reservations_[kNumberOfSpaces];
  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
  Address high_water_[kNumberOfPreallocatedSpaces];
  int next_map_index_;
  std::vector<Address> allocated_maps_;

  ExternalReferenceTable* external_reference_table_;

  std::vector<HeapObject*> deserialized_large_objects_;
  std::vector<Code*> new_code_objects_;
  std::vector<AccessorInfo*> accessor_infos_;
  std::vector<Handle<String>> new_internalized_strings_;
@@ -179,14 +148,13 @@ class Deserializer : public SerializerDeserializer {
  std::vector<TransitionArray*> transition_arrays_;
  std::vector<byte*> off_heap_backing_stores_;

  AllocatorT allocator_;
  const bool deserializing_user_code_;

  // TODO(jgruber): This workaround will no longer be necessary once builtin
  // reference patching has been removed (through advance allocation).
  bool deserializing_builtins_ = false;

  AllocationAlignment next_alignment_;

  // TODO(6593): generalize rehashing, and remove this flag.
  bool can_rehash_;

@@ -194,6 +162,9 @@ class Deserializer : public SerializerDeserializer {
  uint32_t num_api_references_;
#endif  // DEBUG

  // For source(), isolate(), and allocator().
  friend class DefaultDeserializerAllocator;

  DISALLOW_COPY_AND_ASSIGN(Deserializer);
};

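deserializer.h also drops its local SetAlignment(byte) helper: the opcode
arithmetic (data - (kAlignmentPrefix - 1)) now happens at the call sites, and
the padding itself stays behind Heap::GetFillToAlign and AlignWithFiller inside
the allocator. For reference, a small standalone sketch of the underlying
fill-to-align arithmetic, using made-up constants rather than V8's alignment
enums:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Bytes needed to round `address` up to `alignment` (a power of two).
    uint64_t FillToAlign(uint64_t address, uint64_t alignment) {
      return (alignment - (address & (alignment - 1))) & (alignment - 1);
    }

    int main() {
      // To place an aligned object, the allocator over-allocates by up to
      // alignment - 1 bytes and skips the gap with a filler object, which is
      // the role AlignWithFiller plays in the real code.
      const uint64_t kAlignment = 8;
      for (uint64_t addr : {64u, 65u, 70u}) {
        uint64_t pad = FillToAlign(addr, kAlignment);
        assert((addr + pad) % kAlignment == 0);
        std::cout << "address " << addr << " needs " << pad << " filler bytes\n";
      }
    }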
src/snapshot/object-deserializer.cc

@@ -67,7 +67,7 @@ ObjectDeserializer::DeserializeWasmCompiledModule(

MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) return MaybeHandle<HeapObject>();
  if (!allocator()->ReserveSpace()) return MaybeHandle<HeapObject>();

  DCHECK(deserializing_user_code());
  HandleScope scope(isolate);
@@ -79,7 +79,7 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
    DeserializeDeferredObjects();
    FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
    result = Handle<HeapObject>(HeapObject::cast(root));
    RegisterDeserializedObjectsForBlackAllocation();
    allocator()->RegisterDeserializedObjectsForBlackAllocation();
  }
  CommitPostProcessedObjects();
  return scope.CloseAndEscape(result);
src/snapshot/object-deserializer.h

@@ -15,7 +15,7 @@ class SharedFunctionInfo;
class WasmCompiledModule;

// Deserializes the object graph rooted at a given object.
class ObjectDeserializer final : public Deserializer {
class ObjectDeserializer final : public Deserializer<> {
 public:
  static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
      Isolate* isolate, const SerializedCodeData* data, Handle<String> source);
src/snapshot/partial-deserializer.cc

@@ -30,7 +30,9 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
    Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
    v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
  Initialize(isolate);
  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("PartialDeserializer");
  if (!allocator()->ReserveSpace()) {
    V8::FatalProcessOutOfMemory("PartialDeserializer");
  }

  AddAttachedObject(global_proxy);

@@ -44,7 +46,7 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
  DeserializeDeferredObjects();
  DeserializeEmbedderFields(embedder_fields_deserializer);

  RegisterDeserializedObjectsForBlackAllocation();
  allocator()->RegisterDeserializedObjectsForBlackAllocation();

  // There's no code deserialized here. If this assert fires then that's
  // changed and logging should be added to notify the profiler et al of the
src/snapshot/partial-deserializer.h

@@ -15,7 +15,7 @@ class Context;

// Deserializes the context-dependent object graph rooted at a given object.
// The PartialDeserializer is not expected to deserialize any code objects.
class PartialDeserializer final : public Deserializer {
class PartialDeserializer final : public Deserializer<> {
 public:
  static MaybeHandle<Context> DeserializeContext(
      Isolate* isolate, const SnapshotData* data, bool can_rehash,
src/snapshot/startup-deserializer.cc

@@ -18,7 +18,8 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {

  BuiltinDeserializer builtin_deserializer(isolate, builtin_data_);

  if (!Deserializer::ReserveSpace(this, &builtin_deserializer)) {
  if (!DefaultDeserializerAllocator::ReserveSpace(this,
                                                  &builtin_deserializer)) {
    V8::FatalProcessOutOfMemory("StartupDeserializer");
  }

src/snapshot/startup-deserializer.h

@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {

// Initializes an isolate with context-independent data from a given snapshot.
class StartupDeserializer final : public Deserializer {
class StartupDeserializer final : public Deserializer<> {
 public:
  StartupDeserializer(const SnapshotData* startup_data,
                      const BuiltinSnapshotData* builtin_data)
src/v8.gyp

@@ -1345,6 +1345,8 @@
      'snapshot/builtin-serializer.h',
      'snapshot/code-serializer.cc',
      'snapshot/code-serializer.h',
      'snapshot/default-deserializer-allocator.cc',
      'snapshot/default-deserializer-allocator.h',
      'snapshot/default-serializer-allocator.cc',
      'snapshot/default-serializer-allocator.h',
      'snapshot/deserializer.cc',