Break deserializer reservations into chunks that fit onto a page.
R=mvstanton@chromium.org

Review URL: https://codereview.chromium.org/653033002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24639 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
parent 461a2f403e
commit 4f9fd83d85
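In short: a heap space's reservation is no longer a single size but a list of chunks, each small enough to fit onto one page, and a back reference to an already-serialized object is encoded as a chunk index plus an offset inside that chunk. As a rough standalone sketch of that bookkeeping on the serializer side (this is not the V8 code; the page size, the 20-bit offset split, and all names here are assumptions made for illustration):

// Standalone illustration only; not the V8 implementation.
#include <cassert>
#include <cstdint>
#include <vector>

// Assumed illustrative constants: chunks never exceed one "page".
constexpr uint32_t kPageSize = 1u << 20;      // hypothetical page size
constexpr uint32_t kOffsetBits = 20;          // enough bits to address a page
constexpr uint32_t kOffsetMask = (1u << kOffsetBits) - 1;

struct ChunkedAllocator {
  std::vector<uint32_t> completed_chunks;  // sizes of finished chunks
  uint32_t pending_chunk = 0;              // bytes used in the open chunk

  // Returns an encoded (chunk index, offset) back reference for the
  // allocation, starting a new chunk when the current one would overflow.
  uint32_t Allocate(uint32_t size) {
    assert(size <= kPageSize);
    if (pending_chunk + size > kPageSize) {
      completed_chunks.push_back(pending_chunk);
      pending_chunk = 0;
    }
    uint32_t encoded =
        (static_cast<uint32_t>(completed_chunks.size()) << kOffsetBits) |
        pending_chunk;
    pending_chunk += size;
    return encoded;
  }
};

int main() {
  ChunkedAllocator a;
  uint32_t ref = a.Allocate(64);
  uint32_t chunk_index = ref >> kOffsetBits;  // which reserved chunk
  uint32_t offset = ref & kOffsetMask;        // offset within that chunk
  assert(chunk_index == 0 && offset == 0);
  return 0;
}

The actual patch below expresses the same idea with V8's BitField helpers (OffsetBits, ChunkIndexBits) and uses Page::kMaxRegularHeapObjectSize as the chunk limit.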
@@ -1177,7 +1177,12 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
       compile_options == ScriptCompiler::kConsumeCodeCache &&
       !isolate->debug()->is_loaded()) {
     HistogramTimerScope timer(isolate->counters()->compile_deserialize());
-    return CodeSerializer::Deserialize(isolate, *cached_data, source);
+    Handle<SharedFunctionInfo> result;
+    if (CodeSerializer::Deserialize(isolate, *cached_data, source)
+            .ToHandle(&result)) {
+      return result;
+    }
+    // Deserializer failed. Fall through to compile.
   } else {
     maybe_result = compilation_cache->LookupScript(
         source, script_name, line_offset, column_offset,
@@ -28,6 +28,7 @@
 #include "src/natives.h"
 #include "src/runtime-profiler.h"
 #include "src/scopeinfo.h"
+#include "src/serialize.h"
 #include "src/snapshot.h"
 #include "src/utils.h"
 #include "src/v8threads.h"
@@ -919,33 +920,41 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
 }
 
 
-void Heap::ReserveSpace(int* sizes, Address* locations_out) {
+bool Heap::ReserveSpace(Reservation* reservations) {
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
   while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
     for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
-      if (sizes[space] == 0) continue;
+      Reservation* reservation = &reservations[space];
+      DCHECK_LE(1, reservation->length());
+      if (reservation->at(0).size == 0) continue;
       bool perform_gc = false;
       if (space == LO_SPACE) {
-        perform_gc = !lo_space()->CanAllocateSize(sizes[space]);
+        DCHECK_EQ(1, reservation->length());
+        perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
       } else {
-        AllocationResult allocation;
-        if (space == NEW_SPACE) {
-          allocation = new_space()->AllocateRaw(sizes[space]);
-        } else {
-          allocation = paged_space(space)->AllocateRaw(sizes[space]);
-        }
-        FreeListNode* node;
-        if (allocation.To(&node)) {
-          // Mark with a free list node, in case we have a GC before
-          // deserializing.
-          node->set_size(this, sizes[space]);
-          DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
-          locations_out[space] = node->address();
-        } else {
-          perform_gc = true;
+        for (auto& chunk : *reservation) {
+          AllocationResult allocation;
+          int size = chunk.size;
+          if (space == NEW_SPACE) {
+            allocation = new_space()->AllocateRaw(size);
+          } else {
+            allocation = paged_space(space)->AllocateRaw(size);
+          }
+          FreeListNode* node;
+          if (allocation.To(&node)) {
+            // Mark with a free list node, in case we have a GC before
+            // deserializing.
+            node->set_size(this, size);
+            DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
+            chunk.start = node->address();
+            chunk.end = node->address() + size;
+          } else {
+            perform_gc = true;
+            break;
+          }
         }
       }
       if (perform_gc) {
@@ -963,10 +972,7 @@ void Heap::ReserveSpace(int* sizes, Address* locations_out) {
     }
   }
 
-  if (gc_performed) {
-    // Failed to reserve the space after several attempts.
-    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
-  }
+  return !gc_performed;
 }
 
 
@@ -1009,7 +1009,16 @@ class Heap {
 
   // Support for partial snapshots. After calling this we have a linear
   // space to write objects in each space.
-  void ReserveSpace(int* sizes, Address* addresses);
+  struct Chunk {
+    uint32_t size;
+    Address start;
+    Address end;
+  };
+
+  typedef List<Chunk> Reservation;
+
+  // Returns false if not able to reserve.
+  bool ReserveSpace(Reservation* reservations);
 
   //
   // Support for the API.
@@ -80,7 +80,9 @@ class List {
 
   Vector<T> ToVector() const { return Vector<T>(data_, length_); }
 
-  Vector<const T> ToConstVector() { return Vector<const T>(data_, length_); }
+  Vector<const T> ToConstVector() const {
+    return Vector<const T>(data_, length_);
+  }
 
   // Adds a copy of the given 'element' to the end of the list,
   // expanding the list if necessary.
@@ -91,14 +91,25 @@ class SnapshotWriter {
 
     i::byte* snapshot_bytes = snapshot_data.begin();
     sink.PutBlob(snapshot_bytes, snapshot_data.length(), "snapshot");
-    for (size_t i = 0; i < arraysize(spaces); ++i)
-      sink.PutInt(serializer.CurrentAllocationAddress(spaces[i]), "spaces");
+    for (size_t i = 0; i < arraysize(spaces); ++i) {
+      i::Vector<const uint32_t> chunks =
+          serializer.FinalAllocationChunks(spaces[i]);
+      // For the start-up snapshot, none of the reservations has more than
+      // one chunk (reservation for each space fits onto a single page).
+      CHECK_EQ(1, chunks.length());
+      sink.PutInt(chunks[0], "spaces");
+    }
 
     i::byte* context_bytes = context_snapshot_data.begin();
     sink.PutBlob(context_bytes, context_snapshot_data.length(), "context");
-    for (size_t i = 0; i < arraysize(spaces); ++i)
-      sink.PutInt(context_serializer.CurrentAllocationAddress(spaces[i]),
-                  "spaces");
+    for (size_t i = 0; i < arraysize(spaces); ++i) {
+      i::Vector<const uint32_t> chunks =
+          context_serializer.FinalAllocationChunks(spaces[i]);
+      // For the context snapshot, none of the reservations has more than
+      // one chunk (reservation for each space fits onto a single page).
+      CHECK_EQ(1, chunks.length());
+      sink.PutInt(chunks[0], "spaces");
+    }
 
     size_t written = fwrite(startup_blob.begin(), 1, startup_blob.length(),
                             startup_blob_file_);
@@ -203,8 +214,12 @@ class SnapshotWriter {
 
   void WriteSizeVar(const i::Serializer& ser, const char* prefix,
                     const char* name, int space) const {
-    fprintf(fp_, "const int Snapshot::%s%s_space_used_ = %d;\n",
-            prefix, name, ser.CurrentAllocationAddress(space));
+    i::Vector<const uint32_t> chunks = ser.FinalAllocationChunks(space);
+    // For the start-up snapshot, none of the reservations has more than
+    // one chunk (total reservation fits into a single page).
+    CHECK_EQ(1, chunks.length());
+    fprintf(fp_, "const int Snapshot::%s%s_space_used_ = %d;\n", prefix, name,
+            chunks[0]);
   }
 
   void WriteSnapshotData(const i::List<i::byte>* data) const {
@@ -416,6 +431,9 @@ int main(int argc, char** argv) {
   context_ser.Serialize(&raw_context);
   ser.SerializeWeakReferences();
 
+  context_ser.FinalizeAllocation();
+  ser.FinalizeAllocation();
+
   {
     SnapshotWriter writer(argv[1]);
     if (i::FLAG_raw_file && i::FLAG_raw_context_file)

src/serialize.cc (173 lines changed)
@@ -598,9 +598,7 @@ Deserializer::Deserializer(SnapshotByteSource* source)
       source_(source),
       external_reference_decoder_(NULL),
       deserialized_large_objects_(0) {
-  for (int i = 0; i < kNumberOfSpaces; i++) {
-    reservations_[i] = kUninitializedReservation;
-  }
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
 }
 
 
@@ -613,10 +611,19 @@ void Deserializer::FlushICacheForNewCodeObjects() {
 }
 
 
+bool Deserializer::ReserveSpace() {
+  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+    high_water_[i] = reservations_[i][0].start;
+  }
+  return true;
+}
+
+
 void Deserializer::Deserialize(Isolate* isolate) {
   isolate_ = isolate;
   DCHECK(isolate_ != NULL);
-  isolate_->heap()->ReserveSpace(reservations_, high_water_);
+  if (!ReserveSpace()) FatalProcessOutOfMemory("deserializing context");
   // No active threads.
   DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
   // No active handles.
@@ -658,13 +665,17 @@ void Deserializer::Deserialize(Isolate* isolate) {
 }
 
 
-void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
+void Deserializer::DeserializePartial(Isolate* isolate, Object** root,
+                                      OnOOM on_oom) {
   isolate_ = isolate;
   for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
-    DCHECK(reservations_[i] != kUninitializedReservation);
+    DCHECK(reservations_[i].length() > 0);
+  }
+  if (!ReserveSpace()) {
+    if (on_oom == FATAL_ON_OOM) FatalProcessOutOfMemory("deserialize context");
+    *root = NULL;
+    return;
   }
-  Heap* heap = isolate->heap();
-  heap->ReserveSpace(reservations_, high_water_);
   if (external_reference_decoder_ == NULL) {
     external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
   }
@@ -700,7 +711,7 @@ Deserializer::~Deserializer() {
 void Deserializer::VisitPointers(Object** start, Object** end) {
   // The space must be new space. Any other space would cause ReadChunk to try
   // to update the remembered using NULL as the address.
-  ReadChunk(start, end, NEW_SPACE, NULL);
+  ReadData(start, end, NEW_SPACE, NULL);
 }
 
 
@@ -788,7 +799,7 @@ void Deserializer::ReadObject(int space_number,
   if (FLAG_log_snapshot_positions) {
     LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
   }
-  ReadChunk(current, limit, space_number, address);
+  ReadData(current, limit, space_number, address);
 
   // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
   // as a (weak) root. If this root is relocated correctly,
@@ -813,6 +824,9 @@ void Deserializer::ReadObject(int space_number,
 // pre-allocate that reserved space. During deserialization, all we need
 // to do is to bump up the pointer for each space in the reserved
 // space. This is also used for fixing back references.
+// We may have to split up the pre-allocation into several chunks
+// because it would not fit onto a single page, we have to keep track
+// of when to move to the next chunk.
 // Since multiple large objects cannot be folded into one large object
 // space allocation, we have to do an actual allocation when deserializing
 // each large object. Instead of tracking offset for back references, we
@@ -821,7 +835,7 @@ Address Deserializer::Allocate(int space_index, int size) {
   if (space_index == LO_SPACE) {
     AlwaysAllocateScope scope(isolate_);
     LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
-    Executability exec = static_cast<Executability>(source_->GetInt());
+    Executability exec = static_cast<Executability>(source_->Get());
     AllocationResult result = lo_space->AllocateRaw(size, exec);
     HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
     deserialized_large_objects_.Add(obj);
@@ -829,16 +843,28 @@ Address Deserializer::Allocate(int space_index, int size) {
   } else {
     DCHECK(space_index < kNumberOfPreallocatedSpaces);
     Address address = high_water_[space_index];
+    DCHECK_NE(NULL, address);
+    const Heap::Reservation& reservation = reservations_[space_index];
+    int chunk_index = current_chunk_[space_index];
+    if (address + size > reservation[chunk_index].end) {
+      // The last chunk size matches exactly the already deserialized data.
+      DCHECK_EQ(address, reservation[chunk_index].end);
+      // Move to next reserved chunk.
+      chunk_index = ++current_chunk_[space_index];
+      DCHECK_LT(chunk_index, reservation.length());
+      // Prepare for next allocation in the next chunk.
+      address = reservation[chunk_index].start;
+    } else {
+      high_water_[space_index] = address + size;
+    }
     high_water_[space_index] = address + size;
     return address;
   }
 }
 
 
-void Deserializer::ReadChunk(Object** current,
-                             Object** limit,
-                             int source_space,
-                             Address current_object_address) {
+void Deserializer::ReadData(Object** current, Object** limit, int source_space,
+                            Address current_object_address) {
   Isolate* const isolate = isolate_;
   // Write barrier support costs around 1% in startup time. In fact there
   // are no new space objects in current boot snapshots, so it's not needed,
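A note on the Deserializer::Allocate hunk above: allocation is a bump pointer within the current chunk of the space's reservation, and once a chunk has been filled exactly, allocation moves on to the next reserved chunk. A minimal standalone sketch of that control flow, with illustrative types and names rather than V8's:

// Standalone sketch; not the V8 implementation.
#include <cassert>
#include <cstdint>
#include <vector>

struct Chunk {
  uint8_t* start;
  uint8_t* end;
};

struct SpaceState {
  std::vector<Chunk> reservation;  // chunks reserved up front by the heap
  size_t current_chunk = 0;
  uint8_t* high_water = nullptr;   // next free address in the current chunk
};

// Returns the address for a new object of `size` bytes, moving on to the
// next reserved chunk once the current one has been filled exactly.
uint8_t* Allocate(SpaceState* space, size_t size) {
  uint8_t* address = space->high_water;
  if (address + size > space->reservation[space->current_chunk].end) {
    // The serializer sized the chunks so each one is used up exactly.
    assert(address == space->reservation[space->current_chunk].end);
    space->current_chunk++;
    assert(space->current_chunk < space->reservation.size());
    address = space->reservation[space->current_chunk].start;
  }
  space->high_water = address + size;
  return address;
}

int main() {
  static uint8_t backing[64];
  SpaceState space;
  space.reservation = {{backing, backing + 32}, {backing + 32, backing + 64}};
  space.high_water = space.reservation[0].start;
  assert(Allocate(&space, 32) == backing);       // fills the first chunk
  assert(Allocate(&space, 16) == backing + 32);  // moves to the second chunk
  return 0;
}

In the real patch the chunks come from Heap::ReserveSpace(), and the DCHECKs in the hunk play the role of the asserts here.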
@@ -890,7 +916,7 @@ void Deserializer::ReadChunk(Object** current,
     new_object = reinterpret_cast<Object*>(address); \
   } else if (where == kBackref) { \
     emit_write_barrier = (space_number == NEW_SPACE); \
-    new_object = GetAddressFromEnd(data & kSpaceMask); \
+    new_object = GetBackReferencedObject(data & kSpaceMask); \
     if (deserializing_user_code()) { \
       new_object = ProcessBackRefInSerializedCode(new_object); \
     } \
@@ -913,7 +939,7 @@ void Deserializer::ReadChunk(Object** current,
     current = reinterpret_cast<Object**>( \
         reinterpret_cast<Address>(current) + skip); \
     emit_write_barrier = (space_number == NEW_SPACE); \
-    new_object = GetAddressFromEnd(data & kSpaceMask); \
+    new_object = GetBackReferencedObject(data & kSpaceMask); \
     if (deserializing_user_code()) { \
       new_object = ProcessBackRefInSerializedCode(new_object); \
     } \
@@ -1221,7 +1247,7 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
       seen_large_objects_index_(0) {
   // The serializer is meant to be used only to generate initial heap images
   // from a context in which there is only one isolate.
-  for (int i = 0; i < kNumberOfSpaces; i++) fullness_[i] = 0;
+  for (int i = 0; i < kNumberOfSpaces; i++) pending_chunk_[i] = 0;
 }
 
 
@@ -1283,6 +1309,19 @@ void Serializer::VisitPointers(Object** start, Object** end) {
 }
 
 
+void Serializer::FinalizeAllocation() {
+  DCHECK_EQ(0, completed_chunks_[LO_SPACE].length());  // Not yet finalized.
+  for (int i = 0; i < kNumberOfSpaces; i++) {
+    // Complete the last pending chunk and if there are no completed chunks,
+    // make sure there is at least one empty chunk.
+    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
+      completed_chunks_[i].Add(pending_chunk_[i]);
+      pending_chunk_[i] = 0;
+    }
+  }
+}
+
+
 // This ensures that the partial snapshot cache keeps things alive during GC and
 // tracks their movement. When it is called during serialization of the startup
 // snapshot nothing happens. When the partial (context) snapshot is created,
@@ -1369,11 +1408,10 @@ void Serializer::SerializeReferenceToPreviousObject(HeapObject* heap_object,
     int index = address_mapper_.MappedTo(heap_object);
     sink_->PutInt(index, "large object index");
   } else {
-    int address = address_mapper_.MappedTo(heap_object);
-    int offset = CurrentAllocationAddress(space) - address;
+    uint32_t existing_allocation = address_mapper_.MappedTo(heap_object);
     // Shift out the bits that are always 0.
-    offset >>= kObjectAlignmentBits;
-    sink_->PutInt(offset, "offset");
+    existing_allocation >>= kObjectAlignmentBits;
+    sink_->PutInt(existing_allocation, "allocation");
   }
 }
 
@@ -1533,15 +1571,15 @@ void Serializer::ObjectSerializer::SerializePrologue(int space, int size,
   // Mark this object as already serialized.
   if (space == LO_SPACE) {
     if (object_->IsCode()) {
-      sink_->PutInt(EXECUTABLE, "executable large object");
+      sink_->Put(EXECUTABLE, "executable large object");
     } else {
-      sink_->PutInt(NOT_EXECUTABLE, "not executable large object");
+      sink_->Put(NOT_EXECUTABLE, "not executable large object");
     }
     int index = serializer_->AllocateLargeObject(size);
     serializer_->address_mapper()->AddMapping(object_, index);
   } else {
-    int offset = serializer_->Allocate(space, size);
-    serializer_->address_mapper()->AddMapping(object_, offset);
+    int allocation = serializer_->Allocate(space, size);
+    serializer_->address_mapper()->AddMapping(object_, allocation);
   }
 
   // Serialize the map (first word of the object).
@@ -1867,17 +1905,32 @@ int Serializer::SpaceOfObject(HeapObject* object) {
 }
 
 
-int Serializer::AllocateLargeObject(int size) {
-  fullness_[LO_SPACE] += size;
+uint32_t Serializer::AllocateLargeObject(int size) {
+  // Large objects are allocated one-by-one when deserializing. We do not
+  // have to keep track of multiple chunks.
+  pending_chunk_[LO_SPACE] += size;
   return seen_large_objects_index_++;
 }
 
 
-int Serializer::Allocate(int space, int size) {
+uint32_t Serializer::Allocate(int space, int size) {
   CHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
-  int allocation_address = fullness_[space];
-  fullness_[space] = allocation_address + size;
-  return allocation_address;
+  DCHECK(size > 0 && size < Page::kMaxRegularHeapObjectSize);
+  uint32_t new_chunk_size = pending_chunk_[space] + size;
+  uint32_t allocation;
+  if (new_chunk_size > Page::kMaxRegularHeapObjectSize) {
+    // The new chunk size would not fit onto a single page. Complete the
+    // current chunk and start a new one.
+    completed_chunks_[space].Add(pending_chunk_[space]);
+    pending_chunk_[space] = 0;
+    new_chunk_size = size;
+  }
+  // For back-referencing, each allocation is encoded as a combination
+  // of chunk index and offset inside the chunk.
+  allocation = ChunkIndexBits::encode(completed_chunks_[space].length()) |
+               OffsetBits::encode(pending_chunk_[space]);
+  pending_chunk_[space] = new_chunk_size;
+  return allocation;
 }
 
 
@@ -1923,6 +1976,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
   Object** location = Handle<Object>::cast(info).location();
   cs.VisitPointer(location);
   cs.Pad();
+  cs.FinalizeAllocation();
 
   SerializedCodeData data(&payload, &cs);
   ScriptData* script_data = data.GetScriptData();
@@ -2093,9 +2147,8 @@ void CodeSerializer::SerializeSourceObject(HowToCode how_to_code,
 }
 
 
-Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
-                                                       ScriptData* data,
-                                                       Handle<String> source) {
+MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
+    Isolate* isolate, ScriptData* data, Handle<String> source) {
   base::ElapsedTimer timer;
   if (FLAG_profile_deserialization) timer.Start();
 
@@ -2107,10 +2160,15 @@ Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
   SerializedCodeData scd(data, *source);
   SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
   Deserializer deserializer(&payload);
+
   STATIC_ASSERT(NEW_SPACE == 0);
-  for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
-    deserializer.set_reservation(i, scd.GetReservation(i));
+  int current_space = NEW_SPACE;
+  Vector<const SerializedCodeData::Reservation> res = scd.Reservations();
+  for (const auto& r : res) {
+    deserializer.AddReservation(current_space, r.chunk_size());
+    if (r.is_last_chunk()) current_space++;
   }
+  DCHECK_EQ(kNumberOfSpaces, current_space);
 
   // Prepare and register list of attached objects.
   Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys();
@@ -2124,7 +2182,12 @@ Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
     deserializer.SetAttachedObjects(&attached_objects);
 
     // Deserialize.
-    deserializer.DeserializePartial(isolate, &root);
+    deserializer.DeserializePartial(isolate, &root, Deserializer::NULL_ON_OOM);
+    if (root == NULL) {
+      // Deserializing may fail if the reservations cannot be fulfilled.
+      if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
+      return MaybeHandle<SharedFunctionInfo>();
+    }
     deserializer.FlushICacheForNewCodeObjects();
   }
 
@@ -2144,10 +2207,25 @@ SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs)
   DisallowHeapAllocation no_gc;
   List<uint32_t>* stub_keys = cs->stub_keys();
 
+  // Gather reservation chunk sizes.
+  List<uint32_t> reservations(SerializerDeserializer::kNumberOfSpaces);
+  STATIC_ASSERT(NEW_SPACE == 0);
+  for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
+    Vector<const uint32_t> chunks = cs->FinalAllocationChunks(i);
+    for (int j = 0; j < chunks.length(); j++) {
+      DCHECK(i == LO_SPACE || chunks[j] < Page::kMaxRegularHeapObjectSize);
+      uint32_t chunk = ChunkSizeBits::encode(chunks[j]) |
+                       IsLastChunkBits::encode(j == chunks.length() - 1);
+      reservations.Add(chunk);
+    }
+  }
+
   // Calculate sizes.
+  int reservation_size = reservations.length() * kInt32Size;
   int num_stub_keys = stub_keys->length();
   int stub_keys_size = stub_keys->length() * kInt32Size;
-  int data_length = kHeaderSize + stub_keys_size + payload->length();
+  int data_length =
+      kHeaderSize + reservation_size + stub_keys_size + payload->length();
 
   // Allocate backing store and create result data.
   byte* data = NewArray<byte>(data_length);
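The reservation data gathered above is written into the serialized code data as one 32-bit word per chunk; per ChunkSizeBits and IsLastChunkBits in serialize.h, the low 31 bits hold the chunk size and the top bit marks the last chunk of its space. A standalone sketch of that word layout using plain shifts instead of V8's BitField helpers:

// Standalone sketch of the per-chunk reservation word; illustrative only.
#include <cassert>
#include <cstdint>

constexpr uint32_t kIsLastChunkBit = 1u << 31;

uint32_t EncodeReservation(uint32_t chunk_size, bool is_last_chunk) {
  assert((chunk_size & kIsLastChunkBit) == 0);  // size must fit in 31 bits
  return chunk_size | (is_last_chunk ? kIsLastChunkBit : 0);
}

uint32_t ChunkSize(uint32_t word) { return word & ~kIsLastChunkBit; }
bool IsLastChunk(uint32_t word) { return (word & kIsLastChunkBit) != 0; }

int main() {
  uint32_t w = EncodeReservation(4096, true);
  assert(ChunkSize(w) == 4096 && IsLastChunk(w));
  return 0;
}

On the reading side, CodeSerializer::Deserialize walks these words in order and advances to the next space whenever the last-chunk bit is set, as the hunk further above shows.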
@@ -2157,20 +2235,21 @@ SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs)
 
   // Set header values.
   SetHeaderValue(kCheckSumOffset, CheckSum(cs->source()));
+  SetHeaderValue(kReservationsOffset, reservations.length());
   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
   SetHeaderValue(kPayloadLengthOffset, payload->length());
-  STATIC_ASSERT(NEW_SPACE == 0);
-  for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
-    SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i));
-  }
+
+  // Copy reservation chunk sizes.
+  CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+            reservation_size);
 
   // Copy code stub keys.
-  CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(stub_keys->begin()),
-            stub_keys_size);
+  CopyBytes(data + kHeaderSize + reservation_size,
+            reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
 
   // Copy serialized data.
-  CopyBytes(data + kHeaderSize + stub_keys_size, payload->begin(),
-            static_cast<size_t>(payload->length()));
+  CopyBytes(data + kHeaderSize + reservation_size + stub_keys_size,
+            payload->begin(), static_cast<size_t>(payload->length()));
 }
 
 

src/serialize.h (138 lines changed)
@@ -152,6 +152,11 @@ class SerializerDeserializer: public ObjectVisitor {
   static const int kNumberOfPreallocatedSpaces = LO_SPACE;
   static const int kNumberOfSpaces = INVALID_SPACE;
 
+  // To encode object for back-references.
+  class OffsetBits : public BitField<uint32_t, 0, kPageSizeBits> {};
+  class ChunkIndexBits
+      : public BitField<uint32_t, kPageSizeBits, 32 - kPageSizeBits> {};
+
  protected:
   // Where the pointed-to object can be found:
   enum Where {
@@ -248,13 +253,18 @@ class Deserializer: public SerializerDeserializer {
   // Deserialize the snapshot into an empty heap.
   void Deserialize(Isolate* isolate);
 
-  // Deserialize a single object and the objects reachable from it.
-  void DeserializePartial(Isolate* isolate, Object** root);
-
-  void set_reservation(int space_number, int reservation) {
-    DCHECK(space_number >= 0);
-    DCHECK(space_number < kNumberOfSpaces);
-    reservations_[space_number] = reservation;
+  enum OnOOM { FATAL_ON_OOM, NULL_ON_OOM };
+
+  // Deserialize a single object and the objects reachable from it.
+  // We may want to abort gracefully even if deserialization fails.
+  void DeserializePartial(Isolate* isolate, Object** root,
+                          OnOOM on_oom = FATAL_ON_OOM);
+
+  void AddReservation(int space, uint32_t chunk) {
+    DCHECK(space >= 0);
+    DCHECK(space < kNumberOfSpaces);
+    DCHECK(space == LO_SPACE || chunk < Page::kMaxRegularHeapObjectSize);
+    reservations_[space].Add({chunk, NULL, NULL});
   }
 
   void FlushICacheForNewCodeObjects();
@@ -274,6 +284,8 @@ class Deserializer: public SerializerDeserializer {
     UNREACHABLE();
   }
 
+  bool ReserveSpace();
+
   // Allocation sites are present in the snapshot, and must be linked into
   // a list at deserialization time.
   void RelinkAllocationSite(AllocationSite* site);
@@ -283,8 +295,8 @@ class Deserializer: public SerializerDeserializer {
   // of the object we are writing into, or NULL if we are not writing into an
   // object, i.e. if we are writing a series of tagged values that are not on
   // the heap.
-  void ReadChunk(
-      Object** start, Object** end, int space, Address object_address);
+  void ReadData(Object** start, Object** end, int space,
+                Address object_address);
   void ReadObject(int space_number, Object** write_back);
   Address Allocate(int space_index, int size);
 
@@ -293,13 +305,20 @@ class Deserializer: public SerializerDeserializer {
   Object* ProcessBackRefInSerializedCode(Object* obj);
 
   // This returns the address of an object that has been described in the
-  // snapshot as being offset bytes back in a particular space.
-  HeapObject* GetAddressFromEnd(int space) {
-    int offset = source_->GetInt();
-    if (space == LO_SPACE) return deserialized_large_objects_[offset];
-    DCHECK(space < kNumberOfPreallocatedSpaces);
-    offset <<= kObjectAlignmentBits;
-    return HeapObject::FromAddress(high_water_[space] - offset);
+  // snapshot by chunk index and offset.
+  HeapObject* GetBackReferencedObject(int space) {
+    if (space == LO_SPACE) {
+      uint32_t index = source_->GetInt();
+      return deserialized_large_objects_[index];
+    } else {
+      uint32_t allocation = source_->GetInt() << kObjectAlignmentBits;
+      DCHECK(space < kNumberOfPreallocatedSpaces);
+      uint32_t chunk_index = ChunkIndexBits::decode(allocation);
+      uint32_t offset = OffsetBits::decode(allocation);
+      DCHECK_LE(chunk_index, current_chunk_[space]);
+      return HeapObject::FromAddress(reservations_[space][chunk_index].start +
+                                     offset);
+    }
   }
 
   // Cached current isolate.
@@ -309,13 +328,14 @@ class Deserializer: public SerializerDeserializer {
   Vector<Handle<Object> >* attached_objects_;
 
   SnapshotByteSource* source_;
-  // This is the address of the next object that will be allocated in each
-  // space.  It is used to calculate the addresses of back-references.
+  // The address of the next object that will be allocated in each space.
+  // Each space has a number of chunks reserved by the GC, with each chunk
+  // fitting into a page. Deserialized objects are allocated into the
+  // current chunk of the target space by bumping up high water mark.
+  Heap::Reservation reservations_[kNumberOfSpaces];
+  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
   Address high_water_[kNumberOfPreallocatedSpaces];
 
-  int reservations_[kNumberOfSpaces];
-  static const intptr_t kUninitializedReservation = -1;
-
   ExternalReferenceDecoder* external_reference_decoder_;
 
   List<HeapObject*> deserialized_large_objects_;
@@ -380,11 +400,13 @@ class Serializer : public SerializerDeserializer {
   Serializer(Isolate* isolate, SnapshotByteSink* sink);
   ~Serializer();
   void VisitPointers(Object** start, Object** end);
-  // You can call this after serialization to find out how much space was used
-  // in each space.
-  int CurrentAllocationAddress(int space) const {
-    DCHECK(space < kNumberOfSpaces);
-    return fullness_[space];
+
+  void FinalizeAllocation();
+
+  Vector<const uint32_t> FinalAllocationChunks(int space) const {
+    DCHECK_EQ(1, completed_chunks_[LO_SPACE].length());  // Already finalized.
+    DCHECK_EQ(0, pending_chunk_[space]);  // No pending chunks.
+    return completed_chunks_[space].ToConstVector();
   }
 
   Isolate* isolate() const { return isolate_; }
@@ -470,8 +492,8 @@ class Serializer : public SerializerDeserializer {
   void InitializeAllocators();
   // This will return the space for an object.
   static int SpaceOfObject(HeapObject* object);
-  int AllocateLargeObject(int size);
-  int Allocate(int space, int size);
+  uint32_t AllocateLargeObject(int size);
+  uint32_t Allocate(int space, int size);
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_->Encode(addr);
   }
@@ -483,9 +505,14 @@ class Serializer : public SerializerDeserializer {
   bool ShouldBeSkipped(Object** current);
 
   Isolate* isolate_;
-  // Keep track of the fullness of each space in order to generate
-  // relative addresses for back references.
-  int fullness_[kNumberOfSpaces];
+
+  // Objects from the same space are put into chunks for bulk-allocation
+  // when deserializing. We have to make sure that each chunk fits into a
+  // page. So we track the chunk size in pending_chunk_ of a space, but
+  // when it exceeds a page, we complete the current chunk and start a new one.
+  uint32_t pending_chunk_[kNumberOfSpaces];
+  List<uint32_t> completed_chunks_[kNumberOfSpaces];
+
   SnapshotByteSink* sink_;
   ExternalReferenceEncoder* external_reference_encoder_;
 
@@ -503,7 +530,7 @@ class Serializer : public SerializerDeserializer {
  private:
   CodeAddressMap* code_address_map_;
   // We map serialized large objects to indexes for back-referencing.
-  int seen_large_objects_index_;
+  uint32_t seen_large_objects_index_;
   DISALLOW_COPY_AND_ASSIGN(Serializer);
 };
 
@@ -585,9 +612,8 @@ class CodeSerializer : public Serializer {
                                      Handle<SharedFunctionInfo> info,
                                      Handle<String> source);
 
-  static Handle<SharedFunctionInfo> Deserialize(Isolate* isolate,
-                                                ScriptData* data,
-                                                Handle<String> source);
+  MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
+      Isolate* isolate, ScriptData* data, Handle<String> source);
 
   static const int kSourceObjectIndex = 0;
   static const int kCodeStubsBaseIndex = 1;
@@ -654,15 +680,35 @@ class SerializedCodeData {
     return result;
   }
 
+  class Reservation {
+   public:
+    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation); }
+    bool is_last_chunk() const { return IsLastChunkBits::decode(reservation); }
+
+   private:
+    uint32_t reservation;
+
+    DISALLOW_COPY_AND_ASSIGN(Reservation);
+  };
+
+  Vector<const Reservation> Reservations() const {
+    return Vector<const Reservation>(reinterpret_cast<const Reservation*>(
+                                         script_data_->data() + kHeaderSize),
+                                     GetHeaderValue(kReservationsOffset));
+  }
+
   Vector<const uint32_t> CodeStubKeys() const {
-    return Vector<const uint32_t>(
-        reinterpret_cast<const uint32_t*>(script_data_->data() + kHeaderSize),
-        GetHeaderValue(kNumCodeStubKeysOffset));
+    int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
+    const byte* start = script_data_->data() + kHeaderSize + reservations_size;
+    return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
+                                  GetHeaderValue(kNumCodeStubKeysOffset));
   }
 
   const byte* Payload() const {
+    int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
     int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
-    return script_data_->data() + kHeaderSize + code_stubs_size;
+    return script_data_->data() + kHeaderSize + reservations_size +
+           code_stubs_size;
   }
 
   int PayloadLength() const {
@@ -672,10 +718,6 @@ class SerializedCodeData {
     return payload_length;
   }
 
-  int GetReservation(int space) const {
-    return GetHeaderValue(kReservationsOffset + space);
-  }
-
  private:
   void SetHeaderValue(int offset, int value) {
     reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] =
@@ -696,13 +738,13 @@ class SerializedCodeData {
   // [2] payload length
   // [3..9] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE.
   static const int kCheckSumOffset = 0;
-  static const int kNumCodeStubKeysOffset = 1;
-  static const int kPayloadLengthOffset = 2;
-  static const int kReservationsOffset = 3;
+  static const int kReservationsOffset = 1;
+  static const int kNumCodeStubKeysOffset = 2;
+  static const int kPayloadLengthOffset = 3;
+  static const int kHeaderSize = (kPayloadLengthOffset + 1) * kIntSize;
 
-  static const int kHeaderEntries =
-      kReservationsOffset + SerializerDeserializer::kNumberOfSpaces;
-  static const int kHeaderSize = kHeaderEntries * kIntSize;
+  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
+  class IsLastChunkBits : public BitField<bool, 31, 1> {};
 
   // Following the header, we store, in sequential order
   // - code stub keys
@@ -15,14 +15,14 @@ namespace v8 {
 namespace internal {
 
 void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
-  deserializer->set_reservation(NEW_SPACE, new_space_used_);
-  deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_);
-  deserializer->set_reservation(OLD_DATA_SPACE, data_space_used_);
-  deserializer->set_reservation(CODE_SPACE, code_space_used_);
-  deserializer->set_reservation(MAP_SPACE, map_space_used_);
-  deserializer->set_reservation(CELL_SPACE, cell_space_used_);
-  deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_space_used_);
-  deserializer->set_reservation(LO_SPACE, lo_space_used_);
+  deserializer->AddReservation(NEW_SPACE, new_space_used_);
+  deserializer->AddReservation(OLD_POINTER_SPACE, pointer_space_used_);
+  deserializer->AddReservation(OLD_DATA_SPACE, data_space_used_);
+  deserializer->AddReservation(CODE_SPACE, code_space_used_);
+  deserializer->AddReservation(MAP_SPACE, map_space_used_);
+  deserializer->AddReservation(CELL_SPACE, cell_space_used_);
+  deserializer->AddReservation(PROPERTY_CELL_SPACE, property_cell_space_used_);
+  deserializer->AddReservation(LO_SPACE, lo_space_used_);
 }
 
 
@@ -59,15 +59,15 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
                             context_raw_size_);
   Deserializer deserializer(&source);
   Object* root;
-  deserializer.set_reservation(NEW_SPACE, context_new_space_used_);
-  deserializer.set_reservation(OLD_POINTER_SPACE, context_pointer_space_used_);
-  deserializer.set_reservation(OLD_DATA_SPACE, context_data_space_used_);
-  deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
-  deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
-  deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
-  deserializer.set_reservation(PROPERTY_CELL_SPACE,
+  deserializer.AddReservation(NEW_SPACE, context_new_space_used_);
+  deserializer.AddReservation(OLD_POINTER_SPACE, context_pointer_space_used_);
+  deserializer.AddReservation(OLD_DATA_SPACE, context_data_space_used_);
+  deserializer.AddReservation(CODE_SPACE, context_code_space_used_);
+  deserializer.AddReservation(MAP_SPACE, context_map_space_used_);
+  deserializer.AddReservation(CELL_SPACE, context_cell_space_used_);
+  deserializer.AddReservation(PROPERTY_CELL_SPACE,
                               context_property_cell_space_used_);
-  deserializer.set_reservation(LO_SPACE, context_lo_space_used_);
+  deserializer.AddReservation(LO_SPACE, context_lo_space_used_);
   deserializer.DeserializePartial(isolate, &root);
   CHECK(root->IsContext());
   return Handle<Context>(Context::cast(root));
@@ -58,17 +58,16 @@ bool Snapshot::Initialize(Isolate* isolate) {
     }
     SnapshotByteSource source(snapshot_impl_->data, snapshot_impl_->size);
     Deserializer deserializer(&source);
-    deserializer.set_reservation(NEW_SPACE, snapshot_impl_->new_space_used);
-    deserializer.set_reservation(OLD_POINTER_SPACE,
+    deserializer.AddReservation(NEW_SPACE, snapshot_impl_->new_space_used);
+    deserializer.AddReservation(OLD_POINTER_SPACE,
                                  snapshot_impl_->pointer_space_used);
-    deserializer.set_reservation(OLD_DATA_SPACE,
-                                 snapshot_impl_->data_space_used);
-    deserializer.set_reservation(CODE_SPACE, snapshot_impl_->code_space_used);
-    deserializer.set_reservation(MAP_SPACE, snapshot_impl_->map_space_used);
-    deserializer.set_reservation(CELL_SPACE, snapshot_impl_->cell_space_used);
-    deserializer.set_reservation(PROPERTY_CELL_SPACE,
-                                 snapshot_impl_->property_cell_space_used);
-    deserializer.set_reservation(LO_SPACE, snapshot_impl_->lo_space_used);
+    deserializer.AddReservation(OLD_DATA_SPACE, snapshot_impl_->data_space_used);
+    deserializer.AddReservation(CODE_SPACE, snapshot_impl_->code_space_used);
+    deserializer.AddReservation(MAP_SPACE, snapshot_impl_->map_space_used);
+    deserializer.AddReservation(CELL_SPACE, snapshot_impl_->cell_space_used);
+    deserializer.AddReservation(PROPERTY_CELL_SPACE,
+                                snapshot_impl_->property_cell_space_used);
+    deserializer.AddReservation(LO_SPACE, snapshot_impl_->lo_space_used);
     bool success = isolate->Init(&deserializer);
     if (FLAG_profile_deserialization) {
       double ms = timer.Elapsed().InMillisecondsF();
@@ -85,22 +84,21 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
   SnapshotByteSource source(snapshot_impl_->context_data,
                             snapshot_impl_->context_size);
   Deserializer deserializer(&source);
-  deserializer.set_reservation(NEW_SPACE,
+  deserializer.AddReservation(NEW_SPACE,
                                snapshot_impl_->context_new_space_used);
-  deserializer.set_reservation(OLD_POINTER_SPACE,
+  deserializer.AddReservation(OLD_POINTER_SPACE,
                                snapshot_impl_->context_pointer_space_used);
-  deserializer.set_reservation(OLD_DATA_SPACE,
+  deserializer.AddReservation(OLD_DATA_SPACE,
                                snapshot_impl_->context_data_space_used);
-  deserializer.set_reservation(CODE_SPACE,
+  deserializer.AddReservation(CODE_SPACE,
                                snapshot_impl_->context_code_space_used);
-  deserializer.set_reservation(MAP_SPACE,
+  deserializer.AddReservation(MAP_SPACE,
                                snapshot_impl_->context_map_space_used);
-  deserializer.set_reservation(CELL_SPACE,
+  deserializer.AddReservation(CELL_SPACE,
                                snapshot_impl_->context_cell_space_used);
-  deserializer.set_reservation(PROPERTY_CELL_SPACE,
-                               snapshot_impl_->
-                                   context_property_cell_space_used);
-  deserializer.set_reservation(LO_SPACE, snapshot_impl_->context_lo_space_used);
+  deserializer.AddReservation(PROPERTY_CELL_SPACE,
+                              snapshot_impl_->context_property_cell_space_used);
+  deserializer.AddReservation(LO_SPACE, snapshot_impl_->context_lo_space_used);
   Object* root;
   deserializer.DeserializePartial(isolate, &root);
   CHECK(root->IsContext());
@@ -58,6 +58,10 @@ class Vector {
 
   T& last() { return start_[length_ - 1]; }
 
+  typedef T* iterator;
+  inline iterator begin() const { return &start_[0]; }
+  inline iterator end() const { return &start_[length_]; }
+
   // Returns a clone of this vector with a new backing store.
   Vector<T> Clone() const {
     T* result = NewArray<T>(length_);
@@ -137,10 +137,7 @@ class FileByteSink : public SnapshotByteSink {
   virtual int Position() {
     return ftell(fp_);
   }
-  void WriteSpaceUsed(int new_space_used, int pointer_space_used,
-                      int data_space_used, int code_space_used,
-                      int map_space_used, int cell_space_used,
-                      int property_cell_space_used, int lo_space_used);
+  void WriteSpaceUsed(Serializer* serializer);
 
  private:
  FILE* fp_;
@@ -148,24 +145,37 @@ class FileByteSink : public SnapshotByteSink {
 };
 
 
-void FileByteSink::WriteSpaceUsed(int new_space_used, int pointer_space_used,
-                                  int data_space_used, int code_space_used,
-                                  int map_space_used, int cell_space_used,
-                                  int property_cell_space_used,
-                                  int lo_space_used) {
+void FileByteSink::WriteSpaceUsed(Serializer* ser) {
   int file_name_length = StrLength(file_name_) + 10;
   Vector<char> name = Vector<char>::New(file_name_length + 1);
   SNPrintF(name, "%s.size", file_name_);
   FILE* fp = v8::base::OS::FOpen(name.start(), "w");
   name.Dispose();
-  fprintf(fp, "new %d\n", new_space_used);
-  fprintf(fp, "pointer %d\n", pointer_space_used);
-  fprintf(fp, "data %d\n", data_space_used);
-  fprintf(fp, "code %d\n", code_space_used);
-  fprintf(fp, "map %d\n", map_space_used);
-  fprintf(fp, "cell %d\n", cell_space_used);
-  fprintf(fp, "property cell %d\n", property_cell_space_used);
-  fprintf(fp, "lo %d\n", lo_space_used);
+
+  Vector<const uint32_t> chunks = ser->FinalAllocationChunks(NEW_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "new %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(OLD_POINTER_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "pointer %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(OLD_DATA_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "data %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(CODE_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "code %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(MAP_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "map %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(CELL_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "cell %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(PROPERTY_CELL_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "property cell %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(LO_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "lo %d\n", chunks[0]);
   fclose(fp);
 }
 
@@ -174,15 +184,9 @@ static bool WriteToFile(Isolate* isolate, const char* snapshot_file) {
   FileByteSink file(snapshot_file);
   StartupSerializer ser(isolate, &file);
   ser.Serialize();
+  ser.FinalizeAllocation();
 
-  file.WriteSpaceUsed(ser.CurrentAllocationAddress(NEW_SPACE),
-                      ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
-                      ser.CurrentAllocationAddress(OLD_DATA_SPACE),
-                      ser.CurrentAllocationAddress(CODE_SPACE),
-                      ser.CurrentAllocationAddress(MAP_SPACE),
-                      ser.CurrentAllocationAddress(CELL_SPACE),
-                      ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-                      ser.CurrentAllocationAddress(LO_SPACE));
+  file.WriteSpaceUsed(&ser);
 
   return true;
 }
@@ -258,14 +262,14 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
 #undef fscanf
 #endif
   fclose(fp);
-  deserializer->set_reservation(NEW_SPACE, new_size);
-  deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
-  deserializer->set_reservation(OLD_DATA_SPACE, data_size);
-  deserializer->set_reservation(CODE_SPACE, code_size);
-  deserializer->set_reservation(MAP_SPACE, map_size);
-  deserializer->set_reservation(CELL_SPACE, cell_size);
-  deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_size);
-  deserializer->set_reservation(LO_SPACE, lo_size);
+  deserializer->AddReservation(NEW_SPACE, new_size);
+  deserializer->AddReservation(OLD_POINTER_SPACE, pointer_size);
+  deserializer->AddReservation(OLD_DATA_SPACE, data_size);
+  deserializer->AddReservation(CODE_SPACE, code_size);
+  deserializer->AddReservation(MAP_SPACE, map_size);
+  deserializer->AddReservation(CELL_SPACE, cell_size);
+  deserializer->AddReservation(PROPERTY_CELL_SPACE, property_cell_size);
+  deserializer->AddReservation(LO_SPACE, lo_size);
 }
 
 
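The rename from set_reservation() to AddReservation() in the hunk above suggests that a space can now be registered as a list of chunks rather than a single size, even though this test still feeds exactly one chunk per space from the .size file. Purely as an illustration (none of this is in the patch, AddChunkedReservation is an invented helper, and kChunkSize is an invented constant rather than a real V8 limit), a caller that wanted page-friendly reservations could split a large size before handing it over:

// Hypothetical helper, for illustration only: register a large reservation
// as several smaller chunks via the AddReservation() call shown above.
static void AddChunkedReservation(Deserializer* deserializer, int space,
                                  int total_size) {
  static const int kChunkSize = 512 * KB;  // Invented budget, not a V8 constant.
  while (total_size > kChunkSize) {
    deserializer->AddReservation(space, kChunkSize);
    total_size -= kChunkSize;
  }
  deserializer->AddReservation(space, total_size);  // Remainder, possibly zero.
}

In the test above, the sizes read back with fscanf are registered unchanged, one chunk per space, which is what keeps the writer-side CHECK_EQ(1, chunks.length()) assertions valid.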
@@ -445,25 +449,12 @@ UNINITIALIZED_TEST(PartialSerialization) {
       p_ser.Serialize(&raw_foo);
       startup_serializer.SerializeWeakReferences();
 
-      partial_sink.WriteSpaceUsed(
-          p_ser.CurrentAllocationAddress(NEW_SPACE),
-          p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
-          p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
-          p_ser.CurrentAllocationAddress(CODE_SPACE),
-          p_ser.CurrentAllocationAddress(MAP_SPACE),
-          p_ser.CurrentAllocationAddress(CELL_SPACE),
-          p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-          p_ser.CurrentAllocationAddress(LO_SPACE));
+      p_ser.FinalizeAllocation();
+      startup_serializer.FinalizeAllocation();
 
-      startup_sink.WriteSpaceUsed(
-          startup_serializer.CurrentAllocationAddress(NEW_SPACE),
-          startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
-          startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
-          startup_serializer.CurrentAllocationAddress(CODE_SPACE),
-          startup_serializer.CurrentAllocationAddress(MAP_SPACE),
-          startup_serializer.CurrentAllocationAddress(CELL_SPACE),
-          startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-          startup_serializer.CurrentAllocationAddress(LO_SPACE));
+      partial_sink.WriteSpaceUsed(&p_ser);
+
+      startup_sink.WriteSpaceUsed(&startup_serializer);
       startup_name.Dispose();
     }
     v8_isolate->Exit();
@@ -570,25 +561,12 @@ UNINITIALIZED_TEST(ContextSerialization) {
       p_ser.Serialize(&raw_context);
       startup_serializer.SerializeWeakReferences();
 
-      partial_sink.WriteSpaceUsed(
-          p_ser.CurrentAllocationAddress(NEW_SPACE),
-          p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
-          p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
-          p_ser.CurrentAllocationAddress(CODE_SPACE),
-          p_ser.CurrentAllocationAddress(MAP_SPACE),
-          p_ser.CurrentAllocationAddress(CELL_SPACE),
-          p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-          p_ser.CurrentAllocationAddress(LO_SPACE));
+      p_ser.FinalizeAllocation();
+      startup_serializer.FinalizeAllocation();
 
-      startup_sink.WriteSpaceUsed(
-          startup_serializer.CurrentAllocationAddress(NEW_SPACE),
-          startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
-          startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
-          startup_serializer.CurrentAllocationAddress(CODE_SPACE),
-          startup_serializer.CurrentAllocationAddress(MAP_SPACE),
-          startup_serializer.CurrentAllocationAddress(CELL_SPACE),
-          startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-          startup_serializer.CurrentAllocationAddress(LO_SPACE));
+      partial_sink.WriteSpaceUsed(&p_ser);
+
+      startup_sink.WriteSpaceUsed(&startup_serializer);
       startup_name.Dispose();
     }
     v8_isolate->Dispose();
@@ -901,6 +879,78 @@ TEST(SerializeToplevelLargeString) {
 }
 
 
+TEST(SerializeToplevelThreeBigStrings) {
+  FLAG_serialize_toplevel = true;
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  Factory* f = isolate->factory();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  Vector<const uint8_t> source_a =
+      ConstructSource(STATIC_CHAR_VECTOR("var a = \""), STATIC_CHAR_VECTOR("a"),
+                      STATIC_CHAR_VECTOR("\";"), 700000);
+  Handle<String> source_a_str =
+      f->NewStringFromOneByte(source_a).ToHandleChecked();
+
+  Vector<const uint8_t> source_b =
+      ConstructSource(STATIC_CHAR_VECTOR("var b = \""), STATIC_CHAR_VECTOR("b"),
+                      STATIC_CHAR_VECTOR("\";"), 600000);
+  Handle<String> source_b_str =
+      f->NewStringFromOneByte(source_b).ToHandleChecked();
+
+  Vector<const uint8_t> source_c =
+      ConstructSource(STATIC_CHAR_VECTOR("var c = \""), STATIC_CHAR_VECTOR("c"),
+                      STATIC_CHAR_VECTOR("\";"), 500000);
+  Handle<String> source_c_str =
+      f->NewStringFromOneByte(source_c).ToHandleChecked();
+
+  Handle<String> source_str =
+      f->NewConsString(
+           f->NewConsString(source_a_str, source_b_str).ToHandleChecked(),
+           source_c_str).ToHandleChecked();
+
+  Handle<JSObject> global(isolate->context()->global_object());
+  ScriptData* cache = NULL;
+
+  Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+      source_str, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, &cache,
+      v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+
+  Handle<SharedFunctionInfo> copy;
+  {
+    DisallowCompilation no_compile_expected(isolate);
+    copy = Compiler::CompileScript(
+        source_str, Handle<String>(), 0, 0, false,
+        Handle<Context>(isolate->native_context()), NULL, &cache,
+        v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+  }
+  CHECK_NE(*orig, *copy);
+
+  Handle<JSFunction> copy_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          copy, isolate->native_context());
+
+  Execution::Call(isolate, copy_fun, global, 0, NULL);
+
+  CHECK_EQ(600000 + 700000, CompileRun("(a + b).length")->Int32Value());
+  CHECK_EQ(500000 + 600000, CompileRun("(b + c).length")->Int32Value());
+  Heap* heap = isolate->heap();
+  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("a")->ToString()),
+                      OLD_DATA_SPACE));
+  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("b")->ToString()),
+                      OLD_DATA_SPACE));
+  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("c")->ToString()),
+                      OLD_DATA_SPACE));
+
+  delete cache;
+  source_a.Dispose();
+  source_b.Dispose();
+}
+
+
 class SerializerOneByteResource
     : public v8::String::ExternalOneByteStringResource {
  public:
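Each of the three string literals built in the new test is several hundred kilobytes of one-byte characters (700,000, 600,000 and 500,000 repeats), which presumably cannot all share a single regular page; deserializing the cached code for this script therefore exercises per-space reservations made of more than one chunk, and the InSpace checks confirm the strings still land in OLD_DATA_SPACE rather than the large-object space. ConstructSource() itself is a helper defined earlier in this test file; the sketch below is only a guess at its shape (the name and body are assumptions, not its actual code), building "<head><body repeated N times><tail>" as a one-byte vector:

// Assumed shape of the ConstructSource() helper used above; the real helper
// lives earlier in this test file. Requires <string.h> for memcpy.
static Vector<const uint8_t> ConstructSourceSketch(Vector<const uint8_t> head,
                                                   Vector<const uint8_t> body,
                                                   Vector<const uint8_t> tail,
                                                   int repeats) {
  int source_length = head.length() + body.length() * repeats + tail.length();
  uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
  memcpy(source, head.start(), head.length());
  for (int i = 0; i < repeats; i++) {
    memcpy(source + head.length() + i * body.length(), body.start(),
           body.length());
  }
  memcpy(source + head.length() + repeats * body.length(), tail.start(),
         tail.length());
  return Vector<const uint8_t>(source, source_length);
}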