// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SERIALIZE_H_
#define V8_SERIALIZE_H_

#include "src/compiler.h"
#include "src/hashmap.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/snapshot-source-sink.h"

namespace v8 {
namespace internal {

// A TypeCode is used to distinguish different kinds of external reference.
// It is a single bit to make testing for types easy.
enum TypeCode {
  UNCLASSIFIED,  // One-of-a-kind references.
  C_BUILTIN,
  BUILTIN,
  RUNTIME_FUNCTION,
  IC_UTILITY,
  STATS_COUNTER,
  TOP_ADDRESS,
  ACCESSOR,
  STUB_CACHE_TABLE,
  RUNTIME_ENTRY,
  LAZY_DEOPTIMIZATION
};

const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
const int kFirstTypeCode = UNCLASSIFIED;

const int kReferenceIdBits = 16;
const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
const int kReferenceTypeShift = kReferenceIdBits;
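
// An encoded external reference packs the type code into the upper bits and a
// per-type id into the low kReferenceIdBits bits. Illustrative sketch only
// (it mirrors ExternalReferenceDecoder::Lookup below; not a separate API):
//
//   uint32_t key = (STATS_COUNTER << kReferenceTypeShift) | (id & kReferenceIdMask);
//   int type = key >> kReferenceTypeShift;  // STATS_COUNTER
//   int id   = key & kReferenceIdMask;      // the original id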

const int kDeoptTableSerializeEntryCount = 64;


// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
 public:
  static ExternalReferenceTable* instance(Isolate* isolate);

  ~ExternalReferenceTable() { }

  int size() const { return refs_.length(); }

  Address address(int i) { return refs_[i].address; }

  uint32_t code(int i) { return refs_[i].code; }

  const char* name(int i) { return refs_[i].name; }

  int max_id(int code) { return max_id_[code]; }

 private:
  explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
    PopulateTable(isolate);
  }

  struct ExternalReferenceEntry {
    Address address;
    uint32_t code;
    const char* name;
  };

  void PopulateTable(Isolate* isolate);

  // For a few types of references, we can get their address from their id.
  void AddFromId(TypeCode type,
                 uint16_t id,
                 const char* name,
                 Isolate* isolate);

  // For other types of references, the caller will figure out the address.
  void Add(Address address, TypeCode type, uint16_t id, const char* name);

  void Add(Address address, const char* name) {
    Add(address, UNCLASSIFIED, ++max_id_[UNCLASSIFIED], name);
  }

  List<ExternalReferenceEntry> refs_;
  uint16_t max_id_[kTypeCodeCount];
};
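
// Illustrative sketch of walking the table (assumes the caller holds a live
// Isolate*; the accessors above are the whole API):
//
//   ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
//   for (int i = 0; i < table->size(); i++) {
//     PrintF("%s -> %p\n", table->name(i), table->address(i));
//   }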


class ExternalReferenceEncoder {
 public:
  explicit ExternalReferenceEncoder(Isolate* isolate);

  uint32_t Encode(Address key) const;

  const char* NameOfAddress(Address key) const;

 private:
  HashMap encodings_;
  static uint32_t Hash(Address key) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
  }

  int IndexOf(Address key) const;

  void Put(Address key, int index);

  Isolate* isolate_;
};


class ExternalReferenceDecoder {
 public:
  explicit ExternalReferenceDecoder(Isolate* isolate);
  ~ExternalReferenceDecoder();

  Address Decode(uint32_t key) const {
    if (key == 0) return NULL;
    return *Lookup(key);
  }

 private:
  Address** encodings_;

  Address* Lookup(uint32_t key) const {
    int type = key >> kReferenceTypeShift;
    DCHECK(kFirstTypeCode <= type && type < kTypeCodeCount);
    int id = key & kReferenceIdMask;
    return &encodings_[type][id];
  }

  void Put(uint32_t key, Address value) {
    *Lookup(key) = value;
  }

  Isolate* isolate_;
};
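
// Encoder and decoder are used as a round-trip pair: the serializer turns an
// Address into a compact key, the deserializer maps the key back to the
// corresponding Address in the current process. Illustrative sketch only:
//
//   ExternalReferenceEncoder encoder(isolate);
//   uint32_t key = encoder.Encode(addr);      // addr must be a known reference
//   ExternalReferenceDecoder decoder(isolate);
//   Address roundtrip = decoder.Decode(key);  // same logical reference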


class AddressMapBase {
 protected:
  static void SetValue(HashMap::Entry* entry, uint32_t v) {
    entry->value = reinterpret_cast<void*>(v);
  }

  static uint32_t GetValue(HashMap::Entry* entry) {
    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
  }

  static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
                                     bool insert) {
    return map->Lookup(Key(obj), Hash(obj), insert);
  }

 private:
  static uint32_t Hash(HeapObject* obj) {
    return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
  }

  static void* Key(HeapObject* obj) {
    return reinterpret_cast<void*>(obj->address());
  }
};


class RootIndexMap : public AddressMapBase {
 public:
  explicit RootIndexMap(Isolate* isolate);

  ~RootIndexMap() { delete map_; }

  static const int kInvalidRootIndex = -1;
  int Lookup(HeapObject* obj) {
    HashMap::Entry* entry = LookupEntry(map_, obj, false);
    if (entry) return GetValue(entry);
    return kInvalidRootIndex;
  }

 private:
  HashMap* map_;

  DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
};
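
// Lookup() answers "is this object one of the heap roots, and if so, which
// one?"; callers must check for kInvalidRootIndex. Illustrative sketch:
//
//   RootIndexMap map(isolate);
//   int root_index = map.Lookup(heap_object);
//   if (root_index != RootIndexMap::kInvalidRootIndex) {
//     // Can be serialized as a root array reference instead of a full object.
//   }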


class BackReference {
 public:
  explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}

  BackReference() : bitfield_(kInvalidValue) {}

  static BackReference SourceReference() { return BackReference(kSourceValue); }

  static BackReference LargeObjectReference(uint32_t index) {
    return BackReference(SpaceBits::encode(LO_SPACE) |
                         ChunkOffsetBits::encode(index));
  }

  static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
                                 uint32_t chunk_offset) {
    DCHECK(IsAligned(chunk_offset, kObjectAlignment));
    DCHECK_NE(LO_SPACE, space);
    return BackReference(
        SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
        ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
  }

  bool is_valid() const { return bitfield_ != kInvalidValue; }
  bool is_source() const { return bitfield_ == kSourceValue; }

  AllocationSpace space() const {
    DCHECK(is_valid());
    return SpaceBits::decode(bitfield_);
  }

  uint32_t chunk_offset() const {
    DCHECK(is_valid());
    return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
  }

  uint32_t chunk_index() const {
    DCHECK(is_valid());
    return ChunkIndexBits::decode(bitfield_);
  }

  uint32_t reference() const {
    DCHECK(is_valid());
    return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
  }

  uint32_t bitfield() const { return bitfield_; }

 private:
  static const uint32_t kInvalidValue = 0xFFFFFFFF;
  static const uint32_t kSourceValue = 0xFFFFFFFE;
  static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
  static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;

 public:
  static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;

 private:
  class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
  class ChunkIndexBits
      : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
  class SpaceBits
      : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
  };

  uint32_t bitfield_;
};
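
// A BackReference packs [space | chunk_index | chunk_offset] into one 32-bit
// word, with the offset stored in object-alignment granularity. Illustrative
// round trip (the space name is just an example AllocationSpace value):
//
//   BackReference ref = BackReference::Reference(OLD_POINTER_SPACE, 2, 64);
//   ref.space();         // OLD_POINTER_SPACE
//   ref.chunk_index();   // 2
//   ref.chunk_offset();  // 64 (must be kObjectAlignment-aligned on encode)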


// Mapping objects to their location after deserialization.
// This is used during building, but not at runtime by V8.
class BackReferenceMap : public AddressMapBase {
 public:
  BackReferenceMap()
      : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}

  ~BackReferenceMap() { delete map_; }

  BackReference Lookup(HeapObject* obj) {
    HashMap::Entry* entry = LookupEntry(map_, obj, false);
    return entry ? BackReference(GetValue(entry)) : BackReference();
  }

  void Add(HeapObject* obj, BackReference b) {
    DCHECK(b.is_valid());
    DCHECK_EQ(NULL, LookupEntry(map_, obj, false));
    HashMap::Entry* entry = LookupEntry(map_, obj, true);
    SetValue(entry, b.bitfield());
  }

  void AddSourceString(String* string) {
    Add(string, BackReference::SourceReference());
  }

 private:
  DisallowHeapAllocation no_allocation_;
  HashMap* map_;
  DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
};
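
// The serializer records each object's allocation here the first time the
// object is emitted; later occurrences can then be written as back
// references. Illustrative sketch only:
//
//   BackReferenceMap map;
//   map.Add(obj, BackReference::Reference(space, chunk_index, chunk_offset));
//   ...
//   if (map.Lookup(obj).is_valid()) {
//     // Emit a back reference instead of serializing the object again.
//   }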


// SerializerDeserializer is a common superclass for Serializer and
// Deserializer; it stores the constants and methods used by both.
class SerializerDeserializer: public ObjectVisitor {
 public:
  static void Iterate(Isolate* isolate, ObjectVisitor* visitor);

  static int nop() { return kNop; }

  // No reservation for large object space necessary.
  static const int kNumberOfPreallocatedSpaces = LO_SPACE;
  static const int kNumberOfSpaces = LAST_SPACE + 1;

 protected:
  // Where the pointed-to object can be found:
  enum Where {
    kNewObject = 0,  // Object is next in snapshot.
    // 1-7                            One per space.
    kRootArray = 0x9,             // Object is found in root array.
    kPartialSnapshotCache = 0xa,  // Object is in the cache.
    kExternalReference = 0xb,     // Pointer to an external reference.
    kSkip = 0xc,                  // Skip n bytes.
    kBuiltin = 0xd,               // Builtin code object.
    kAttachedReference = 0xe,     // Object is described in an attached list.
    kNop = 0xf,                   // Does nothing, used to pad.
    kBackref = 0x10,              // Object is described relative to end.
    // 0x11-0x17                      One per space.
    kBackrefWithSkip = 0x18,      // Object is described relative to end.
    // 0x19-0x1f                      One per space.
    // 0x20-0x3f                      Used by misc. tags below.
    kPointedToMask = 0x3f
  };

  // How to code the pointer to the object.
  enum HowToCode {
    kPlain = 0,  // Straight pointer.
    // What this means depends on the architecture:
    kFromCode = 0x40,  // A pointer inlined in code.
    kHowToCodeMask = 0x40
  };

  // For kRootArrayConstants
  enum WithSkip {
    kNoSkipDistance = 0,
    kHasSkipDistance = 0x40,
    kWithSkipMask = 0x40
  };

  // Where to point within the object.
  enum WhereToPoint {
    kStartOfObject = 0,
    kInnerPointer = 0x80,  // First insn in code object or payload of cell.
    kWhereToPointMask = 0x80
  };
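
  // A serialized reference byte code combines one value from each of the
  // three enums above; the masks pull the parts back out. Illustrative
  // decomposition:
  //
  //   int b = kBackref | kFromCode | kInnerPointer;
  //   b & kPointedToMask;     // kBackref
  //   b & kHowToCodeMask;     // kFromCode
  //   b & kWhereToPointMask;  // kInnerPointer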

  // Misc.
  // Raw data to be copied from the snapshot.  This byte code does not advance
  // the current pointer, which is used for code objects, where we write the
  // entire code in one memcpy, then fix up stuff with kSkip and other byte
  // codes that overwrite data.
  static const int kRawData = 0x20;
  // Some common raw lengths: 0x21-0x3f.  These auto-advance the current
  // pointer.
  // A tag emitted at strategic points in the snapshot to delineate sections.
  // If the deserializer does not find these at the expected moments then it
  // is an indication that the snapshot and the VM do not fit together.
  // Examine the build process for architecture, version or configuration
  // mismatches.
  static const int kSynchronize = 0x70;
  // Used for the source code of the natives, which is in the executable, but
  // is referred to from external strings in the snapshot.
  static const int kNativesStringResource = 0x71;
  static const int kRepeat = 0x72;
  static const int kConstantRepeat = 0x73;
  // 0x73-0x7f            Repeat last word (subtract 0x72 to get the count).
  static const int kMaxRepeats = 0x7f - 0x72;
  static int CodeForRepeats(int repeats) {
    DCHECK(repeats >= 1 && repeats <= kMaxRepeats);
    return 0x72 + repeats;
  }
  static int RepeatsForCode(int byte_code) {
    DCHECK(byte_code >= kConstantRepeat && byte_code <= 0x7f);
    return byte_code - 0x72;
  }
  static const int kRootArrayConstants = 0xa0;
  // 0xa0-0xbf            Things from the first 32 elements of the root array.
  static const int kRootArrayNumberOfConstantEncodings = 0x20;
  static int RootArrayConstantFromByteCode(int byte_code) {
    return byte_code & 0x1f;
  }

  static const int kAnyOldSpace = -1;

  // A bitmask for getting the space out of an instruction.
  static const int kSpaceMask = 7;
  STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
};
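
// The repeat byte codes fold the repeat count into the byte code itself; the
// two helpers above are each other's inverse. Worked example (pure
// arithmetic):
//
//   CodeForRepeats(3)    == 0x72 + 3    == 0x75
//   RepeatsForCode(0x75) == 0x75 - 0x72 == 3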


// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class Deserializer: public SerializerDeserializer {
 public:
  // Create a deserializer from a snapshot byte source.
  explicit Deserializer(SnapshotByteSource* source);

  virtual ~Deserializer();

  // Deserialize the snapshot into an empty heap.
  void Deserialize(Isolate* isolate);

  enum OnOOM { FATAL_ON_OOM, NULL_ON_OOM };

  // Deserialize a single object and the objects reachable from it.
  // We may want to abort gracefully even if deserialization fails.
  void DeserializePartial(Isolate* isolate, Object** root,
                          OnOOM on_oom = FATAL_ON_OOM);

  void AddReservation(int space, uint32_t chunk) {
    DCHECK(space >= 0);
    DCHECK(space < kNumberOfSpaces);
    reservations_[space].Add({chunk, NULL, NULL});
  }

  void FlushICacheForNewCodeObjects();

  // Serialized user code references certain objects that are provided in a
  // list.  By calling this method, we assume that we are deserializing user
  // code.
  void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) {
    attached_objects_ = attached_objects;
  }

  bool deserializing_user_code() { return attached_objects_ != NULL; }

 private:
  virtual void VisitPointers(Object** start, Object** end);

  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
    UNREACHABLE();
  }

  bool ReserveSpace();

  // Allocation sites are present in the snapshot, and must be linked into
  // a list at deserialization time.
  void RelinkAllocationSite(AllocationSite* site);

  // Fills in some heap data in an area from start to end (non-inclusive).  The
  // space id is used for the write barrier.  The object_address is the address
  // of the object we are writing into, or NULL if we are not writing into an
  // object, i.e. if we are writing a series of tagged values that are not on
  // the heap.
  void ReadData(Object** start, Object** end, int space,
                Address object_address);
  void ReadObject(int space_number, Object** write_back);
  Address Allocate(int space_index, int size);

  // Special handling for serialized code like hooking up internalized strings.
  HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
  Object* ProcessBackRefInSerializedCode(Object* obj);

  // This returns the address of an object that has been described in the
  // snapshot by chunk index and offset.
  HeapObject* GetBackReferencedObject(int space) {
    if (space == LO_SPACE) {
      uint32_t index = source_->GetInt();
      return deserialized_large_objects_[index];
    } else {
      BackReference back_reference(source_->GetInt());
      DCHECK(space < kNumberOfPreallocatedSpaces);
      uint32_t chunk_index = back_reference.chunk_index();
      DCHECK_LE(chunk_index, current_chunk_[space]);
      uint32_t chunk_offset = back_reference.chunk_offset();
      return HeapObject::FromAddress(reservations_[space][chunk_index].start +
                                     chunk_offset);
    }
  }

  // Cached current isolate.
  Isolate* isolate_;

  // Objects from the attached object descriptions in the serialized user code.
  Vector<Handle<Object> >* attached_objects_;

  SnapshotByteSource* source_;
  // The address of the next object that will be allocated in each space.
  // Each space has a number of chunks reserved by the GC, with each chunk
  // fitting into a page. Deserialized objects are allocated into the
  // current chunk of the target space by bumping up high water mark.
  Heap::Reservation reservations_[kNumberOfSpaces];
  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
  Address high_water_[kNumberOfPreallocatedSpaces];

  ExternalReferenceDecoder* external_reference_decoder_;

  List<HeapObject*> deserialized_large_objects_;

  DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
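
// Typical driving sequence (illustrative sketch; the SnapshotByteSource
// constructor arguments are an assumption, see snapshot-source-sink.h):
//
//   SnapshotByteSource source(snapshot_bytes, snapshot_length);
//   Deserializer deserializer(&source);
//   // Reservations must match what the serializer produced:
//   deserializer.AddReservation(space, chunk_size);
//   deserializer.Deserialize(isolate);  // into an empty heap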


class CodeAddressMap;

// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
 public:
  Serializer(Isolate* isolate, SnapshotByteSink* sink);
  ~Serializer();
  virtual void VisitPointers(Object** start, Object** end) OVERRIDE;

  void FinalizeAllocation();

  Vector<const uint32_t> FinalAllocationChunks(int space) const {
    if (space == LO_SPACE) {
      return Vector<const uint32_t>(&large_objects_total_size_, 1);
    } else {
      DCHECK_EQ(0, pending_chunk_[space]);  // No pending chunks.
      return completed_chunks_[space].ToConstVector();
    }
  }

  Isolate* isolate() const { return isolate_; }

  BackReferenceMap* back_reference_map() { return &back_reference_map_; }
  RootIndexMap* root_index_map() { return &root_index_map_; }

 protected:
  class ObjectSerializer : public ObjectVisitor {
   public:
    ObjectSerializer(Serializer* serializer,
                     Object* o,
                     SnapshotByteSink* sink,
                     HowToCode how_to_code,
                     WhereToPoint where_to_point)
        : serializer_(serializer),
          object_(HeapObject::cast(o)),
          sink_(sink),
          reference_representation_(how_to_code + where_to_point),
          bytes_processed_so_far_(0),
          code_object_(o->IsCode()),
          code_has_been_output_(false) { }
    void Serialize();
    void VisitPointers(Object** start, Object** end);
    void VisitEmbeddedPointer(RelocInfo* target);
    void VisitExternalReference(Address* p);
    void VisitExternalReference(RelocInfo* rinfo);
    void VisitCodeTarget(RelocInfo* target);
    void VisitCodeEntry(Address entry_address);
    void VisitCell(RelocInfo* rinfo);
    void VisitRuntimeEntry(RelocInfo* reloc);
    // Used for serializing the external strings that hold the natives source.
    void VisitExternalOneByteString(
        v8::String::ExternalOneByteStringResource** resource);
    // We can't serialize a heap with external two byte strings.
    void VisitExternalTwoByteString(
        v8::String::ExternalStringResource** resource) {
      UNREACHABLE();
    }

   private:
    void SerializePrologue(AllocationSpace space, int size, Map* map);

    enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
    // This function outputs or skips the raw data between the last pointer and
    // up to the current position.  It optionally can just return the number of
    // bytes to skip instead of performing a skip instruction, in case the skip
    // can be merged into the next instruction.
    int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
    // External strings are serialized so that they resemble sequential
    // strings.
    void SerializeExternalString();

    Serializer* serializer_;
    HeapObject* object_;
    SnapshotByteSink* sink_;
    int reference_representation_;
    int bytes_processed_so_far_;
    bool code_object_;
    bool code_has_been_output_;
  };

  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) = 0;

  void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
               int skip);

  void SerializeBackReference(BackReference back_reference,
                              HowToCode how_to_code,
                              WhereToPoint where_to_point, int skip);
  void InitializeAllocators();
  // This will return the space for an object.
  static AllocationSpace SpaceOfObject(HeapObject* object);
  BackReference AllocateLargeObject(int size);
  BackReference Allocate(AllocationSpace space, int size);
  int EncodeExternalReference(Address addr) {
    return external_reference_encoder_->Encode(addr);
  }

  // GetInt reads 4 bytes at once, requiring padding at the end.
  void Pad();

  // Some roots should not be serialized, because their actual value depends on
  // absolute addresses and they are reset after deserialization, anyway.
  bool ShouldBeSkipped(Object** current);

  // We may not need the code address map for logging for every instance
  // of the serializer.  Initialize it on demand.
  void InitializeCodeAddressMap();

  inline uint32_t max_chunk_size(int space) const {
    DCHECK_LE(0, space);
    DCHECK_LT(space, kNumberOfSpaces);
    return max_chunk_size_[space];
  }

  Isolate* isolate_;

  SnapshotByteSink* sink_;
  ExternalReferenceEncoder* external_reference_encoder_;

  BackReferenceMap back_reference_map_;
  RootIndexMap root_index_map_;

  friend class ObjectSerializer;
  friend class Deserializer;

 private:
  CodeAddressMap* code_address_map_;
  // Objects from the same space are put into chunks for bulk-allocation
  // when deserializing. We have to make sure that each chunk fits into a
  // page. So we track the chunk size in pending_chunk_ of a space, but
  // when it exceeds a page, we complete the current chunk and start a new one.
  uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
  List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
  uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];

  // We map serialized large objects to indexes for back-referencing.
  uint32_t large_objects_total_size_;
  uint32_t seen_large_objects_index_;

  DISALLOW_COPY_AND_ASSIGN(Serializer);
};
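
// The chunk bookkeeping above feeds directly into Deserializer reservations.
// Illustrative sketch (assumes serialization has already finished):
//
//   serializer.FinalizeAllocation();
//   for (int space = 0; space < SerializerDeserializer::kNumberOfSpaces; space++) {
//     Vector<const uint32_t> chunks = serializer.FinalAllocationChunks(space);
//     for (int i = 0; i < chunks.length(); i++) {
//       deserializer.AddReservation(space, chunks[i]);
//     }
//   }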


class PartialSerializer : public Serializer {
 public:
  PartialSerializer(Isolate* isolate,
                    Serializer* startup_snapshot_serializer,
                    SnapshotByteSink* sink)
      : Serializer(isolate, sink),
        startup_serializer_(startup_snapshot_serializer) {
    InitializeCodeAddressMap();
  }

  // Serialize the objects reachable from a single object pointer.
  void Serialize(Object** o);
  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) OVERRIDE;

 private:
  int PartialSnapshotCacheIndex(HeapObject* o);
  bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
    // Scripts should be referred only through shared function infos.  We can't
    // allow them to be part of the partial snapshot because they contain a
    // unique ID, and deserializing several partial snapshots containing
    // scripts would cause duplicates.
    DCHECK(!o->IsScript());
    return o->IsName() || o->IsSharedFunctionInfo() ||
           o->IsHeapNumber() || o->IsCode() ||
           o->IsScopeInfo() ||
           o->map() ==
               startup_serializer_->isolate()->heap()->fixed_cow_array_map();
  }

  Serializer* startup_serializer_;
  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};


class StartupSerializer : public Serializer {
 public:
  StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
      : Serializer(isolate, sink), root_index_wave_front_(0) {
    // Clear the cache of objects used by the partial snapshot.  After the
    // strong roots have been serialized we can create a partial snapshot
    // which will repopulate the cache with objects needed by that partial
    // snapshot.
    isolate->set_serialize_partial_snapshot_cache_length(0);
    InitializeCodeAddressMap();
  }

  // The StartupSerializer has to serialize the root array, which is slightly
  // different.
  virtual void VisitPointers(Object** start, Object** end) OVERRIDE;

  // Serialize the current state of the heap.  The order is:
  // 1) Strong references.
  // 2) Partial snapshot cache.
  // 3) Weak references (e.g. the string table).
  virtual void SerializeStrongReferences();
  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) OVERRIDE;
  void SerializeWeakReferences();
  void Serialize() {
    SerializeStrongReferences();
    SerializeWeakReferences();
    Pad();
  }

 private:
  intptr_t root_index_wave_front_;
  DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};


class CodeSerializer : public Serializer {
 public:
  static ScriptData* Serialize(Isolate* isolate,
                               Handle<SharedFunctionInfo> info,
                               Handle<String> source);

  MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
      Isolate* isolate, ScriptData* cached_data, Handle<String> source);

  static const int kSourceObjectIndex = 0;
  static const int kCodeStubsBaseIndex = 1;

  String* source() const {
    DCHECK(!AllowHeapAllocation::IsAllowed());
    return source_;
  }

  List<uint32_t>* stub_keys() { return &stub_keys_; }
  int num_internalized_strings() const { return num_internalized_strings_; }

 private:
  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
                 Code* main_code)
      : Serializer(isolate, sink),
        source_(source),
        main_code_(main_code),
        num_internalized_strings_(0) {
    back_reference_map_.AddSourceString(source);
  }

  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) OVERRIDE;

  void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
                        WhereToPoint where_to_point);
  void SerializeIC(Code* ic, HowToCode how_to_code,
                   WhereToPoint where_to_point);
  void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
                         WhereToPoint where_to_point);
  void SerializeSourceObject(HowToCode how_to_code,
                             WhereToPoint where_to_point);
  void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
                        WhereToPoint where_to_point);
  int AddCodeStubKey(uint32_t stub_key);

  DisallowHeapAllocation no_gc_;
  String* source_;
  Code* main_code_;
  int num_internalized_strings_;
  List<uint32_t> stub_keys_;
  DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
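
// The two static entry points form the code-cache round trip. Illustrative
// sketch only:
//
//   ScriptData* cached = CodeSerializer::Serialize(isolate, info, source);
//   ...
//   MaybeHandle<SharedFunctionInfo> copy =
//       CodeSerializer::Deserialize(isolate, cached, source);
//   // An empty MaybeHandle means the cached data could not be used.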


// Wrapper around ScriptData to provide code-serializer-specific functionality.
class SerializedCodeData {
 public:
  // Used when consuming.
  static SerializedCodeData* FromCachedData(ScriptData* cached_data,
                                            String* source) {
    DisallowHeapAllocation no_gc;
    SerializedCodeData* scd = new SerializedCodeData(cached_data);
    if (scd->IsSane(source)) return scd;
    cached_data->Reject();
    delete scd;
    return NULL;
  }

  // Used when producing.
  SerializedCodeData(const List<byte>& payload, CodeSerializer* cs);

  ~SerializedCodeData() {
    if (owns_script_data_) delete script_data_;
  }

  // Return ScriptData object and relinquish ownership over it to the caller.
  ScriptData* GetScriptData() {
    ScriptData* result = script_data_;
    script_data_ = NULL;
    DCHECK(owns_script_data_);
    owns_script_data_ = false;
    return result;
  }

  class Reservation {
   public:
    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation); }
    bool is_last_chunk() const { return IsLastChunkBits::decode(reservation); }

   private:
    uint32_t reservation;

    DISALLOW_COPY_AND_ASSIGN(Reservation);
  };

  int NumInternalizedStrings() const {
    return GetHeaderValue(kNumInternalizedStringsOffset);
  }

  Vector<const Reservation> Reservations() const {
    return Vector<const Reservation>(reinterpret_cast<const Reservation*>(
                                         script_data_->data() + kHeaderSize),
                                     GetHeaderValue(kReservationsOffset));
  }

  Vector<const uint32_t> CodeStubKeys() const {
    int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
    const byte* start = script_data_->data() + kHeaderSize + reservations_size;
    return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
                                  GetHeaderValue(kNumCodeStubKeysOffset));
  }

  const byte* Payload() const {
    int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
    int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
    return script_data_->data() + kHeaderSize + reservations_size +
           code_stubs_size;
  }

  int PayloadLength() const {
    int payload_length = GetHeaderValue(kPayloadLengthOffset);
    DCHECK_EQ(script_data_->data() + script_data_->length(),
              Payload() + payload_length);
    return payload_length;
  }

 private:
  explicit SerializedCodeData(ScriptData* data)
      : script_data_(data), owns_script_data_(false) {}

  void SetHeaderValue(int offset, int value) {
    reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] =
        value;
  }

  int GetHeaderValue(int offset) const {
    return reinterpret_cast<const int*>(script_data_->data())[offset];
  }

  bool IsSane(String* source);

  int CheckSum(String* source);

  // The data header consists of int-sized entries:
  // [0] version hash
  // [1] number of internalized strings
  // [2] number of reservation size entries
  // [3] number of code stub keys
  // [4] payload length
  static const int kCheckSumOffset = 0;
  static const int kNumInternalizedStringsOffset = 1;
  static const int kReservationsOffset = 2;
  static const int kNumCodeStubKeysOffset = 3;
  static const int kPayloadLengthOffset = 4;
  static const int kHeaderSize = (kPayloadLengthOffset + 1) * kIntSize;

  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
  class IsLastChunkBits : public BitField<bool, 31, 1> {};

  // Following the header, we store, in sequential order
  // - reservation sizes
  // - code stub keys
  // - serialization payload

  ScriptData* script_data_;
  bool owns_script_data_;
};
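
// Consuming side, as wired up above (illustrative sketch; FromCachedData
// returns NULL when the data fails the sanity check and has been rejected):
//
//   SerializedCodeData* scd =
//       SerializedCodeData::FromCachedData(cached_data, *source);
//   if (scd != NULL) {
//     const byte* payload = scd->Payload();
//     int length = scd->PayloadLength();
//     ...
//     delete scd;
//   }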

} }  // namespace v8::internal

#endif  // V8_SERIALIZE_H_