[serializer] reserve maps one by one to avoid fragmentation.
R=hpayer@chromium.org
BUG=chromium:612816,chromium:634900

Review-Url: https://codereview.chromium.org/2229583003
Cr-Commit-Position: refs/heads/master@{#38515}
commit ead3188129
parent 7060bab81c
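The idea, in brief: map space holds only fixed-size Map objects, so after a round of GC its free memory tends to be scattered across many Map-sized holes. A single contiguous reservation for all of the snapshot's maps can then fail even though enough total space is free, while reserving one Map-sized slot at a time succeeds. A toy, self-contained illustration of that effect (not V8 code; kMapSize is a made-up stand-in for Map::kSize):

#include <cassert>
#include <cstddef>
#include <vector>

// Toy model of map space: just a list of free-hole sizes, in bytes.
struct ToyFreeList {
  std::vector<size_t> holes;
  // Carve `size` contiguous bytes out of the first hole that can hold them.
  bool Allocate(size_t size) {
    for (size_t& h : holes) {
      if (h >= size) { h -= size; return true; }
    }
    return false;  // enough total space may exist, but no single hole fits
  }
};

int main() {
  const size_t kMapSize = 88;  // made-up stand-in for Map::kSize
  // Four scattered holes of exactly one map each (4 * 88 bytes free total).
  ToyFreeList space{{kMapSize, kMapSize, kMapSize, kMapSize}};
  assert(!space.Allocate(4 * kMapSize));  // one contiguous reservation fails
  for (int i = 0; i < 4; i++) {
    assert(space.Allocate(kMapSize));     // one-by-one reservation succeeds
  }
}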
@@ -77,6 +77,11 @@ class SerializerReference {
             ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
   }
 
+  static SerializerReference MapReference(uint32_t index) {
+    return SerializerReference(SpaceBits::encode(MAP_SPACE) |
+                               ValueIndexBits::encode(index));
+  }
+
   static SerializerReference LargeObjectReference(uint32_t index) {
     return SerializerReference(SpaceBits::encode(LO_SPACE) |
                                ValueIndexBits::encode(index));
@@ -107,10 +112,14 @@ class SerializerReference {
     return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
   }
 
+  uint32_t map_index() const {
+    DCHECK(is_back_reference());
+    return ValueIndexBits::decode(bitfield_);
+  }
+
   uint32_t large_object_index() const {
     DCHECK(is_back_reference());
-    DCHECK(chunk_index() == 0);
-    return ChunkOffsetBits::decode(bitfield_);
+    return ValueIndexBits::decode(bitfield_);
   }
 
   uint32_t chunk_index() const {
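These accessors rely on SerializerReference packing a space tag and a per-space value index into a single 32-bit bitfield (SpaceBits, ValueIndexBits, and ChunkOffsetBits are V8 BitField instantiations). A minimal sketch of that encode/decode scheme with assumed shift and width values; the real layout may differ:

#include <cassert>
#include <cstdint>

// Assumed layout: the low kSpaceBits bits hold the space tag and the
// remaining bits hold the value index. Both constants are illustrative.
constexpr uint32_t kSpaceBits = 3;
constexpr uint32_t EncodeSpace(uint32_t space) { return space; }
constexpr uint32_t EncodeValueIndex(uint32_t index) {
  return index << kSpaceBits;
}
constexpr uint32_t DecodeSpace(uint32_t bits) {
  return bits & ((1u << kSpaceBits) - 1);
}
constexpr uint32_t DecodeValueIndex(uint32_t bits) { return bits >> kSpaceBits; }

int main() {
  const uint32_t kMapSpace = 3;  // assumed MAP_SPACE enum value
  // MapReference(42) in this sketch: space tag ORed with the shifted index.
  uint32_t bitfield = EncodeSpace(kMapSpace) | EncodeValueIndex(42);
  assert(DecodeSpace(bitfield) == kMapSpace);  // space()
  assert(DecodeValueIndex(bitfield) == 42);    // map_index()
}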
@@ -1119,8 +1119,7 @@ static void VerifyStringTable(Heap* heap) {
 }
 #endif  // VERIFY_HEAP
 
-
-bool Heap::ReserveSpace(Reservation* reservations) {
+bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
@@ -1132,7 +1131,30 @@ bool Heap::ReserveSpace(Reservation* reservations) {
       DCHECK_LE(1, reservation->length());
       if (reservation->at(0).size == 0) continue;
       bool perform_gc = false;
-      if (space == LO_SPACE) {
+      if (space == MAP_SPACE) {
+        // We allocate each map individually to avoid fragmentation.
+        maps->Clear();
+        DCHECK_EQ(1, reservation->length());
+        int num_maps = reservation->at(0).size / Map::kSize;
+        for (int i = 0; i < num_maps; i++) {
+          // The deserializer will update the skip list.
+          AllocationResult allocation = map_space()->AllocateRawUnaligned(
+              Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
+          HeapObject* free_space = nullptr;
+          if (allocation.To(&free_space)) {
+            // Mark with a free list node, in case we have a GC before
+            // deserializing.
+            Address free_space_address = free_space->address();
+            CreateFillerObjectAt(free_space_address, Map::kSize,
+                                 ClearRecordedSlots::kNo);
+            maps->Add(free_space_address);
+          } else {
+            perform_gc = true;
+            break;
+          }
+        }
+      } else if (space == LO_SPACE) {
         // Just check that we can allocate during deserialization.
         DCHECK_EQ(1, reservation->length());
         perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
       } else {
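Two details in this hunk are worth spelling out. The CreateFillerObjectAt call turns each reserved slot into a filler object, so the heap stays iterable if a GC runs before deserialization writes real maps into the slots. And perform_gc feeds the pre-existing retry loop that surrounds this hunk: if any individual map slot cannot be allocated, the heap collects garbage and the whole batch is retried, up to kThreshold attempts. A self-contained toy of that retry shape, with a simulated allocator and GC (all names and the allocator itself are illustrative, not V8 code):

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy bump allocator standing in for map space.
struct ToySpace {
  std::vector<uint8_t> arena = std::vector<uint8_t>(1 << 12);
  size_t top = 0;
  // Return 0 on failure, otherwise the address of a fresh slot.
  uintptr_t TryAllocate(size_t size) {
    if (top + size > arena.size()) return 0;
    uintptr_t result = reinterpret_cast<uintptr_t>(&arena[top]);
    top += size;
    return result;
  }
  void SimulatedGC() { top = 0; }  // stand-in for a real collection
};

// Reserve num_slots fixed-size slots one at a time; on any failure,
// "collect garbage" and retry the whole batch, up to kThreshold attempts.
bool ReserveFixedSlots(ToySpace* space, int num_slots, size_t slot_size,
                       std::vector<uintptr_t>* out) {
  static const int kThreshold = 20;  // same retry bound as the CL
  for (int attempt = 0; attempt < kThreshold; attempt++) {
    out->clear();
    bool failed = false;
    for (int i = 0; i < num_slots; i++) {
      uintptr_t addr = space->TryAllocate(slot_size);
      if (addr == 0) { failed = true; break; }
      out->push_back(addr);
    }
    if (!failed) return true;
    space->SimulatedGC();
  }
  return false;
}

int main() {
  ToySpace space;
  std::vector<uintptr_t> maps;
  return ReserveFixedSlots(&space, 8, 88, &maps) ? 0 : 1;
}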
@@ -754,7 +754,7 @@ class Heap {
   inline AllocationMemento* FindAllocationMemento(HeapObject* object);
 
   // Returns false if not able to reserve.
-  bool ReserveSpace(Reservation* reservations);
+  bool ReserveSpace(Reservation* reservations, List<Address>* maps);
 
   void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
 
@@ -55,7 +55,9 @@ bool Deserializer::ReserveSpace() {
     CHECK(reservations_[i].length() > 0);
   }
 #endif  // DEBUG
-  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
+  DCHECK(allocated_maps_.is_empty());
+  if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_))
+    return false;
   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
     high_water_[i] = reservations_[i][0].start;
   }
@@ -161,6 +163,12 @@ MaybeHandle<HeapObject> Deserializer::DeserializeObject(Isolate* isolate) {
 Deserializer::~Deserializer() {
   // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
   // DCHECK(source_.AtEOF());
+  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
+    int chunk_index = current_chunk_[space];
+    CHECK_EQ(reservations_[space].length(), chunk_index + 1);
+    CHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
+  }
+  CHECK_EQ(allocated_maps_.length(), next_map_index_);
 }
 
 // This is called on the roots. It is the driver of the deserialization
@@ -311,9 +319,12 @@ HeapObject* Deserializer::GetBackReferencedObject(int space) {
   SerializerReference back_reference =
       SerializerReference::FromBitfield(source_.GetInt());
   if (space == LO_SPACE) {
-    CHECK(back_reference.chunk_index() == 0);
     uint32_t index = back_reference.large_object_index();
     obj = deserialized_large_objects_[index];
+  } else if (space == MAP_SPACE) {
+    int index = back_reference.map_index();
+    DCHECK(index < next_map_index_);
+    obj = HeapObject::FromAddress(allocated_maps_[index]);
   } else {
     DCHECK(space < kNumberOfPreallocatedSpaces);
     uint32_t chunk_index = back_reference.chunk_index();
@@ -404,6 +415,9 @@ Address Deserializer::Allocate(int space_index, int size) {
     HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
     deserialized_large_objects_.Add(obj);
     return obj->address();
+  } else if (space_index == MAP_SPACE) {
+    DCHECK_EQ(Map::kSize, size);
+    return allocated_maps_[next_map_index_++];
   } else {
     DCHECK(space_index < kNumberOfPreallocatedSpaces);
     Address address = high_water_[space_index];
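Taken together with GetBackReferencedObject above, the map path replaces chunk/offset back-references with a dense index: Allocate hands out addresses from allocated_maps_ in order, and a back-reference to map i simply re-reads allocated_maps_[i]. A toy sketch of that contract (names mirror the CL loosely; this is not V8 code):

#include <cassert>
#include <cstdint>
#include <vector>

struct ToyDeserializer {
  std::vector<uintptr_t> allocated_maps;  // filled up front by ReserveSpace()
  int next_map_index = 0;

  // Allocate(MAP_SPACE, ...): consume the next pre-reserved slot in order.
  uintptr_t AllocateMap() { return allocated_maps[next_map_index++]; }

  // GetBackReferencedObject(MAP_SPACE): an earlier index resolves to the
  // address handed out when that map was first deserialized.
  uintptr_t ResolveMapBackReference(int index) const {
    assert(index < next_map_index);  // only already-materialized maps
    return allocated_maps[index];
  }
};

int main() {
  ToyDeserializer d;
  d.allocated_maps = {0x1000, 0x2000, 0x3000};  // pretend reserved addresses
  uintptr_t first = d.AllocateMap();            // map #0 materialized
  assert(d.ResolveMapBackReference(0) == first);
}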
@@ -34,6 +34,7 @@ class Deserializer : public SerializerDeserializer {
       : isolate_(NULL),
         source_(data->Payload()),
         magic_number_(data->GetMagicNumber()),
+        next_map_index_(0),
         external_reference_table_(NULL),
         deserialized_large_objects_(0),
         deserializing_user_code_(deserializing_user_code),
@@ -129,6 +130,8 @@ class Deserializer : public SerializerDeserializer {
   Heap::Reservation reservations_[kNumberOfSpaces];
   uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
   Address high_water_[kNumberOfPreallocatedSpaces];
+  int next_map_index_;
+  List<Address> allocated_maps_;
 
   ExternalReferenceTable* external_reference_table_;
 
@@ -80,7 +80,9 @@ class SerializerDeserializer : public ObjectVisitor {
   static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
 
   // No reservation for large object space necessary.
-  static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
+  // We also handle map space differently.
+  STATIC_ASSERT(MAP_SPACE == CODE_SPACE + 1);
+  static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;
   static const int kNumberOfSpaces = LAST_SPACE + 1;
 
  protected:
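The STATIC_ASSERT encodes an enum-ordering assumption. A sketch of how the two constants fall out of the (approximate) 2016-era AllocationSpace ordering; the enum below is reproduced from memory, so treat it as illustrative:

enum AllocationSpace {
  NEW_SPACE,   // 0: semispace new generation
  OLD_SPACE,   // 1: old pointer/data space
  CODE_SPACE,  // 2: executable code objects
  MAP_SPACE,   // 3: only and all Map objects (was LAST_PAGED_SPACE)
  LO_SPACE,    // 4: large objects
  LAST_SPACE = LO_SPACE
};

// Before this CL, chunked reservations covered NEW..MAP, i.e.
// LAST_PAGED_SPACE + 1 == 4 spaces. Maps now get per-object treatment, so
// chunked reservations stop after CODE_SPACE, and the assert pins down the
// ordering this truncation relies on.
static_assert(MAP_SPACE == CODE_SPACE + 1, "map space must follow code space");
static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;  // == 3
static const int kNumberOfSpaces = LAST_SPACE + 1;              // == 5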
@@ -16,6 +16,7 @@ Serializer::Serializer(Isolate* isolate)
       root_index_map_(isolate),
       recursion_depth_(0),
       code_address_map_(NULL),
+      num_maps_(0),
       large_objects_total_size_(0),
       seen_large_objects_index_(0) {
   // The serializer is meant to be used only to generate initial heap images
@@ -117,7 +118,8 @@ void Serializer::EncodeReservations(
     }
     out->last().mark_as_last();
   }
-
+  out->Add(SerializedData::Reservation(num_maps_ * Map::kSize));
+  out->last().mark_as_last();
   out->Add(SerializedData::Reservation(large_objects_total_size_));
   out->last().mark_as_last();
 }
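So the encoded reservation stream now carries, after the per-space chunk lists, exactly one map-space entry sized num_maps_ * Map::kSize, followed by one large-object entry, each terminated by mark_as_last(). A rough sketch of that layout, using a made-up high-bit "last" flag rather than V8's actual SerializedData::Reservation bitfield:

#include <cstdint>
#include <vector>

constexpr uint32_t kLastFlag = 1u << 31;  // assumed "last chunk" marker

// Paged spaces contribute their chunk sizes (last one flagged); then one
// map-space entry; then one large-object entry. Assumes, as the CL's
// DCHECK_LE(1, reservation->length()) does, at least one chunk per space.
void EncodeReservations(const std::vector<std::vector<uint32_t>>& paged_chunks,
                        uint32_t num_maps, uint32_t map_size,
                        uint32_t lo_total_size, std::vector<uint32_t>* out) {
  for (const auto& chunks : paged_chunks) {
    for (uint32_t size : chunks) out->push_back(size);
    out->back() |= kLastFlag;                         // mark_as_last()
  }
  out->push_back((num_maps * map_size) | kLastFlag);  // single map chunk
  out->push_back(lo_total_size | kLastFlag);          // single LO chunk
}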
@@ -127,15 +129,18 @@ bool Serializer::BackReferenceIsAlreadyAllocated(
     SerializerReference reference) {
   DCHECK(reference.is_back_reference());
   AllocationSpace space = reference.space();
-  int chunk_index = reference.chunk_index();
   if (space == LO_SPACE) {
-    return chunk_index == 0 &&
-           reference.large_object_index() < seen_large_objects_index_;
-  } else if (chunk_index == completed_chunks_[space].length()) {
-    return reference.chunk_offset() < pending_chunk_[space];
+    return reference.large_object_index() < seen_large_objects_index_;
+  } else if (space == MAP_SPACE) {
+    return reference.map_index() < num_maps_;
   } else {
-    return chunk_index < completed_chunks_[space].length() &&
-           reference.chunk_offset() < completed_chunks_[space][chunk_index];
+    int chunk_index = reference.chunk_index();
+    if (chunk_index == completed_chunks_[space].length()) {
+      return reference.chunk_offset() < pending_chunk_[space];
+    } else {
+      return chunk_index < completed_chunks_[space].length() &&
+             reference.chunk_offset() < completed_chunks_[space][chunk_index];
+    }
   }
 }
 #endif  // DEBUG
@@ -266,6 +271,11 @@ SerializerReference Serializer::AllocateLargeObject(int size) {
   return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
 }
 
+SerializerReference Serializer::AllocateMap() {
+  // Maps are allocated one-by-one when deserializing.
+  return SerializerReference::MapReference(num_maps_++);
+}
+
 SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
   DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
   DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
@@ -336,6 +346,12 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
       sink_->Put(NOT_EXECUTABLE, "not executable large object");
     }
     back_reference = serializer_->AllocateLargeObject(size);
+  } else if (space == MAP_SPACE) {
+    DCHECK_EQ(Map::kSize, size);
+    back_reference = serializer_->AllocateMap();
+    sink_->Put(kNewObject + reference_representation_ + space, "NewMap");
+    // This is redundant, but we include it anyway.
+    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
   } else {
     int fill = serializer_->PutAlignmentPrefix(object_);
     back_reference = serializer_->Allocate(space, size + fill);
@@ -190,6 +190,7 @@ class Serializer : public SerializerDeserializer {
 
   // This will return the space for an object.
   SerializerReference AllocateLargeObject(int size);
+  SerializerReference AllocateMap();
   SerializerReference Allocate(AllocationSpace space, int size);
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_.Encode(addr);
@@ -245,6 +246,8 @@ class Serializer : public SerializerDeserializer {
   uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
   List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
   uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
+  // Number of maps that we need to allocate.
+  uint32_t num_maps_;
 
   // We map serialized large objects to indexes for back-referencing.
   uint32_t large_objects_total_size_;