Reland "[serializer] Remove new space"

This is a reland of 1c7618abad

The revert was due to a missing dependency in the incremental build,
fixed in https://crrev.com/c/2400987.

Original change's description:
> [serializer] Remove new space
>
> The new space is unused in the snapshot, as we convert all new objects
> to old space objects when serializing. This means we can get rid of
> the snapshot new space entirely, and as a result get rid of the write
> barrier checks.
>
> This also rejiggles the order of the general spaces enum so that the new
> spaces are at the end, and can be truncated off for the SnapshotSpace
> enum.
>
> As a drive by, fix a bug in an unrelated test-api test which this patch
> exposed.
>
> Change-Id: If67ff8be5bf03104a3ffae7df707c22460bba3a1
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2390762
> Commit-Queue: Leszek Swirski <leszeks@chromium.org>
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69761}

Tbr: jgruber@chromium.org,dinfuehr@chromium.org
Change-Id: I9fbc61a124fae09d12d6281baaca60eb6c39a6e5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2401420
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69785}
Leszek Swirski, 2020-09-08 15:58:29 +02:00, committed by Commit Bot
commit 81231c23a9 (parent 698f8caca0)
11 changed files with 79 additions and 110 deletions
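
Before the per-file hunks, the background the change rests on: a generational
write barrier records old-to-new pointers so that a scavenge can find and
update those slots without scanning the whole old generation. Once the
deserializer can only materialize old-generation (or read-only) objects, it
can never create such a pointer, and the barrier plumbing it carried becomes
dead code. A minimal sketch of the check being deleted throughout this diff
(illustrative only, reusing V8 names that appear below; not the actual
helper):

  // What every deserializer slot write used to have to consider.
  void WriteWithBarrier(HeapObject host, MaybeObjectSlot slot,
                        MaybeObject value) {
    slot.store(value);
    if (Heap::InYoungGeneration(value) && !Heap::InYoungGeneration(host)) {
      GenerationalBarrier(host, slot, value);  // record the old-to-new slot
    }
  }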

@@ -746,20 +746,20 @@ using WeakSlotCallbackWithHeap = bool (*)(Heap* heap, FullObjectSlot pointer);
 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
 // consecutive.
 enum AllocationSpace {
-  RO_SPACE,    // Immortal, immovable and immutable objects,
-  NEW_SPACE,   // Young generation semispaces for regular objects collected with
-               // Scavenger.
-  OLD_SPACE,   // Old generation regular object space.
-  CODE_SPACE,  // Old generation code object space, marked executable.
-  MAP_SPACE,   // Old generation map object space, non-movable.
-  LO_SPACE,    // Old generation large object space.
+  RO_SPACE,       // Immortal, immovable and immutable objects,
+  OLD_SPACE,      // Old generation regular object space.
+  CODE_SPACE,     // Old generation code object space, marked executable.
+  MAP_SPACE,      // Old generation map object space, non-movable.
+  LO_SPACE,       // Old generation large object space.
   CODE_LO_SPACE,  // Old generation large code object space.
   NEW_LO_SPACE,   // Young generation large object space.
+  NEW_SPACE,  // Young generation semispaces for regular objects collected with
+              // Scavenger.

   FIRST_SPACE = RO_SPACE,
-  LAST_SPACE = NEW_LO_SPACE,
-  FIRST_MUTABLE_SPACE = NEW_SPACE,
-  LAST_MUTABLE_SPACE = NEW_LO_SPACE,
+  LAST_SPACE = NEW_SPACE,
+  FIRST_MUTABLE_SPACE = OLD_SPACE,
+  LAST_MUTABLE_SPACE = NEW_SPACE,
   FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
   LAST_GROWABLE_PAGED_SPACE = MAP_SPACE
 };
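
The NOTE about consecutive values is what makes this reordering more than
cosmetic: space sets are expressed as numeric ranges over this enum, so with
both young spaces sorted to the tail, any loop bounded by the snapshot's space
count simply never reaches them. An illustrative loop (mirroring the heap.cc
hunk further down, not a specific V8 function):

  for (int space = FIRST_SPACE;
       space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
       space++) {
    DCHECK_NE(space, NEW_SPACE);  // young spaces lie past the truncated range
    DCHECK_NE(space, NEW_LO_SPACE);
    // ... process static_cast<AllocationSpace>(space) ...
  }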

@@ -2794,7 +2794,7 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
   map->set_may_have_interesting_symbols(true);
   LOG(isolate(), MapDetails(*map));
   Handle<JSGlobalProxy> proxy = Handle<JSGlobalProxy>::cast(
-      NewJSObjectFromMap(map, AllocationType::kYoung));
+      NewJSObjectFromMap(map, AllocationType::kOld));
   // Create identity hash early in case there is any JS collection containing
   // a global proxy key and needs to be rehashed after deserialization.
   proxy->GetOrCreateIdentityHash(isolate());

@@ -1874,6 +1874,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
   for (int space = FIRST_SPACE;
        space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
        space++) {
+    DCHECK_NE(space, NEW_SPACE);
+    DCHECK_NE(space, NEW_LO_SPACE);
     Reservation* reservation = &reservations[space];
     DCHECK_LE(1, reservation->size());
     if (reservation->at(0).size == 0) {

@@ -1934,10 +1936,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
         allocation =
             AllocateRaw(size, type, AllocationOrigin::kRuntime, align);
 #else
-        if (space == NEW_SPACE) {
-          allocation = new_space()->AllocateRaw(
-              size, AllocationAlignment::kWordAligned);
-        } else if (space == RO_SPACE) {
+        if (space == RO_SPACE) {
           allocation = read_only_space()->AllocateRaw(
               size, AllocationAlignment::kWordAligned);
         } else {

@@ -1969,16 +1968,11 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
           V8::FatalProcessOutOfMemory(
               isolate(), "insufficient memory to create an Isolate");
         }
-        if (space == NEW_SPACE) {
-          CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
+        if (counter > 1) {
+          CollectAllGarbage(kReduceMemoryFootprintMask,
+                            GarbageCollectionReason::kDeserializer);
         } else {
-          if (counter > 1) {
-            CollectAllGarbage(kReduceMemoryFootprintMask,
-                              GarbageCollectionReason::kDeserializer);
-          } else {
-            CollectAllGarbage(kNoGCFlags,
-                              GarbageCollectionReason::kDeserializer);
-          }
+          CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kDeserializer);
         }
         gc_performed = true;
         break;  // Abort for-loop over spaces and retry.
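
For orientation, the retry structure surrounding this hunk, reconstructed from
the visible context lines (TryReserve and kMaxReserveAttempts are hypothetical
stand-ins; the real code eventually gives up via V8::FatalProcessOutOfMemory):

  bool gc_performed = true;
  int counter = 0;
  while (gc_performed && counter++ < kMaxReserveAttempts) {
    gc_performed = false;
    for (int space = FIRST_SPACE;
         space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
         space++) {
      if (!TryReserve(space)) {
        if (counter > 1) {
          CollectAllGarbage(kReduceMemoryFootprintMask,
                            GarbageCollectionReason::kDeserializer);
        } else {
          CollectAllGarbage(kNoGCFlags,
                            GarbageCollectionReason::kDeserializer);
        }
        gc_performed = true;
        break;  // abort the for-loop over spaces and retry
      }
    }
  }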
@@ -5884,9 +5878,9 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
 }

 PagedSpace* PagedSpaceIterator::Next() {
-  switch (counter_++) {
+  int space = counter_++;
+  switch (space) {
     case RO_SPACE:
-    case NEW_SPACE:
       UNREACHABLE();
     case OLD_SPACE:
       return heap_->old_space();

@@ -5895,6 +5889,7 @@ PagedSpace* PagedSpaceIterator::Next() {
     case MAP_SPACE:
       return heap_->map_space();
     default:
+      DCHECK_GT(space, LAST_GROWABLE_PAGED_SPACE);
       return nullptr;
   }
 }

@@ -2499,7 +2499,8 @@ class VerifySmisVisitor : public RootVisitor {
 // is done.
 class V8_EXPORT_PRIVATE PagedSpaceIterator {
  public:
-  explicit PagedSpaceIterator(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
+  explicit PagedSpaceIterator(Heap* heap)
+      : heap_(heap), counter_(FIRST_GROWABLE_PAGED_SPACE) {}
   PagedSpace* Next();

  private:
@@ -102,10 +102,7 @@ Deserializer::~Deserializer() {
 // process. It is also called on the body of each function.
 void Deserializer::VisitRootPointers(Root root, const char* description,
                                      FullObjectSlot start, FullObjectSlot end) {
-  // We are reading to a location outside of JS heap, so pass kNew to avoid
-  // triggering write barriers.
-  ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end),
-           SnapshotSpace::kNew, kNullAddress);
+  ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end), kNullAddress);
 }

 void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {

@@ -337,10 +334,8 @@ HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
 HeapObject Deserializer::ReadObject() {
   MaybeObject object;
-  // We are reading to a location outside of JS heap, so pass kNew to avoid
-  // triggering write barriers.
   ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
-           SnapshotSpace::kNew, kNullAddress);
+           kNullAddress);
   return object.GetHeapObjectAssumeStrong();
 }

@@ -380,7 +375,7 @@ HeapObject Deserializer::ReadObject(SnapshotSpace space) {
   MaybeObjectSlot limit(address + size);
   current.store(MaybeObject::FromObject(map));

-  ReadData(current + 1, limit, space, address);
+  ReadData(current + 1, limit, address);
   obj = PostProcessNewObject(obj, space);

 #ifdef DEBUG

@@ -410,19 +405,18 @@ HeapObject Deserializer::ReadMetaMap() {
   current.store(MaybeObject(current.address() + kHeapObjectTag));
   // Set the instance-type manually, to allow backrefs to read it.
   Map::unchecked_cast(obj).set_instance_type(MAP_TYPE);
-  ReadData(current + 1, limit, space, address);
+  ReadData(current + 1, limit, address);

   return obj;
 }

-void Deserializer::ReadCodeObjectBody(SnapshotSpace space,
-                                      Address code_object_address) {
+void Deserializer::ReadCodeObjectBody(Address code_object_address) {
   // At this point the code object is already allocated, its map field is
   // initialized and its raw data fields and code stream are also read.
   // Now we read the rest of code header's fields.
   MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
   MaybeObjectSlot limit(code_object_address + Code::kDataStart);
-  ReadData(current, limit, space, code_object_address);
+  ReadData(current, limit, code_object_address);

   // Now iterate RelocInfos the same way it was done by the serializer and
   // deserialize respective data into RelocInfos.
@@ -536,28 +530,19 @@ constexpr byte VerifyBytecodeCount(byte bytecode) {
 template <typename TSlot>
 void Deserializer::ReadData(TSlot current, TSlot limit,
-                            SnapshotSpace source_space,
                             Address current_object_address) {
-  // Write barrier support costs around 1% in startup time. In fact there
-  // are no new space objects in current boot snapshots, so it's not needed,
-  // but that may change.
-  bool write_barrier_needed =
-      (current_object_address != kNullAddress &&
-       source_space != SnapshotSpace::kNew &&
-       source_space != SnapshotSpace::kCode && !FLAG_disable_write_barriers);
   while (current < limit) {
     byte data = source_.Get();
     switch (data) {
-#define READ_DATA_CASE_BODY(bytecode)                                       \
-  current = ReadDataCase<TSlot, bytecode>(current, current_object_address,  \
-                                          data, write_barrier_needed);      \
+#define READ_DATA_CASE_BODY(bytecode)                                       \
+  current =                                                                 \
+      ReadDataCase<TSlot, bytecode>(current, current_object_address, data); \
   break;

-// This generates a case and a body for the new space (which has to do extra
-// write barrier handling) and handles the other spaces with fall-through cases
-// and one body.
 #define ALL_SPACES(bytecode)                                  \
-  case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kNew):   \
   case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kOld):   \
   case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kCode):  \
   case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kMap):   \

@@ -586,10 +571,6 @@ void Deserializer::ReadData(TSlot current, TSlot limit,
       // to the current object.
       case kReadOnlyObjectCache:
         READ_DATA_CASE_BODY(kReadOnlyObjectCache)
-      // Find an object in the attached references and write a pointer to it to
-      // the current object.
-      case kAttachedReference:
-        READ_DATA_CASE_BODY(kAttachedReference)
       // Deserialize a new meta-map and write a pointer to it to the current
       // object.
       case kNewMetaMap:
@@ -631,6 +612,26 @@ void Deserializer::ReadData(TSlot current, TSlot limit,
         break;
       }

+      // Find an object in the attached references and write a pointer to it to
+      // the current object.
+      case kAttachedReference: {
+        int index = source_.GetInt();
+        MaybeObject ref = MaybeObject::FromObject(*attached_objects_[index]);
+
+        // This is the only case where we might encounter new space objects, so
+        // don't update current pointer here yet as it may be needed for write
+        // barrier.
+        Write(current, ref);
+        if (Heap::InYoungGeneration(ref)) {
+          HeapObject current_object =
+              HeapObject::FromAddress(current_object_address);
+          GenerationalBarrier(current_object,
+                              MaybeObjectSlot(current.address()), ref);
+        }
+        ++current;
+        break;
+      }
+
       case kNop:
         break;
@@ -696,7 +697,7 @@ void Deserializer::ReadData(TSlot current, TSlot limit,
             reinterpret_cast<void*>(current_object_address + Code::kDataStart),
             size_in_bytes);
         // Deserialize tagged fields in the code object header and reloc infos.
-        ReadCodeObjectBody(source_space, current_object_address);
+        ReadCodeObjectBody(current_object_address);
         // Set current to the code object end.
         current = TSlot(current.address() + Code::kDataStart -
                         HeapObject::kHeaderSize + size_in_bytes);
@@ -782,17 +783,8 @@ void Deserializer::ReadData(TSlot current, TSlot limit,
         if (allocator()->GetAndClearNextReferenceIsWeak()) {
           hot_maybe_object = MaybeObject::MakeWeak(hot_maybe_object);
         }
-        // Don't update current pointer here as it may be needed for write
-        // barrier.
-        Write(current, hot_maybe_object);
-        if (write_barrier_needed && Heap::InYoungGeneration(hot_object)) {
-          HeapObject current_object =
-              HeapObject::FromAddress(current_object_address);
-          GenerationalBarrier(current_object,
-                              MaybeObjectSlot(current.address()),
-                              hot_maybe_object);
-        }
-        ++current;
+        DCHECK(!Heap::InYoungGeneration(hot_maybe_object));
+        current = Write(current, hot_maybe_object);
         break;
       }
@@ -842,8 +834,7 @@ Address Deserializer::ReadExternalReferenceCase() {
 template <typename TSlot, SerializerDeserializer::Bytecode bytecode>
 TSlot Deserializer::ReadDataCase(TSlot current, Address current_object_address,
-                                 byte data, bool write_barrier_needed) {
-  bool emit_write_barrier = false;
+                                 byte data) {
   HeapObject heap_object;
   HeapObjectReferenceType reference_type =
       allocator()->GetAndClearNextReferenceIsWeak()

@@ -853,51 +844,32 @@ TSlot Deserializer::ReadDataCase(TSlot current, Address current_object_address,
   if (bytecode == kNewObject) {
     SnapshotSpace space = SpaceEncoder<bytecode>::Decode(data);
     heap_object = ReadObject(space);
-    emit_write_barrier = (space == SnapshotSpace::kNew);
   } else if (bytecode == kBackref) {
     SnapshotSpace space = SpaceEncoder<bytecode>::Decode(data);
     heap_object = GetBackReferencedObject(space);
-    emit_write_barrier = (space == SnapshotSpace::kNew);
   } else if (bytecode == kNewMetaMap) {
     heap_object = ReadMetaMap();
-    emit_write_barrier = false;
   } else if (bytecode == kRootArray) {
     int id = source_.GetInt();
     RootIndex root_index = static_cast<RootIndex>(id);
     heap_object = HeapObject::cast(isolate()->root(root_index));
-    emit_write_barrier = Heap::InYoungGeneration(heap_object);
     hot_objects_.Add(heap_object);
   } else if (bytecode == kReadOnlyObjectCache) {
     int cache_index = source_.GetInt();
     heap_object = HeapObject::cast(
         isolate()->read_only_heap()->cached_read_only_object(cache_index));
     DCHECK(!Heap::InYoungGeneration(heap_object));
-    emit_write_barrier = false;
-  } else if (bytecode == kStartupObjectCache) {
+  } else {
+    DCHECK_EQ(bytecode, kStartupObjectCache);
     int cache_index = source_.GetInt();
     heap_object =
         HeapObject::cast(isolate()->startup_object_cache()->at(cache_index));
-    emit_write_barrier = Heap::InYoungGeneration(heap_object);
-  } else {
-    DCHECK_EQ(bytecode, kAttachedReference);
-    int index = source_.GetInt();
-    heap_object = *attached_objects_[index];
-    emit_write_barrier = Heap::InYoungGeneration(heap_object);
   }

   HeapObjectReference heap_object_ref =
       reference_type == HeapObjectReferenceType::STRONG
           ? HeapObjectReference::Strong(heap_object)
           : HeapObjectReference::Weak(heap_object);
-  // Don't update current pointer here as it may be needed for write barrier.
-  Write(current, heap_object_ref);
-  if (emit_write_barrier && write_barrier_needed) {
-    DCHECK_IMPLIES(FLAG_disable_write_barriers, !write_barrier_needed);
-    HeapObject host_object = HeapObject::FromAddress(current_object_address);
-    SLOW_DCHECK(isolate()->heap()->Contains(host_object));
-    GenerationalBarrier(host_object, MaybeObjectSlot(current.address()),
-                        heap_object_ref);
-  }
-  return current + 1;
+  DCHECK(!Heap::InYoungGeneration(heap_object));
+  return Write(current, heap_object_ref);
 }
} // namespace internal
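
One pattern worth calling out in the hunks above: the old code wrote via
Write(current, ...) and only advanced current afterwards, because the write
barrier still needed the slot's address after the store. With the barrier
gone, the store and the advance fuse, which is why the new code reads
current = Write(current, ...) and return Write(current, heap_object_ref). The
diff never shows Write's body; a plausible shape (illustrative, not the
verbatim helper):

  template <typename TSlot>
  TSlot Write(TSlot dest, MaybeObject value) {
    dest.store(value);
    return dest + 1;  // safe to advance immediately; no barrier needs the slot
  }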

@@ -131,28 +131,25 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   template <typename TSlot>
   inline TSlot WriteExternalPointer(TSlot dest, Address value);

-  // Fills in some heap data in an area from start to end (non-inclusive). The
-  // space id is used for the write barrier. The object_address is the address
-  // of the object we are writing into, or nullptr if we are not writing into an
-  // object, i.e. if we are writing a series of tagged values that are not on
-  // the heap.
+  // Fills in some heap data in an area from start to end (non-inclusive). The
+  // object_address is the address of the object we are writing into, or nullptr
+  // if we are not writing into an object, i.e. if we are writing a series of
+  // tagged values that are not on the heap.
   template <typename TSlot>
-  void ReadData(TSlot start, TSlot end, SnapshotSpace space,
-                Address object_address);
+  void ReadData(TSlot start, TSlot end, Address object_address);

   // A helper function for ReadData, templatized on the bytecode for efficiency.
   // Returns the new value of {current}.
   template <typename TSlot, Bytecode bytecode>
   inline TSlot ReadDataCase(TSlot current, Address current_object_address,
-                            byte data, bool write_barrier_needed);
+                            byte data);

   // A helper function for ReadData for reading external references.
   inline Address ReadExternalReferenceCase();

   HeapObject ReadObject(SnapshotSpace space_number);
   HeapObject ReadMetaMap();
-  void ReadCodeObjectBody(SnapshotSpace space_number,
-                          Address code_object_address);
+  void ReadCodeObjectBody(Address code_object_address);

  protected:
   HeapObject ReadObject();

@@ -17,7 +17,6 @@ namespace internal {
 // and CODE_LO_SPACE) are not supported.
 enum class SnapshotSpace : byte {
   kReadOnlyHeap = RO_SPACE,
-  kNew = NEW_SPACE,
   kOld = OLD_SPACE,
   kCode = CODE_SPACE,
   kMap = MAP_SPACE,
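
With the AllocationSpace reordering from the first hunk, this enum really is a
truncation: each remaining SnapshotSpace value coincides with one of the
leading AllocationSpace values, and both young spaces fall past the end of the
snapshot-visible prefix. An illustrative compile-time check of that property
(not part of the patch):

  static_assert(static_cast<int>(SnapshotSpace::kReadOnlyHeap) == RO_SPACE &&
                    static_cast<int>(SnapshotSpace::kMap) == MAP_SPACE,
                "SnapshotSpace values mirror the AllocationSpace prefix");
  static_assert(NEW_SPACE > LO_SPACE && NEW_LO_SPACE > LO_SPACE,
                "young spaces sit past every snapshot-serialized space");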

@@ -71,7 +71,7 @@ class SerializerDeserializer : public RootVisitor {
   // clang-format off
 #define UNUSED_SERIALIZER_BYTE_CODES(V)                           \
-  V(0x06) V(0x07) V(0x0e) V(0x0f)                                 \
+  V(0x05) V(0x06) V(0x07) V(0x0d) V(0x0e) V(0x0f)                 \
   /* Free range 0x2a..0x2f */                                     \
   V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f)                 \
   /* Free range 0x30..0x3f */                                     \

@@ -102,7 +102,7 @@ class SerializerDeserializer : public RootVisitor {
   // The static assert below will trigger when the number of preallocated spaces
   // changed. If that happens, update the kNewObject and kBackref bytecode
   // ranges in the comments below.
-  STATIC_ASSERT(6 == kNumberOfSpaces);
+  STATIC_ASSERT(5 == kNumberOfSpaces);

   // First 32 root array items.
   static const int kRootArrayConstantsCount = 0x20;

@@ -124,9 +124,9 @@ class SerializerDeserializer : public RootVisitor {
   // ---------- byte code range 0x00..0x0f ----------
   //
-  // 0x00..0x05  Allocate new object, in specified space.
+  // 0x00..0x04  Allocate new object, in specified space.
   kNewObject = 0x00,
-  // 0x08..0x0d  Reference to previous object from specified space.
+  // 0x08..0x0c  Reference to previous object from specified space.
   kBackref = 0x08,
   //
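
The arithmetic behind these three hunks: space-tagged bytecodes encode as a
base opcode plus the SnapshotSpace index (the SpaceEncoder<bytecode>::Encode
pattern in the deserializer hunks), so dropping kNew shrinks both ranges by
one and frees exactly the slots 0x05 and 0x0d that join
UNUSED_SERIALIZER_BYTE_CODES above. As a worked check (illustrative, not from
the patch):

  constexpr int kNumberOfSnapshotSpaces = 5;  // was 6 with SnapshotSpace::kNew
  constexpr int kLastNewObject = 0x00 + kNumberOfSnapshotSpaces - 1;  // 0x04
  constexpr int kLastBackref = 0x08 + kNumberOfSnapshotSpaces - 1;    // 0x0c
  static_assert(kLastNewObject == 0x04 && kLastBackref == 0x0c,
                "0x05 and 0x0d are now unused");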

@@ -662,7 +662,7 @@ SnapshotSpace GetSnapshotSpace(HeapObject object) {
     } else if (object.IsMap()) {
       return SnapshotSpace::kMap;
     } else {
-      return SnapshotSpace::kNew;  // avoid new/young distinction in TPH
+      return SnapshotSpace::kOld;  // avoid new/young distinction in TPH
     }
   } else if (ReadOnlyHeap::Contains(object)) {
     return SnapshotSpace::kReadOnlyHeap;

@@ -5328,6 +5328,7 @@ TEST(NewSpaceAllocationCounter) {
 TEST(OldSpaceAllocationCounter) {
   ManualGCScope manual_gc_scope;
   CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
   Isolate* isolate = CcTest::i_isolate();

@@ -28313,7 +28313,9 @@ TEST(TriggerDelayedMainThreadMetricsEvent) {
   CHECK_EQ(recorder->count_, 0);        // Unchanged.
   CHECK_EQ(recorder->time_in_us_, -1);  // Unchanged.
   v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1100));
-  v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(), iso);
+  while (v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(),
+                                       iso)) {
+  }
   CHECK_EQ(recorder->count_, 1);  // Increased.
   CHECK_GT(recorder->time_in_us_, 100);
 }

@@ -28324,7 +28326,9 @@
   // invalid.
   i_iso->metrics_recorder()->DelayMainThreadEvent(event, context_id);
   v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1100));
-  v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(), iso);
+  while (v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(),
+                                       iso)) {
+  }
   CHECK_EQ(recorder->count_, 1);  // Unchanged.
 }
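
On this drive-by fix: v8::platform::PumpMessageLoop executes at most one
pending foreground task per call and returns whether it ran one, so a single
call may return before the delayed metrics task has run (the bug the original
change exposed). Looping until it returns false, as both hunks now do, drains
the queue. The idiom in isolation:

  // Keep pumping while tasks were executed; an empty queue returns false.
  while (v8::platform::PumpMessageLoop(platform, isolate)) {
  }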