diff --git a/BUILD.gn b/BUILD.gn index 0b6498609e..38c71ac665 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -3183,8 +3183,6 @@ v8_source_set("v8_base_without_compiler") { "src/snapshot/context-deserializer.h", "src/snapshot/context-serializer.cc", "src/snapshot/context-serializer.h", - "src/snapshot/deserializer-allocator.cc", - "src/snapshot/deserializer-allocator.h", "src/snapshot/deserializer.cc", "src/snapshot/deserializer.h", "src/snapshot/embedded/embedded-data.cc", @@ -3198,8 +3196,6 @@ v8_source_set("v8_base_without_compiler") { "src/snapshot/references.h", "src/snapshot/roots-serializer.cc", "src/snapshot/roots-serializer.h", - "src/snapshot/serializer-allocator.cc", - "src/snapshot/serializer-allocator.h", "src/snapshot/serializer-deserializer.cc", "src/snapshot/serializer-deserializer.h", "src/snapshot/serializer.cc", diff --git a/src/common/assert-scope.cc b/src/common/assert-scope.cc index 531ac4e024..520826349d 100644 --- a/src/common/assert-scope.cc +++ b/src/common/assert-scope.cc @@ -130,6 +130,8 @@ template class PerThreadAssertScope; template class PerThreadAssertScope; template class PerThreadAssertScope; template class PerThreadAssertScope; +template class PerThreadAssertScope; +template class PerThreadAssertScope; template class PerIsolateAssertScope; template class PerIsolateAssertScope; diff --git a/src/common/assert-scope.h b/src/common/assert-scope.h index b958ca4bed..8937197d26 100644 --- a/src/common/assert-scope.h +++ b/src/common/assert-scope.h @@ -33,6 +33,7 @@ enum PerThreadAssertType { HANDLE_ALLOCATION_ASSERT, HANDLE_DEREFERENCE_ASSERT, CODE_DEPENDENCY_CHANGE_ASSERT, + CODE_ALLOCATION_ASSERT, LAST_PER_THREAD_ASSERT_TYPE }; @@ -128,9 +129,17 @@ using AllowHandleAllocation = PerThreadAssertScopeDebugOnly; // Scope to document where we do not expect garbage collections. It differs from -// DisallowHeapAllocation by also forbiding safepoints. +// DisallowHeapAllocation by also forbidding safepoints. using DisallowGarbageCollection = PerThreadAssertScopeDebugOnly; +// The DISALLOW_GARBAGE_COLLECTION macro can be used to define a +// DisallowGarbageCollection field in classes that isn't present in release +// builds. +#ifdef DEBUG +#define DISALLOW_GARBAGE_COLLECTION(name) DisallowGarbageCollection name; +#else +#define DISALLOW_GARBAGE_COLLECTION(name) +#endif // Scope to introduce an exception to DisallowGarbageCollection. using AllowGarbageCollection = @@ -140,6 +149,9 @@ using AllowGarbageCollection = // and will eventually be removed, use DisallowGarbageCollection instead. using DisallowHeapAllocation = PerThreadAssertScopeDebugOnly; +// The DISALLOW_HEAP_ALLOCATION macro can be used to define a +// DisallowHeapAllocation field in classes that isn't present in release +// builds. #ifdef DEBUG #define DISALLOW_HEAP_ALLOCATION(name) DisallowHeapAllocation name; #else @@ -166,6 +178,14 @@ using DisallowCodeDependencyChange = using AllowCodeDependencyChange = PerThreadAssertScopeDebugOnly; +// Scope to document where we do not expect code to be allocated. +using DisallowCodeAllocation = + PerThreadAssertScopeDebugOnly; + +// Scope to introduce an exception to DisallowCodeAllocation. 
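To make the intended usage of the new scopes concrete: below is a minimal sketch of how the DisallowCodeAllocation scope above, its AllowCodeAllocation counterpart declared next, and the new DISALLOW_GARBAGE_COLLECTION macro would typically be used. The function and class names are hypothetical and not part of this patch, and the snippet assumes the usual V8-internal headers; the scopes only take effect in debug builds, where Heap::AllocateRaw now DCHECKs AllowCodeAllocation::IsAllowed() for AllocationType::kCode requests (see the heap-inl.h hunk further down).

void HypotheticalNoCodeAllocationPhase(Isolate* isolate) {
  // While this scope is alive, any AllocationType::kCode allocation hits the
  // DCHECK(AllowCodeAllocation::IsAllowed()) added in Heap::AllocateRaw.
  DisallowCodeAllocation no_code_allocation;

  // ... work that must not allocate code objects ...

  {
    // A nested Allow scope re-opens code allocation for a well-understood
    // exception, mirroring the existing Disallow/Allow scope pairs.
    AllowCodeAllocation allow_code_allocation;
    // ... allocate the one code object that is actually needed ...
  }
}

// The DISALLOW_GARBAGE_COLLECTION macro embeds the scope as a data member so
// that it covers the owning object's whole lifetime, while compiling away to
// nothing in release builds.
class HypotheticalRawFieldAccessor {
 public:
  explicit HypotheticalRawFieldAccessor(Address field_address)
      : field_address_(field_address) {}

 private:
  Address field_address_;
  DISALLOW_GARBAGE_COLLECTION(no_gc_)
};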
+using AllowCodeAllocation = + PerThreadAssertScopeDebugOnly; + class DisallowHeapAccess { DisallowCodeDependencyChange no_dependency_change_; DisallowHandleAllocation no_handle_allocation_; @@ -273,6 +293,8 @@ extern template class PerThreadAssertScope; extern template class PerThreadAssertScope; extern template class PerThreadAssertScope; +extern template class PerThreadAssertScope; +extern template class PerThreadAssertScope; extern template class PerIsolateAssertScope; extern template class PerIsolateAssertScope; diff --git a/src/common/globals.h b/src/common/globals.h index 08e0cb0f24..83a2420197 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -794,12 +794,7 @@ inline std::ostream& operator<<(std::ostream& os, AllocationType kind) { } // TODO(ishell): review and rename kWordAligned to kTaggedAligned. -enum AllocationAlignment { - kWordAligned, - kDoubleAligned, - kDoubleUnaligned, - kCodeAligned -}; +enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned }; enum class AccessMode { ATOMIC, NON_ATOMIC }; diff --git a/src/compiler/types.cc b/src/compiler/types.cc index 0daf20d78a..4b5b8e1b73 100644 --- a/src/compiler/types.cc +++ b/src/compiler/types.cc @@ -295,6 +295,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case OBJECT_BOILERPLATE_DESCRIPTION_TYPE: case ARRAY_BOILERPLATE_DESCRIPTION_TYPE: case DESCRIPTOR_ARRAY_TYPE: + case STRONG_DESCRIPTOR_ARRAY_TYPE: case TRANSITION_ARRAY_TYPE: case FEEDBACK_CELL_TYPE: case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE: diff --git a/src/diagnostics/objects-debug.cc b/src/diagnostics/objects-debug.cc index b8dcda4ef1..9e1ed7a2fa 100644 --- a/src/diagnostics/objects-debug.cc +++ b/src/diagnostics/objects-debug.cc @@ -27,6 +27,7 @@ #include "src/objects/free-space-inl.h" #include "src/objects/function-kind.h" #include "src/objects/hash-table-inl.h" +#include "src/objects/instance-type.h" #include "src/objects/js-array-inl.h" #include "src/objects/layout-descriptor.h" #include "src/objects/objects-inl.h" @@ -250,6 +251,11 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) { TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(MAKE_TORQUE_CASE) #undef MAKE_TORQUE_CASE + case DESCRIPTOR_ARRAY_TYPE: + case STRONG_DESCRIPTOR_ARRAY_TYPE: + DescriptorArray::cast(*this).DescriptorArrayVerify(isolate); + break; + case FOREIGN_TYPE: break; // No interesting fields. 
diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 9afe8e9445..662a62abf7 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -220,6 +220,10 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(MAKE_TORQUE_CASE) #undef MAKE_TORQUE_CASE + case DESCRIPTOR_ARRAY_TYPE: + case STRONG_DESCRIPTOR_ARRAY_TYPE: + DescriptorArray::cast(*this).DescriptorArrayPrint(os); + break; case FOREIGN_TYPE: Foreign::cast(*this).ForeignPrint(os); break; diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc index 24f4afc58a..b6ec98e2b3 100644 --- a/src/execution/isolate.cc +++ b/src/execution/isolate.cc @@ -2933,6 +2933,9 @@ Isolate::Isolate(std::unique_ptr isolate_allocator) id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)), allocator_(new TracingAccountingAllocator(this)), builtins_(this), +#ifdef VERIFY_HEAP + num_active_deserializers_(0), +#endif rail_mode_(PERFORMANCE_ANIMATION), code_event_dispatcher_(new CodeEventDispatcher()), persistent_handles_list_(new PersistentHandlesList()), diff --git a/src/execution/isolate.h b/src/execution/isolate.h index 13477fb95f..0d75b483ba 100644 --- a/src/execution/isolate.h +++ b/src/execution/isolate.h @@ -698,6 +698,21 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory { return &thread_local_top()->c_function_; } +#ifdef VERIFY_HEAP + // Count the number of active deserializers, so that the heap verifier knows + // whether there is currently an active deserialization happening. + // + // This is needed as the verifier currently doesn't support verifying objects + // which are partially deserialized. + // + // TODO(leszeks): Make the verifier a bit more deserialization compatible. + void RegisterDeserializerStarted() { num_active_deserializers_++; } + void RegisterDeserializerFinished() { num_active_deserializers_--; } + bool has_active_deserializer() const { + return num_active_deserializers_.load(std::memory_order_acquire) > 0; + } +#endif + // Bottom JS entry. 
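As a sketch of how the counter above is meant to be driven, a deserializer brackets its work so that Heap::Verify() can bail out while has_active_deserializer() is true. The RAII wrapper below is hypothetical and only illustrates the intended call pattern under the same VERIFY_HEAP guard as the declarations above:

class HypotheticalActiveDeserializerScope {
 public:
  explicit HypotheticalActiveDeserializerScope(Isolate* isolate)
      : isolate_(isolate) {
#ifdef VERIFY_HEAP
    // Bump the counter so heap verification knows that partially initialized
    // objects may currently be on the heap.
    isolate_->RegisterDeserializerStarted();
#endif
  }
  ~HypotheticalActiveDeserializerScope() {
#ifdef VERIFY_HEAP
    isolate_->RegisterDeserializerFinished();
#endif
  }

 private:
  Isolate* isolate_;
};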
Address js_entry_sp() { return thread_local_top()->js_entry_sp_; } inline Address* js_entry_sp_address() { @@ -1719,6 +1734,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory { RuntimeState runtime_state_; Builtins builtins_; SetupIsolateDelegate* setup_delegate_ = nullptr; +#ifdef VERIFY_HEAP + std::atomic num_active_deserializers_; +#endif #ifndef V8_INTL_SUPPORT unibrow::Mapping jsregexp_uncanonicalize_; unibrow::Mapping jsregexp_canonrange_; diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index ab689283e9..7b050431ad 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -1443,13 +1443,6 @@ DEFINE_BOOL(profile_deserialization, false, "Print the time it takes to deserialize the snapshot.") DEFINE_BOOL(serialization_statistics, false, "Collect statistics on serialized objects.") -#ifdef V8_ENABLE_THIRD_PARTY_HEAP -DEFINE_UINT_READONLY(serialization_chunk_size, 1, - "Custom size for serialization chunks") -#else -DEFINE_UINT(serialization_chunk_size, 4096, - "Custom size for serialization chunks") -#endif // Regexp DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code") DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.") diff --git a/src/heap/factory.cc b/src/heap/factory.cc index de4504ae8e..6f921df661 100644 --- a/src/heap/factory.cc +++ b/src/heap/factory.cc @@ -147,15 +147,12 @@ MaybeHandle Factory::CodeBuilder::BuildInternal( HeapObject result; AllocationType allocation_type = is_executable_ ? AllocationType::kCode : AllocationType::kReadOnly; - AllocationAlignment alignment = is_executable_ - ? AllocationAlignment::kCodeAligned - : AllocationAlignment::kWordAligned; if (retry_allocation_or_fail) { result = heap->AllocateRawWith( - object_size, allocation_type, AllocationOrigin::kRuntime, alignment); + object_size, allocation_type, AllocationOrigin::kRuntime); } else { result = heap->AllocateRawWith( - object_size, allocation_type, AllocationOrigin::kRuntime, alignment); + object_size, allocation_type, AllocationOrigin::kRuntime); // Return an empty handle if we cannot allocate the code object. if (result.is_null()) return MaybeHandle(); } @@ -2126,8 +2123,7 @@ Handle Factory::CopyCode(Handle code) { int obj_size = code->Size(); CodePageCollectionMemoryModificationScope code_allocation(heap); HeapObject result = heap->AllocateRawWith( - obj_size, AllocationType::kCode, AllocationOrigin::kRuntime, - AllocationAlignment::kCodeAligned); + obj_size, AllocationType::kCode, AllocationOrigin::kRuntime); // Copy code object. 
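The factory changes above drop the explicit code alignment because kCodeAligned no longer exists in the AllocationAlignment enum (see the globals.h hunk earlier); code and map allocations now simply use word alignment, which the updated DCHECK_IMPLIES in heap-inl.h below enforces. A hedged sketch of the resulting policy, with a hypothetical helper name and parameter:

AllocationAlignment HypotheticalAlignmentFor(AllocationType type,
                                             bool needs_double_alignment) {
  // kCodeAligned is gone: AllocationType::kCode and kMap requests now use the
  // default word alignment, matching the DCHECK_IMPLIES in Heap::AllocateRaw.
  if (type == AllocationType::kCode || type == AllocationType::kMap) {
    return AllocationAlignment::kWordAligned;
  }
  return needs_double_alignment ? AllocationAlignment::kDoubleAligned
                                : AllocationAlignment::kWordAligned;
}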
Address old_addr = code->address(); diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h index d4dc7e2b8c..85a63a273f 100644 --- a/src/heap/heap-inl.h +++ b/src/heap/heap-inl.h @@ -171,8 +171,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type, DCHECK(AllowHandleAllocation::IsAllowed()); DCHECK(AllowHeapAllocation::IsAllowed()); DCHECK(AllowGarbageCollection::IsAllowed()); - DCHECK_IMPLIES(type == AllocationType::kCode, - alignment == AllocationAlignment::kCodeAligned); + DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap, + alignment == AllocationAlignment::kWordAligned); DCHECK_EQ(gc_state(), NOT_IN_GC); #ifdef V8_ENABLE_ALLOCATION_TIMEOUT if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) { @@ -223,6 +223,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type, allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin); } } else if (AllocationType::kCode == type) { + DCHECK(AllowCodeAllocation::IsAllowed()); if (large_object) { allocation = code_lo_space_->AllocateRaw(size_in_bytes); } else { @@ -231,7 +232,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type, } else if (AllocationType::kMap == type) { allocation = map_space_->AllocateRawUnaligned(size_in_bytes); } else if (AllocationType::kReadOnly == type) { - DCHECK(isolate_->serializer_enabled()); DCHECK(!large_object); DCHECK(CanAllocateInReadOnlySpace()); DCHECK_EQ(AllocationOrigin::kRuntime, origin); diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 4506ed71aa..a3c1e3d93f 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -1878,125 +1878,6 @@ static void VerifyStringTable(Isolate* isolate) { } #endif // VERIFY_HEAP -bool Heap::ReserveSpace(Reservation* reservations, std::vector
* maps) { - bool gc_performed = true; - int counter = 0; - static const int kThreshold = 20; - while (gc_performed && counter++ < kThreshold) { - gc_performed = false; - for (int space = FIRST_SPACE; - space < static_cast(SnapshotSpace::kNumberOfHeapSpaces); - space++) { - DCHECK_NE(space, NEW_SPACE); - DCHECK_NE(space, NEW_LO_SPACE); - Reservation* reservation = &reservations[space]; - DCHECK_LE(1, reservation->size()); - if (reservation->at(0).size == 0) { - DCHECK_EQ(1, reservation->size()); - continue; - } - bool perform_gc = false; - if (space == MAP_SPACE) { - // We allocate each map individually to avoid fragmentation. - maps->clear(); - DCHECK_LE(reservation->size(), 2); - int reserved_size = 0; - for (const Chunk& c : *reservation) reserved_size += c.size; - DCHECK_EQ(0, reserved_size % Map::kSize); - int num_maps = reserved_size / Map::kSize; - for (int i = 0; i < num_maps; i++) { - AllocationResult allocation; -#if V8_ENABLE_THIRD_PARTY_HEAP_BOOL - allocation = AllocateRaw(Map::kSize, AllocationType::kMap, - AllocationOrigin::kRuntime, kWordAligned); -#else - allocation = map_space()->AllocateRawUnaligned(Map::kSize); -#endif - HeapObject free_space; - if (allocation.To(&free_space)) { - // Mark with a free list node, in case we have a GC before - // deserializing. - Address free_space_address = free_space.address(); - CreateFillerObjectAt(free_space_address, Map::kSize, - ClearRecordedSlots::kNo); - maps->push_back(free_space_address); - } else { - perform_gc = true; - break; - } - } - } else if (space == LO_SPACE) { - // Just check that we can allocate during deserialization. - DCHECK_LE(reservation->size(), 2); - int reserved_size = 0; - for (const Chunk& c : *reservation) reserved_size += c.size; - perform_gc = !CanExpandOldGeneration(reserved_size); - } else { - for (auto& chunk : *reservation) { - AllocationResult allocation; - int size = chunk.size; - DCHECK_LE(static_cast(size), - MemoryChunkLayout::AllocatableMemoryInMemoryChunk( - static_cast(space))); -#if V8_ENABLE_THIRD_PARTY_HEAP_BOOL - AllocationType type = (space == CODE_SPACE) - ? AllocationType::kCode - : (space == RO_SPACE) - ? AllocationType::kReadOnly - : AllocationType::kYoung; - AllocationAlignment align = - (space == CODE_SPACE) ? kCodeAligned : kWordAligned; - allocation = - AllocateRaw(size, type, AllocationOrigin::kRuntime, align); -#else - if (space == RO_SPACE) { - allocation = read_only_space()->AllocateRaw( - size, AllocationAlignment::kWordAligned); - } else { - // The deserializer will update the skip list. - allocation = paged_space(space)->AllocateRawUnaligned(size); - } -#endif - HeapObject free_space; - if (allocation.To(&free_space)) { - // Mark with a free list node, in case we have a GC before - // deserializing. - Address free_space_address = free_space.address(); - CreateFillerObjectAt(free_space_address, size, - ClearRecordedSlots::kNo); - DCHECK(IsPreAllocatedSpace(static_cast(space))); - chunk.start = free_space_address; - chunk.end = free_space_address + size; - } else { - perform_gc = true; - break; - } - } - } - if (perform_gc) { - // We cannot perfom a GC with an uninitialized isolate. This check - // fails for example if the max old space size is chosen unwisely, - // so that we cannot allocate space to deserialize the initial heap. 
- if (!deserialization_complete_) { - V8::FatalProcessOutOfMemory( - isolate(), "insufficient memory to create an Isolate"); - } - if (counter > 1) { - CollectAllGarbage(kReduceMemoryFootprintMask, - GarbageCollectionReason::kDeserializer); - } else { - CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kDeserializer); - } - gc_performed = true; - break; // Abort for-loop over spaces and retry. - } - } - } - - return !gc_performed; -} - - void Heap::EnsureFromSpaceIsCommitted() { if (new_space_->CommitFromSpaceIfNeeded()) return; @@ -3538,47 +3419,6 @@ void Heap::FinalizeIncrementalMarkingIncrementally( InvokeIncrementalMarkingEpilogueCallbacks(); } -void Heap::RegisterDeserializedObjectsForBlackAllocation( - Reservation* reservations, const std::vector& large_objects, - const std::vector
& maps) { - // TODO(ulan): pause black allocation during deserialization to avoid - // iterating all these objects in one go. - - if (!incremental_marking()->black_allocation()) return; - - // Iterate black objects in old space, code space, map space, and large - // object space for side effects. - IncrementalMarking::MarkingState* marking_state = - incremental_marking()->marking_state(); - for (int i = OLD_SPACE; - i < static_cast(SnapshotSpace::kNumberOfHeapSpaces); i++) { - const Heap::Reservation& res = reservations[i]; - for (auto& chunk : res) { - Address addr = chunk.start; - while (addr < chunk.end) { - HeapObject obj = HeapObject::FromAddress(addr); - // Objects can have any color because incremental marking can - // start in the middle of Heap::ReserveSpace(). - if (marking_state->IsBlack(obj)) { - incremental_marking()->ProcessBlackAllocatedObject(obj); - } - addr += obj.Size(); - } - } - } - - // Large object space doesn't use reservations, so it needs custom handling. - for (HeapObject object : large_objects) { - incremental_marking()->ProcessBlackAllocatedObject(object); - } - - // Map space doesn't use reservations, so it needs custom handling. - for (Address addr : maps) { - incremental_marking()->ProcessBlackAllocatedObject( - HeapObject::FromAddress(addr)); - } -} - void Heap::NotifyObjectLayoutChange( HeapObject object, const DisallowHeapAllocation&, InvalidateRecordedSlots invalidate_recorded_slots) { @@ -4149,6 +3989,7 @@ void Heap::Verify() { // We have to wait here for the sweeper threads to have an iterable heap. mark_compact_collector()->EnsureSweepingCompleted(); + array_buffer_sweeper()->EnsureFinished(); VerifyPointersVisitor visitor(this); @@ -4160,6 +4001,12 @@ void Heap::Verify() { .NormalizedMapCacheVerify(isolate()); } + // The heap verifier can't deal with partially deserialized objects, so + // disable it if a deserializer is active. + // TODO(leszeks): Enable verification during deserialization, e.g. by only + // blocklisting objects that are in a partially deserialized state. + if (isolate()->has_active_deserializer()) return; + VerifySmisVisitor smis_visitor; IterateSmiRoots(&smis_visitor); @@ -5146,7 +4993,14 @@ HeapObject Heap::AllocateRawWithLightRetrySlowPath( HeapObject result; AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment); if (alloc.To(&result)) { - DCHECK(result != ReadOnlyRoots(this).exception()); + // DCHECK that the successful allocation is not "exception". The one + // exception to this is when allocating the "exception" object itself, in + // which case this must be an ROSpace allocation and the exception object + // in the roots has to be unset. + DCHECK((CanAllocateInReadOnlySpace() && + allocation == AllocationType::kReadOnly && + ReadOnlyRoots(this).unchecked_exception() == Smi::zero()) || + result != ReadOnlyRoots(this).exception()); return result; } // Two GCs before panicking. In newspace will almost always succeed. diff --git a/src/heap/heap.h b/src/heap/heap.h index b8220dad5e..6f03204cc1 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -666,9 +666,6 @@ class Heap { template inline AllocationMemento FindAllocationMemento(Map map, HeapObject object); - // Returns false if not able to reserve. - bool ReserveSpace(Reservation* reservations, std::vector
* maps); - void RequestAndWaitForCollection(); // @@ -1061,10 +1058,6 @@ class Heap { V8_EXPORT_PRIVATE void FinalizeIncrementalMarkingAtomically( GarbageCollectionReason gc_reason); - void RegisterDeserializedObjectsForBlackAllocation( - Reservation* reservations, const std::vector& large_objects, - const std::vector
& maps); - IncrementalMarking* incremental_marking() { return incremental_marking_.get(); } @@ -2347,6 +2340,7 @@ class Heap { // The allocator interface. friend class Factory; + friend class Deserializer; // The Isolate constructs us. friend class Isolate; diff --git a/src/heap/local-heap-inl.h b/src/heap/local-heap-inl.h index 770e1cb8e9..d147a0b1b4 100644 --- a/src/heap/local-heap-inl.h +++ b/src/heap/local-heap-inl.h @@ -20,8 +20,8 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type, DCHECK(AllowHandleAllocation::IsAllowed()); DCHECK(AllowHeapAllocation::IsAllowed()); DCHECK(AllowGarbageCollection::IsAllowed()); - DCHECK_IMPLIES(type == AllocationType::kCode, - alignment == AllocationAlignment::kCodeAligned); + DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap, + alignment == AllocationAlignment::kWordAligned); Heap::HeapState state = heap()->gc_state(); DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC); #endif diff --git a/src/heap/marking-visitor-inl.h b/src/heap/marking-visitor-inl.h index 532e1c9fd5..bdc955b4bb 100644 --- a/src/heap/marking-visitor-inl.h +++ b/src/heap/marking-visitor-inl.h @@ -9,6 +9,8 @@ #include "src/heap/objects-visiting-inl.h" #include "src/heap/objects-visiting.h" #include "src/heap/spaces.h" +#include "src/objects/objects.h" +#include "src/snapshot/deserializer.h" namespace v8 { namespace internal { @@ -349,8 +351,7 @@ int MarkingVisitorBase::VisitWeakCell( // =========================================================================== template -size_t -MarkingVisitorBase::MarkDescriptorArrayBlack( +int MarkingVisitorBase::MarkDescriptorArrayBlack( DescriptorArray descriptors) { concrete_visitor()->marking_state()->WhiteToGrey(descriptors); if (concrete_visitor()->marking_state()->GreyToBlack(descriptors)) { @@ -388,37 +389,65 @@ int MarkingVisitorBase::VisitDescriptorArray( return size; } +template +int MarkingVisitorBase::VisitDescriptorsForMap( + Map map) { + if (!map.CanTransition()) return 0; + + // Maps that can transition share their descriptor arrays and require + // special visiting logic to avoid memory leaks. + // Since descriptor arrays are potentially shared, ensure that only the + // descriptors that belong to this map are marked. The first time a + // non-empty descriptor array is marked, its header is also visited. The + // slot holding the descriptor array will be implicitly recorded when the + // pointer fields of this map are visited. + + Object maybe_descriptors = + TaggedField::Acquire_Load( + heap_->isolate(), map); + + // If the descriptors are a Smi, then this Map is in the process of being + // deserialized, and doesn't yet have an initialized descriptor field. + if (maybe_descriptors.IsSmi()) { + DCHECK_EQ(maybe_descriptors, Deserializer::uninitialized_field_value()); + return 0; + } + + DescriptorArray descriptors = DescriptorArray::cast(maybe_descriptors); + + // Don't do any special processing of strong descriptor arrays, let them get + // marked through the normal visitor mechanism. + if (descriptors.IsStrongDescriptorArray()) { + return 0; + } + + int size = MarkDescriptorArrayBlack(descriptors); + int number_of_own_descriptors = map.NumberOfOwnDescriptors(); + if (number_of_own_descriptors) { + // It is possible that the concurrent marker observes the + // number_of_own_descriptors out of sync with the descriptors. In that + // case the marking write barrier for the descriptor array will ensure + // that all required descriptors are marked. 
The concurrent marker + // just should avoid crashing in that case. That's why we need the + // std::min() below. + VisitDescriptors(descriptors, + std::min(number_of_own_descriptors, + descriptors.number_of_descriptors())); + } + + return size; +} + template int MarkingVisitorBase::VisitMap(Map meta_map, Map map) { if (!concrete_visitor()->ShouldVisit(map)) return 0; int size = Map::BodyDescriptor::SizeOf(meta_map, map); - if (map.CanTransition()) { - // Maps that can transition share their descriptor arrays and require - // special visiting logic to avoid memory leaks. - // Since descriptor arrays are potentially shared, ensure that only the - // descriptors that belong to this map are marked. The first time a - // non-empty descriptor array is marked, its header is also visited. The - // slot holding the descriptor array will be implicitly recorded when the - // pointer fields of this map are visited. - DescriptorArray descriptors = map.synchronized_instance_descriptors(); - size += MarkDescriptorArrayBlack(descriptors); - int number_of_own_descriptors = map.NumberOfOwnDescriptors(); - if (number_of_own_descriptors) { - // It is possible that the concurrent marker observes the - // number_of_own_descriptors out of sync with the descriptors. In that - // case the marking write barrier for the descriptor array will ensure - // that all required descriptors are marked. The concurrent marker - // just should avoid crashing in that case. That's why we need the - // std::min() below. - VisitDescriptors(descriptors, - std::min(number_of_own_descriptors, - descriptors.number_of_descriptors())); - } - // Mark the pointer fields of the Map. Since the transitions array has - // been marked already, it is fine that one of these fields contains a - // pointer to it. - } + size += VisitDescriptorsForMap(map); + + // Mark the pointer fields of the Map. If there is a transitions array, it has + // been marked already, so it is fine that one of these fields contains a + // pointer to it. Map::BodyDescriptor::IterateBody(meta_map, map, size, this); return size; } diff --git a/src/heap/marking-visitor.h b/src/heap/marking-visitor.h index 3707fc6031..37219a2385 100644 --- a/src/heap/marking-visitor.h +++ b/src/heap/marking-visitor.h @@ -220,6 +220,9 @@ class MarkingVisitorBase : public HeapVisitor { V8_INLINE void VisitDescriptors(DescriptorArray descriptors, int number_of_own_descriptors); + + V8_INLINE int VisitDescriptorsForMap(Map map); + template int VisitEmbedderTracingSubclass(Map map, T object); V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object, @@ -227,7 +230,7 @@ class MarkingVisitorBase : public HeapVisitor { // Marks the descriptor array black without pushing it on the marking work // list and visits its header. Returns the size of the descriptor array // if it was successully marked as black. - V8_INLINE size_t MarkDescriptorArrayBlack(DescriptorArray descriptors); + V8_INLINE int MarkDescriptorArrayBlack(DescriptorArray descriptors); // Marks the object grey and pushes it on the marking work list. 
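The Smi check in VisitDescriptorsForMap above (and the analogous one added to TransitionArray::GetTargetIfExists further down) follows one pattern: while the deserializer is still filling in an object, slots it has not reached yet hold the Smi sentinel Deserializer::uninitialized_field_value(), so concurrent readers must test for a Smi before casting. A minimal sketch of that pattern, with a hypothetical helper name and the slot value passed in directly:

bool HypotheticalTryGetDescriptors(Object maybe_descriptors,
                                   DescriptorArray* out) {
  if (maybe_descriptors.IsSmi()) {
    // The owning Map is still being deserialized; the slot only contains the
    // uninitialized-field sentinel, not a real DescriptorArray yet.
    DCHECK_EQ(maybe_descriptors, Deserializer::uninitialized_field_value());
    return false;
  }
  *out = DescriptorArray::cast(maybe_descriptors);
  return true;
}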
V8_INLINE void MarkObject(HeapObject host, HeapObject obj); diff --git a/src/heap/setup-heap-internal.cc b/src/heap/setup-heap-internal.cc index b1844256e8..a95063194e 100644 --- a/src/heap/setup-heap-internal.cc +++ b/src/heap/setup-heap-internal.cc @@ -386,6 +386,7 @@ bool Heap::CreateInitialMaps() { ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol, Context::SYMBOL_FUNCTION_INDEX) ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign) + ALLOCATE_VARSIZE_MAP(STRONG_DESCRIPTOR_ARRAY_TYPE, strong_descriptor_array) ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean, Context::BOOLEAN_FUNCTION_INDEX); diff --git a/src/objects/descriptor-array.tq b/src/objects/descriptor-array.tq index 0b088b3d73..c791f3988c 100644 --- a/src/objects/descriptor-array.tq +++ b/src/objects/descriptor-array.tq @@ -25,3 +25,7 @@ extern class DescriptorArray extends HeapObject { enum_cache: EnumCache; descriptors[number_of_all_descriptors]: DescriptorEntry; } + +// A descriptor array where all values are held strongly. +extern class StrongDescriptorArray extends DescriptorArray + generates 'TNode'; diff --git a/src/objects/map-inl.h b/src/objects/map-inl.h index 01beb50652..1cc5c2bb2e 100644 --- a/src/objects/map-inl.h +++ b/src/objects/map-inl.h @@ -20,6 +20,7 @@ #include "src/objects/shared-function-info.h" #include "src/objects/templates-inl.h" #include "src/objects/transitions-inl.h" +#include "src/objects/transitions.h" #include "src/wasm/wasm-objects-inl.h" // Has to be the last include (doesn't have include guards): diff --git a/src/objects/map.cc b/src/objects/map.cc index e380fd85f0..2b37c5e41f 100644 --- a/src/objects/map.cc +++ b/src/objects/map.cc @@ -204,6 +204,7 @@ VisitorId Map::GetVisitorId(Map map) { return kVisitPropertyCell; case DESCRIPTOR_ARRAY_TYPE: + case STRONG_DESCRIPTOR_ARRAY_TYPE: return kVisitDescriptorArray; case TRANSITION_ARRAY_TYPE: diff --git a/src/objects/object-list-macros.h b/src/objects/object-list-macros.h index 9eef5c0dbf..6de6f553e6 100644 --- a/src/objects/object-list-macros.h +++ b/src/objects/object-list-macros.h @@ -281,6 +281,7 @@ class ZoneForwardList; V(ModuleContext) \ V(NonNullForeign) \ V(ScriptContext) \ + V(StrongDescriptorArray) \ V(WithContext) #define HEAP_OBJECT_TYPE_LIST(V) \ diff --git a/src/objects/objects-body-descriptors-inl.h b/src/objects/objects-body-descriptors-inl.h index 275ac9a9e6..c4ef19d345 100644 --- a/src/objects/objects-body-descriptors-inl.h +++ b/src/objects/objects-body-descriptors-inl.h @@ -947,6 +947,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) { case PROPERTY_ARRAY_TYPE: return Op::template apply(p1, p2, p3, p4); case DESCRIPTOR_ARRAY_TYPE: + case STRONG_DESCRIPTOR_ARRAY_TYPE: return Op::template apply(p1, p2, p3, p4); case TRANSITION_ARRAY_TYPE: diff --git a/src/objects/objects.cc b/src/objects/objects.cc index 9e3a4aa903..fd95270263 100644 --- a/src/objects/objects.cc +++ b/src/objects/objects.cc @@ -63,6 +63,7 @@ #include "src/objects/free-space-inl.h" #include "src/objects/function-kind.h" #include "src/objects/hash-table-inl.h" +#include "src/objects/instance-type.h" #include "src/objects/js-array-inl.h" #include "src/objects/keys.h" #include "src/objects/lookup-inl.h" @@ -2241,7 +2242,8 @@ int HeapObject::SizeFromMap(Map map) const { return FeedbackMetadata::SizeFor( FeedbackMetadata::unchecked_cast(*this).synchronized_slot_count()); } - if (instance_type == DESCRIPTOR_ARRAY_TYPE) { + if (base::IsInRange(instance_type, FIRST_DESCRIPTOR_ARRAY_TYPE, + LAST_DESCRIPTOR_ARRAY_TYPE)) { 
return DescriptorArray::SizeFor( DescriptorArray::unchecked_cast(*this).number_of_all_descriptors()); } @@ -2306,6 +2308,7 @@ int HeapObject::SizeFromMap(Map map) const { bool HeapObject::NeedsRehashing() const { switch (map().instance_type()) { case DESCRIPTOR_ARRAY_TYPE: + case STRONG_DESCRIPTOR_ARRAY_TYPE: return DescriptorArray::cast(*this).number_of_descriptors() > 1; case TRANSITION_ARRAY_TYPE: return TransitionArray::cast(*this).number_of_entries() > 1; @@ -2345,6 +2348,7 @@ bool HeapObject::CanBeRehashed() const { case SIMPLE_NUMBER_DICTIONARY_TYPE: return true; case DESCRIPTOR_ARRAY_TYPE: + case STRONG_DESCRIPTOR_ARRAY_TYPE: return true; case TRANSITION_ARRAY_TYPE: return true; diff --git a/src/objects/transitions-inl.h b/src/objects/transitions-inl.h index ac5b1034c2..fca6be40e2 100644 --- a/src/objects/transitions-inl.h +++ b/src/objects/transitions-inl.h @@ -5,13 +5,13 @@ #ifndef V8_OBJECTS_TRANSITIONS_INL_H_ #define V8_OBJECTS_TRANSITIONS_INL_H_ -#include "src/objects/transitions.h" - #include "src/ic/handler-configuration-inl.h" #include "src/objects/fixed-array-inl.h" #include "src/objects/maybe-object-inl.h" #include "src/objects/slots.h" #include "src/objects/smi.h" +#include "src/objects/transitions.h" +#include "src/snapshot/deserializer.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -157,6 +157,13 @@ bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate, Map* target) { MaybeObject raw = GetRawTarget(transition_number); HeapObject heap_object; + // If the raw target is a Smi, then this TransitionArray is in the process of + // being deserialized, and doesn't yet have an initialized entry for this + // transition. + if (raw.IsSmi()) { + DCHECK_EQ(raw.ToSmi(), Deserializer::uninitialized_field_value()); + return false; + } if (raw->GetHeapObjectIfStrong(&heap_object) && heap_object.IsUndefined(isolate)) { return false; diff --git a/src/roots/roots.h b/src/roots/roots.h index 27f2f5792a..5a21ae67ed 100644 --- a/src/roots/roots.h +++ b/src/roots/roots.h @@ -86,6 +86,7 @@ class Symbol; V(Map, code_data_container_map, CodeDataContainerMap) \ V(Map, coverage_info_map, CoverageInfoMap) \ V(Map, descriptor_array_map, DescriptorArrayMap) \ + V(Map, strong_descriptor_array_map, StrongDescriptorArrayMap) \ V(Map, fixed_double_array_map, FixedDoubleArrayMap) \ V(Map, global_dictionary_map, GlobalDictionaryMap) \ V(Map, many_closures_cell_map, ManyClosuresCellMap) \ diff --git a/src/snapshot/code-serializer.cc b/src/snapshot/code-serializer.cc index 5eec7668a2..b497613ce2 100644 --- a/src/snapshot/code-serializer.cc +++ b/src/snapshot/code-serializer.cc @@ -36,9 +36,7 @@ ScriptData::ScriptData(const byte* data, int length) CodeSerializer::CodeSerializer(Isolate* isolate, uint32_t source_hash) : Serializer(isolate, Snapshot::kDefaultSerializerFlags), - source_hash_(source_hash) { - allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size); -} + source_hash_(source_hash) {} // static ScriptCompiler::CachedData* CodeSerializer::Serialize( @@ -64,11 +62,11 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize( // Serialize code object. 
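Because the new STRONG_DESCRIPTOR_ARRAY_TYPE falls inside the FIRST_/LAST_DESCRIPTOR_ARRAY_TYPE range, call sites can either list both case labels (as in NeedsRehashing and CanBeRehashed above) or use the range test, as HeapObject::SizeFromMap now does. A small sketch of the range form, with a hypothetical helper name:

inline bool HypotheticalIsDescriptorArrayType(InstanceType instance_type) {
  // Covers both DESCRIPTOR_ARRAY_TYPE and STRONG_DESCRIPTOR_ARRAY_TYPE, the
  // same test HeapObject::SizeFromMap uses before calling
  // DescriptorArray::SizeFor.
  return base::IsInRange(instance_type, FIRST_DESCRIPTOR_ARRAY_TYPE,
                         LAST_DESCRIPTOR_ARRAY_TYPE);
}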
Handle source(String::cast(script->source()), isolate); + HandleScope scope(isolate); CodeSerializer cs(isolate, SerializedCodeData::SourceHash( source, script->origin_options())); DisallowGarbageCollection no_gc; - cs.reference_map()->AddAttachedReference( - reinterpret_cast(source->ptr())); + cs.reference_map()->AddAttachedReference(*source); ScriptData* script_data = cs.SerializeSharedFunctionInfo(info); if (FLAG_profile_deserialization) { @@ -100,13 +98,13 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo( return data.GetScriptData(); } -bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) { - if (!ReadOnlyHeap::Contains(obj)) return false; +bool CodeSerializer::SerializeReadOnlyObject(Handle obj) { + if (!ReadOnlyHeap::Contains(*obj)) return false; // For objects on the read-only heap, never serialize the object, but instead // create a back reference that encodes the page number as the chunk_index and // the offset within the page as the chunk_offset. - Address address = obj.address(); + Address address = obj->address(); BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address); uint32_t chunk_index = 0; ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space(); @@ -115,14 +113,13 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) { ++chunk_index; } uint32_t chunk_offset = static_cast(chunk->Offset(address)); - SerializerReference back_reference = SerializerReference::BackReference( - SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset); - reference_map()->Add(reinterpret_cast(obj.ptr()), back_reference); - CHECK(SerializeBackReference(obj)); + sink_.Put(kReadOnlyHeapRef, "ReadOnlyHeapRef"); + sink_.PutInt(chunk_index, "ReadOnlyHeapRefChunkIndex"); + sink_.PutInt(chunk_offset, "ReadOnlyHeapRefChunkOffset"); return true; } -void CodeSerializer::SerializeObject(HeapObject obj) { +void CodeSerializer::SerializeObject(Handle obj) { if (SerializeHotObject(obj)) return; if (SerializeRoot(obj)) return; @@ -131,60 +128,60 @@ void CodeSerializer::SerializeObject(HeapObject obj) { if (SerializeReadOnlyObject(obj)) return; - CHECK(!obj.IsCode()); + CHECK(!obj->IsCode()); ReadOnlyRoots roots(isolate()); - if (ElideObject(obj)) { - return SerializeObject(roots.undefined_value()); + if (ElideObject(*obj)) { + return SerializeObject(roots.undefined_value_handle()); } - if (obj.IsScript()) { - Script script_obj = Script::cast(obj); - DCHECK_NE(script_obj.compilation_type(), Script::COMPILATION_TYPE_EVAL); + if (obj->IsScript()) { + Handle