diff --git a/src/deoptimizer/translated-state.cc b/src/deoptimizer/translated-state.cc
index c066e94c0b..68eaeefaa8 100644
--- a/src/deoptimizer/translated-state.cc
+++ b/src/deoptimizer/translated-state.cc
@@ -13,6 +13,7 @@
 #include "src/diagnostics/disasm.h"
 #include "src/execution/frames.h"
 #include "src/execution/isolate.h"
+#include "src/heap/heap.h"
 #include "src/numbers/conversions.h"
 #include "src/objects/arguments.h"
 #include "src/objects/heap-number-inl.h"
@@ -1850,7 +1851,9 @@ void TranslatedState::InitializeJSObjectAt(
   CHECK_GE(children_count, 2);
 
   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
+  isolate()->heap()->NotifyObjectLayoutChange(
+      *object_storage, no_gc, InvalidateRecordedSlots::kYes,
+      slot->GetChildrenCount() * kTaggedSize);
 
   // Fill the property array field.
   {
@@ -1902,7 +1905,9 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
   }
 
   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
+  isolate()->heap()->NotifyObjectLayoutChange(
+      *object_storage, no_gc, InvalidateRecordedSlots::kYes,
+      slot->GetChildrenCount() * kTaggedSize);
 
   // Write the fields to the object.
   for (int i = 1; i < children_count; i++) {
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index e9551286f9..bfe087603d 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -3954,22 +3954,24 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
 
 void Heap::NotifyObjectLayoutChange(
     HeapObject object, const DisallowGarbageCollection&,
-    InvalidateRecordedSlots invalidate_recorded_slots) {
+    InvalidateRecordedSlots invalidate_recorded_slots, int new_size) {
+  DCHECK_IMPLIES(invalidate_recorded_slots == InvalidateRecordedSlots::kYes,
+                 new_size > 0);
   if (incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)
-          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
+          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
     }
   }
   if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
      MayContainRecordedSlots(object)) {
    MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, new_size);
    MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object, new_size);
  }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -4646,11 +4648,34 @@ void Heap::Verify() {
   if (new_lo_space_) new_lo_space_->Verify(isolate());
   isolate()->string_table()->VerifyIfOwnedBy(isolate());
 
+  VerifyInvalidatedObjectSize();
+
 #if DEBUG
   VerifyCommittedPhysicalMemory();
 #endif  // DEBUG
 }
 
+namespace {
+void VerifyInvalidatedSlots(InvalidatedSlots* invalidated_slots) {
+  if (!invalidated_slots) return;
+  for (std::pair<HeapObject, int> object_and_size : *invalidated_slots) {
+    HeapObject object = object_and_size.first;
+    int size = object_and_size.second;
+    CHECK_EQ(object.Size(), size);
+  }
+}
+}  // namespace
+
+void Heap::VerifyInvalidatedObjectSize() {
+  OldGenerationMemoryChunkIterator chunk_iterator(this);
+  MemoryChunk* chunk;
+
+  while ((chunk = chunk_iterator.next()) != nullptr) {
+    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_NEW>());
+    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_OLD>());
+  }
+}
+
 void Heap::VerifyReadOnlyHeap() {
   CHECK(!read_only_space_->writable());
   read_only_space_->Verify(isolate());
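
Note on the heap.cc change: NotifyObjectLayoutChange() now requires callers to pass the object's size after the layout change, and Heap::Verify() gains a pass that cross-checks every cached size against the live object. Below is a minimal standalone sketch of the invariant the new verifier enforces; `Address`, `LiveObjectSize`, and the fixed return value are illustrative stand-ins, not V8 API:

```cpp
#include <cassert>
#include <cstdint>
#include <map>

using Address = std::uintptr_t;

// Mirrors the patched InvalidatedSlots: object start address -> cached size.
using InvalidatedSlots = std::map<Address, int>;

// Stand-in for HeapObject::Size(), which in V8 reads the object's map.
int LiveObjectSize(Address) { return 32; }

// The invariant VerifyInvalidatedObjectSize() checks per memory chunk: a
// cached size that drifts from the real size would let the slot filter
// misclassify slots near the end of the object.
void CheckInvalidatedSlots(const InvalidatedSlots& slots) {
  for (const auto& [start, cached_size] : slots) {
    assert(LiveObjectSize(start) == cached_size);
  }
}

int main() {
  InvalidatedSlots slots;
  slots.emplace(0x1000, 32);
  CheckInvalidatedSlots(slots);  // passes while the cache is up to date
}
```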
diff --git a/src/heap/heap.h b/src/heap/heap.h
index e4d55d539a..d69c36d367 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -1114,8 +1114,7 @@ class Heap {
   // manually.
   void NotifyObjectLayoutChange(
       HeapObject object, const DisallowGarbageCollection&,
-      InvalidateRecordedSlots invalidate_recorded_slots =
-          InvalidateRecordedSlots::kYes);
+      InvalidateRecordedSlots invalidate_recorded_slots, int new_size = 0);
 
 #ifdef VERIFY_HEAP
   // This function checks that either
@@ -1584,6 +1583,9 @@ class Heap {
   // created.
   void VerifyReadOnlyHeap();
   void VerifyRememberedSetFor(HeapObject object);
+
+  // Verify that cached size of invalidated object is up-to-date.
+  void VerifyInvalidatedObjectSize();
 #endif
 
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
diff --git a/src/heap/invalidated-slots-inl.h b/src/heap/invalidated-slots-inl.h
index 67551618e7..62287a6884 100644
--- a/src/heap/invalidated-slots-inl.h
+++ b/src/heap/invalidated-slots-inl.h
@@ -28,22 +28,18 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
     NextInvalidatedObject();
   }
 
-  HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
-
-  if (invalidated_size_ == 0) {
-    DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
-    invalidated_size_ = invalidated_object.Size();
-  }
-
   int offset = static_cast<int>(slot - invalidated_start_);
 
   // OLD_TO_OLD can have slots in map word unlike other remembered sets.
   DCHECK_GE(offset, 0);
   DCHECK_IMPLIES(remembered_set_type_ != OLD_TO_OLD, offset > 0);
 
-  if (offset < invalidated_size_)
-    return offset == 0 ||
-           invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+  if (offset < invalidated_size_) {
+    if (offset == 0) return true;
+    HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
+    DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
+    return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+  }
 
   NextInvalidatedObject();
   return true;
@@ -51,12 +47,14 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
 
 void InvalidatedSlotsFilter::NextInvalidatedObject() {
   invalidated_start_ = next_invalidated_start_;
-  invalidated_size_ = 0;
+  invalidated_size_ = next_invalidated_size_;
 
   if (iterator_ == iterator_end_) {
     next_invalidated_start_ = sentinel_;
+    next_invalidated_size_ = 0;
   } else {
-    next_invalidated_start_ = iterator_->address();
+    next_invalidated_start_ = iterator_->first.address();
+    next_invalidated_size_ = iterator_->second;
     iterator_++;
   }
 }
@@ -87,7 +85,7 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
 
 void InvalidatedSlotsCleanup::NextInvalidatedObject() {
   if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->address();
+    invalidated_start_ = iterator_->first.address();
   } else {
     invalidated_start_ = sentinel_;
   }
diff --git a/src/heap/invalidated-slots.h b/src/heap/invalidated-slots.h
index a1aaf33372..fa2b2d5ab6 100644
--- a/src/heap/invalidated-slots.h
+++ b/src/heap/invalidated-slots.h
@@ -21,7 +21,7 @@ namespace internal {
 // that potentially invalidates slots recorded concurrently. The second part
 // of each element is the size of the corresponding object before the layout
 // change.
-using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
+using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
 
 // This class provides IsValid predicate that takes into account the set
 // of invalidated objects in the given memory chunk.
@@ -45,9 +45,10 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
   InvalidatedSlots::const_iterator iterator_;
   InvalidatedSlots::const_iterator iterator_end_;
   Address sentinel_;
-  Address invalidated_start_;
-  Address next_invalidated_start_;
-  int invalidated_size_;
+  Address invalidated_start_{kNullAddress};
+  Address next_invalidated_start_{kNullAddress};
+  int invalidated_size_{0};
+  int next_invalidated_size_{0};
   InvalidatedSlots empty_;
 #ifdef DEBUG
   Address last_slot_;
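
Note on the invalidated-slots change: with InvalidatedSlots switched from a std::set of objects to a std::map keyed by object, the filter learns each object's size at registration time, so IsValid no longer computes Size() lazily from a map word that compaction may concurrently forward. The real InvalidatedSlotsFilter walks the map in address order in lock-step with the slots it filters; the sketch below (hypothetical names, plain C++) only illustrates the cached bounds check:

```cpp
#include <cstdint>
#include <map>

using Address = std::uintptr_t;
using InvalidatedSlots = std::map<Address, int>;  // object start -> size

// A slot inside a registered invalidated range is judged from cached data
// alone; only in-bounds slots at a non-zero offset still need to consult
// the object's layout (IsValidSlot) in the real filter.
bool SlotWithinInvalidatedObject(const InvalidatedSlots& slots, Address slot) {
  auto it = slots.upper_bound(slot);  // first object starting after the slot
  if (it == slots.begin()) return false;
  --it;                               // candidate object covering the slot
  return slot - it->first < static_cast<Address>(it->second);
}
```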
diff --git a/src/heap/memory-chunk.cc b/src/heap/memory-chunk.cc
index 56e8cf6bdd..1925d50b3f 100644
--- a/src/heap/memory-chunk.cc
+++ b/src/heap/memory-chunk.cc
@@ -364,14 +364,17 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
 }
 
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
+                                                            int new_size);
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
+                                                            int new_size);
 template V8_EXPORT_PRIVATE void MemoryChunk::RegisterObjectWithInvalidatedSlots<
-    OLD_TO_SHARED>(HeapObject object);
+    OLD_TO_SHARED>(HeapObject object, int new_size);
 
 template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
+                                                     int new_size) {
   bool skip_slot_recording;
 
   switch (type) {
@@ -399,23 +402,27 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
     AllocateInvalidatedSlots<type>();
   }
 
-  invalidated_slots<type>()->insert(object);
+  DCHECK_GT(new_size, 0);
+  InvalidatedSlots& invalidated_slots = *this->invalidated_slots<type>();
+  DCHECK_IMPLIES(invalidated_slots[object] > 0,
+                 new_size <= invalidated_slots[object]);
+  invalidated_slots.insert_or_assign(object, new_size);
 }
 
-void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object, int new_size) {
   if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
   if (heap()->incremental_marking()->IsCompacting()) {
     // We cannot check slot_set_[OLD_TO_OLD] here, since the
     // concurrent markers might insert slots concurrently.
-    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
   }
 
   if (slot_set_[OLD_TO_NEW] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, new_size);
   }
 
   if (slot_set_[OLD_TO_SHARED] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object, new_size);
   }
 }
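
Note on insert_or_assign: an object can be invalidated repeatedly (each in-place shrink re-registers it), and the most recent size must win; std::map::insert would silently keep the stale first entry. The DCHECK_IMPLIES also leans on operator[] default-inserting 0 for unseen keys, which makes the implication vacuous on first registration. A small self-contained demonstration of the distinction:

```cpp
#include <cassert>
#include <map>

int main() {
  std::map<int, int> sizes;              // stand-in for InvalidatedSlots
  sizes.insert({0x1000, 64});            // first registration
  sizes.insert({0x1000, 32});            // no-op: key already present
  assert(sizes[0x1000] == 64);           // stale size survives insert()
  sizes.insert_or_assign(0x1000, 32);    // overwrites the cached size
  assert(sizes[0x1000] == 32);
}
```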
diff --git a/src/heap/memory-chunk.h b/src/heap/memory-chunk.h
index 103d6d59d7..b7929ab5be 100644
--- a/src/heap/memory-chunk.h
+++ b/src/heap/memory-chunk.h
@@ -143,8 +143,9 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseInvalidatedSlots();
   template <RememberedSetType type>
-  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
-  void InvalidateRecordedSlots(HeapObject object);
+  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
+                                                            int new_size);
+  void InvalidateRecordedSlots(HeapObject object, int new_size);
   template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
   template <RememberedSetType type>
diff --git a/src/objects/shared-function-info-inl.h b/src/objects/shared-function-info-inl.h
index 6fffdf6b19..75e0c88024 100644
--- a/src/objects/shared-function-info-inl.h
+++ b/src/objects/shared-function-info-inl.h
@@ -11,6 +11,7 @@
 #include "src/common/globals.h"
 #include "src/handles/handles-inl.h"
 #include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/heap.h"
 #include "src/objects/debug-objects-inl.h"
 #include "src/objects/feedback-vector-inl.h"
 #include "src/objects/scope-info-inl.h"
@@ -826,7 +827,8 @@ void SharedFunctionInfo::ClearPreparseData() {
   Heap* heap = GetHeapFromWritableObject(data);
 
   // Swap the map.
-  heap->NotifyObjectLayoutChange(data, no_gc);
+  heap->NotifyObjectLayoutChange(data, no_gc, InvalidateRecordedSlots::kYes,
+                                 UncompiledDataWithoutPreparseData::kSize);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
                 UncompiledDataWithPreparseData::kSize);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
diff --git a/src/objects/string.cc b/src/objects/string.cc
index 1670a8114c..d9fb334cad 100644
--- a/src/objects/string.cc
+++ b/src/objects/string.cc
@@ -391,11 +391,6 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(*this).IsIndirect();
 
-  if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
-                                              InvalidateRecordedSlots::kYes);
-  }
-
   base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
       isolate->internalized_string_access());
   // Morph the string to an external string by replacing the map and
@@ -419,6 +414,12 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
 
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
+
+  if (has_pointers) {
+    isolate->heap()->NotifyObjectLayoutChange(
+        *this, no_gc, InvalidateRecordedSlots::kYes, new_size);
+  }
+
   if (!isolate->heap()->IsLargeObject(*this)) {
     isolate->heap()->CreateFillerObjectAt(
         this->address() + new_size, size - new_size,
@@ -475,11 +476,6 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(*this).IsIndirect();
 
-  if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
-                                              InvalidateRecordedSlots::kYes);
-  }
-
   base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
       isolate->internalized_string_access());
   // Morph the string to an external string by replacing the map and
@@ -504,6 +500,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
 
+  if (has_pointers) {
+    isolate->heap()->NotifyObjectLayoutChange(
+        *this, no_gc, InvalidateRecordedSlots::kYes, new_size);
+  }
+
   isolate->heap()->CreateFillerObjectAt(
       this->address() + new_size, size - new_size,
       has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
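
Note on the string.cc reordering: both String::MakeExternal() overloads used to notify before swapping the map; the call now sits after new_size is derived from the replacement map, because the notification consumes that size. Roughly, as a paraphrase rather than the literal V8 code:

```cpp
// Sketch of the reordered sequence inside MakeExternal():
//   1. pick the target external map                      (new_map)
//   2. derive the post-change size:                      new_size = SizeFromMap(new_map)
//   3. notify, passing the post-change size:             NotifyObjectLayoutChange(..., new_size)
//   4. fill the freed tail with a filler object:         CreateFillerObjectAt(addr + new_size, ...)
// The whole sequence runs under DisallowGarbageCollection, so no GC can
// observe the object between the notification and the filler write.
```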
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 46eb14ddff..a24251f8da 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -219,7 +219,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
                               receiver->RawField(index.offset()));
     if (!FLAG_enable_third_party_heap) {
       MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
-      chunk->InvalidateRecordedSlots(*receiver);
+      int new_size = parent_map->instance_size();
+      chunk->InvalidateRecordedSlots(*receiver, new_size);
     }
   }
diff --git a/test/cctest/heap/test-compaction.cc b/test/cctest/heap/test-compaction.cc
index 61325bb3b7..f1e7565606 100644
--- a/test/cctest/heap/test-compaction.cc
+++ b/test/cctest/heap/test-compaction.cc
@@ -234,13 +234,15 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
       }
     }
     // First object is going to be evacuated.
+    HeapObject front_object = *compaction_page_handles.front();
     to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
-        *compaction_page_handles.front());
+        front_object, front_object.Size());
     // Last object is NOT going to be evacuated.
     // This happens since not all objects fit on the only other page in the
     // old space, the GC isn't allowed to allocate another page.
+    HeapObject back_object = *compaction_page_handles.back();
     to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
-        *compaction_page_handles.back());
+        back_object, back_object.Size());
     to_be_aborted_page->SetFlag(
         MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
diff --git a/test/cctest/heap/test-invalidated-slots.cc b/test/cctest/heap/test-invalidated-slots.cc
index c161a7456a..8730b12021 100644
--- a/test/cctest/heap/test-invalidated-slots.cc
+++ b/test/cctest/heap/test-invalidated-slots.cc
@@ -71,7 +71,9 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register every second byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
+                                                         byte_array.Size());
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -96,7 +98,9 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
+                                                         byte_array.Size());
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -117,16 +121,18 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(
+        byte_arrays[i], ByteArray::kHeaderSize);
   }
   // Trim byte arrays and check that the slots outside the byte arrays are
   // considered invalid if the old space page was swept.
-  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
     Address end = byte_array.address() + byte_array.Size();
     heap->RightTrimFixedArray(byte_array, byte_array.length());
+
+    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
     for (Address addr = start; addr < end; addr += kTaggedSize) {
       CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
     }
@@ -144,7 +150,9 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
   // This should be no-op because the page is marked as evacuation
   // candidate.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
+                                                         byte_array.Size());
   }
   // All slots must still be valid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -168,7 +176,9 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
   heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
+                                                         byte_array.Size());
   }
   // All slots must still be invalid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -361,7 +371,9 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_array,
+                                                         byte_array.Size());
   }
 
   // Mark full page as free
@@ -380,7 +392,9 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_array,
+                                                         byte_array.Size());
   }
 
   // Mark each object as free on page
@@ -407,7 +421,8 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
   ByteArray& invalidated = byte_arrays[1];
 
   heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
-  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);
+  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
+                                                       invalidated.Size());
 
   // Free memory at end of invalidated object
   InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
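
Note on InvalidatedSlotsAfterTrimming: the filter is now constructed inside the loop. Registered sizes are snapshots, and RightTrimFixedArray() re-registers the shrunk object with its new size, so a filter built before the trims would keep judging slots against stale bounds; rebuilding it after each trim observes the updated entries. A compressed, standalone illustration of that staleness (hypothetical address and sizes):

```cpp
#include <cassert>
#include <cstdint>
#include <map>

using Address = std::uintptr_t;

int main() {
  std::map<Address, int> invalidated;        // object start -> registered size
  invalidated.insert_or_assign(0x1000, 64);  // registered before trimming

  // Filters snapshot entries as they advance; one built now carries 64.
  int snapshot = invalidated.at(0x1000);

  // Right-trimming re-registers the object with its post-trim size.
  invalidated.insert_or_assign(0x1000, 32);

  assert(snapshot == 64);                // stale: misjudges slots in [32, 64)
  assert(invalidated.at(0x1000) == 32);  // a fresh filter sees the new bound
}
```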