diff --git a/src/deoptimizer/translated-state.cc b/src/deoptimizer/translated-state.cc
index 68eaeefaa8..c066e94c0b 100644
--- a/src/deoptimizer/translated-state.cc
+++ b/src/deoptimizer/translated-state.cc
@@ -13,7 +13,6 @@
 #include "src/diagnostics/disasm.h"
 #include "src/execution/frames.h"
 #include "src/execution/isolate.h"
-#include "src/heap/heap.h"
 #include "src/numbers/conversions.h"
 #include "src/objects/arguments.h"
 #include "src/objects/heap-number-inl.h"
@@ -1851,9 +1850,7 @@ void TranslatedState::InitializeJSObjectAt(
   CHECK_GE(children_count, 2);
 
   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(
-      *object_storage, no_gc, InvalidateRecordedSlots::kYes,
-      slot->GetChildrenCount() * kTaggedSize);
+  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
 
   // Fill the property array field.
   {
@@ -1905,9 +1902,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
   }
 
   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(
-      *object_storage, no_gc, InvalidateRecordedSlots::kYes,
-      slot->GetChildrenCount() * kTaggedSize);
+  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
 
   // Write the fields to the object.
   for (int i = 1; i < children_count; i++) {
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index bfe087603d..e9551286f9 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -3954,24 +3954,22 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
 
 void Heap::NotifyObjectLayoutChange(
     HeapObject object, const DisallowGarbageCollection&,
-    InvalidateRecordedSlots invalidate_recorded_slots, int new_size) {
-  DCHECK_IMPLIES(invalidate_recorded_slots == InvalidateRecordedSlots::kYes,
-                 new_size > 0);
+    InvalidateRecordedSlots invalidate_recorded_slots) {
   if (incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)
-          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
+          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
     }
   }
   if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
       MayContainRecordedSlots(object)) {
     MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, new_size);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
     MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object, new_size);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
   }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -4648,34 +4646,11 @@ void Heap::Verify() {
   if (new_lo_space_) new_lo_space_->Verify(isolate());
   isolate()->string_table()->VerifyIfOwnedBy(isolate());
 
-  VerifyInvalidatedObjectSize();
-
 #if DEBUG
   VerifyCommittedPhysicalMemory();
 #endif  // DEBUG
 }
 
-namespace {
-void VerifyInvalidatedSlots(InvalidatedSlots* invalidated_slots) {
-  if (!invalidated_slots) return;
-  for (std::pair<HeapObject, int> object_and_size : *invalidated_slots) {
-    HeapObject object = object_and_size.first;
-    int size = object_and_size.second;
-    CHECK_EQ(object.Size(), size);
-  }
-}
-}  // namespace
-
-void Heap::VerifyInvalidatedObjectSize() {
-  OldGenerationMemoryChunkIterator chunk_iterator(this);
-  MemoryChunk* chunk;
-
-  while ((chunk = chunk_iterator.next()) != nullptr) {
-    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_NEW>());
-    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_OLD>());
-  }
-}
-
 void Heap::VerifyReadOnlyHeap() {
   CHECK(!read_only_space_->writable());
   read_only_space_->Verify(isolate());
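The heart of the change so far: invalidated-slot bookkeeping no longer caches each object's size at registration time; the size is recomputed from the object's map when it is actually needed, which also removes the consistency check that VerifyInvalidatedObjectSize performed (a cached size could go stale after a later layout change such as right-trimming). A minimal standalone sketch of the two schemes, using hypothetical Map/Obj stand-ins rather than V8 types:

#include <cstdint>
#include <cstdio>
#include <map>
#include <set>

struct Map { int instance_size; };

struct Obj {
  const Map* map;
  uintptr_t address;
  int Size() const { return map->instance_size; }  // size derived from map
  bool operator<(const Obj& o) const { return address < o.address; }
};

int main() {
  Map map{32};
  Obj obj{&map, 0x1000};

  std::map<Obj, int> eager;  // old scheme: object -> cached size
  eager.emplace(obj, obj.Size());

  std::set<Obj> lazy;  // new scheme: object only
  lazy.insert(obj);

  // A later layout change shrinks the object: the cached size is now stale,
  // while the lazy lookup follows the current map.
  map.instance_size = 16;
  std::printf("cached=%d recomputed=%d\n", eager.begin()->second,
              lazy.begin()->Size());
  return 0;
}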
diff --git a/src/heap/heap.h b/src/heap/heap.h
index d69c36d367..e4d55d539a 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -1114,7 +1114,8 @@ class Heap {
   // manually.
   void NotifyObjectLayoutChange(
       HeapObject object, const DisallowGarbageCollection&,
-      InvalidateRecordedSlots invalidate_recorded_slots, int new_size = 0);
+      InvalidateRecordedSlots invalidate_recorded_slots =
+          InvalidateRecordedSlots::kYes);
 
 #ifdef VERIFY_HEAP
   // This function checks that either
@@ -1583,9 +1584,6 @@ class Heap {
   // created.
   void VerifyReadOnlyHeap();
   void VerifyRememberedSetFor(HeapObject object);
-
-  // Verify that cached size of invalidated object is up-to-date.
-  void VerifyInvalidatedObjectSize();
 #endif
 
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
diff --git a/src/heap/invalidated-slots-inl.h b/src/heap/invalidated-slots-inl.h
index 62287a6884..67551618e7 100644
--- a/src/heap/invalidated-slots-inl.h
+++ b/src/heap/invalidated-slots-inl.h
@@ -28,18 +28,22 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
     NextInvalidatedObject();
   }
 
+  HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
+
+  if (invalidated_size_ == 0) {
+    DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
+    invalidated_size_ = invalidated_object.Size();
+  }
+
   int offset = static_cast<int>(slot - invalidated_start_);
 
   // OLD_TO_OLD can have slots in map word unlike other remembered sets.
   DCHECK_GE(offset, 0);
   DCHECK_IMPLIES(remembered_set_type_ != OLD_TO_OLD, offset > 0);
 
-  if (offset < invalidated_size_) {
-    if (offset == 0) return true;
-    HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
-    DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
-    return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
-  }
+  if (offset < invalidated_size_)
+    return offset == 0 ||
+           invalidated_object.IsValidSlot(invalidated_object.map(), offset);
 
   NextInvalidatedObject();
   return true;
@@ -47,14 +51,12 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
 
 void InvalidatedSlotsFilter::NextInvalidatedObject() {
   invalidated_start_ = next_invalidated_start_;
-  invalidated_size_ = next_invalidated_size_;
+  invalidated_size_ = 0;
 
   if (iterator_ == iterator_end_) {
     next_invalidated_start_ = sentinel_;
-    next_invalidated_size_ = 0;
   } else {
-    next_invalidated_start_ = iterator_->first.address();
-    next_invalidated_size_ = iterator_->second;
+    next_invalidated_start_ = iterator_->address();
     iterator_++;
   }
 }
@@ -85,7 +87,7 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
 void InvalidatedSlotsCleanup::NextInvalidatedObject() {
   if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->first.address();
+    invalidated_start_ = iterator_->address();
   } else {
     invalidated_start_ = sentinel_;
   }
 }
diff --git a/src/heap/invalidated-slots.h b/src/heap/invalidated-slots.h
index fa2b2d5ab6..a1aaf33372 100644
--- a/src/heap/invalidated-slots.h
+++ b/src/heap/invalidated-slots.h
@@ -21,7 +21,5 @@ namespace internal {
-// that potentially invalidates slots recorded concurrently. The second part
-// of each element is the size of the corresponding object before the layout
-// change.
-using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
+// that potentially invalidates slots recorded concurrently.
+using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
 
 // This class provides IsValid predicate that takes into account the set
 // of invalidated objects in the given memory chunk.
@@ -45,10 +45,9 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
   InvalidatedSlots::const_iterator iterator_;
   InvalidatedSlots::const_iterator iterator_end_;
   Address sentinel_;
-  Address invalidated_start_{kNullAddress};
-  Address next_invalidated_start_{kNullAddress};
-  int invalidated_size_{0};
-  int next_invalidated_size_{0};
+  Address invalidated_start_;
+  Address next_invalidated_start_;
+  int invalidated_size_;
   InvalidatedSlots empty_;
 #ifdef DEBUG
   Address last_slot_;
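IsValid above now computes invalidated_size_ lazily: the first slot that lands in an invalidated object triggers a single invalidated_object.Size() lookup, and NextInvalidatedObject resets the cached size to zero. A compact standalone sketch of that pattern over plain addresses (size_at is a hypothetical stand-in for invalidated_object.Size(), and this simplification treats every interior slot as invalid, whereas V8 additionally consults IsValidSlot):

#include <cstdint>
#include <set>

class LazyFilter {
 public:
  // Queries must arrive in ascending address order, below the sentinel.
  LazyFilter(const std::set<uintptr_t>& objects, uintptr_t sentinel,
             int (*size_at)(uintptr_t))
      : it_(objects.begin()), end_(objects.end()), sentinel_(sentinel),
        size_at_(size_at) {
    NextObject();  // prime invalidated start
    NextObject();  // prime next invalidated start
  }

  bool IsValid(uintptr_t slot) {
    while (slot >= next_start_) NextObject();
    if (slot < start_) return true;             // before current object
    if (size_ == 0) size_ = size_at_(start_);   // computed once, on demand
    return static_cast<int>(slot - start_) >= size_;  // outside => valid
  }

 private:
  void NextObject() {
    start_ = next_start_;
    size_ = 0;  // force recomputation for the new object
    next_start_ = (it_ == end_) ? sentinel_ : *it_++;
  }

  std::set<uintptr_t>::const_iterator it_, end_;
  uintptr_t sentinel_;
  int (*size_at_)(uintptr_t);
  uintptr_t start_ = 0, next_start_ = 0;
  int size_ = 0;
};

int main() {
  std::set<uintptr_t> objs = {0x1000};
  LazyFilter f(objs, /*sentinel=*/0xFFFF,
               [](uintptr_t) { return 32; });  // every object is 32 bytes
  return f.IsValid(0x1008) ? 1 : 0;  // slot inside the object: invalid
}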
diff --git a/src/heap/memory-chunk.cc b/src/heap/memory-chunk.cc
index 1925d50b3f..56e8cf6bdd 100644
--- a/src/heap/memory-chunk.cc
+++ b/src/heap/memory-chunk.cc
@@ -364,17 +364,14 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
 }
 
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
-                                                            int new_size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
-                                                            int new_size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
 template V8_EXPORT_PRIVATE void MemoryChunk::RegisterObjectWithInvalidatedSlots<
-    OLD_TO_SHARED>(HeapObject object, int new_size);
+    OLD_TO_SHARED>(HeapObject object);
 
 template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
-                                                     int new_size) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
   bool skip_slot_recording;
 
   switch (type) {
@@ -402,27 +399,23 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
     AllocateInvalidatedSlots<type>();
   }
 
-  DCHECK_GT(new_size, 0);
-  InvalidatedSlots& invalidated_slots = *this->invalidated_slots<type>();
-  DCHECK_IMPLIES(invalidated_slots[object] > 0,
-                 new_size <= invalidated_slots[object]);
-  invalidated_slots.insert_or_assign(object, new_size);
+  invalidated_slots<type>()->insert(object);
 }
 
-void MemoryChunk::InvalidateRecordedSlots(HeapObject object, int new_size) {
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
   if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
   if (heap()->incremental_marking()->IsCompacting()) {
     // We cannot check slot_set_[OLD_TO_OLD] here, since the
     // concurrent markers might insert slots concurrently.
-    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
  }
 
   if (slot_set_[OLD_TO_NEW] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, new_size);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
   }
 
   if (slot_set_[OLD_TO_SHARED] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object, new_size);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
   }
 }
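With no size to carry, registration collapses to a set insert: re-registering the same object is simply idempotent, and the old DCHECK that a re-registration never grows the recorded size becomes moot. A minimal sketch of the templated per-remembered-set pattern, with a hypothetical RememberedSet enum and Chunk type standing in for V8's RememberedSetType and MemoryChunk:

#include <cstdint>
#include <set>

enum RememberedSet { OLD_TO_NEW, OLD_TO_OLD, OLD_TO_SHARED };

struct Chunk {
  std::set<uintptr_t> invalidated[3];  // one set per remembered-set flavor

  template <RememberedSet type>
  void RegisterObjectWithInvalidatedSlots(uintptr_t object) {
    // Inserting an already-registered object is a no-op instead of a
    // "keep the smaller cached size" update.
    invalidated[type].insert(object);
  }
};

// Explicit instantiations, one per remembered set, as in memory-chunk.cc.
template void Chunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(uintptr_t);
template void Chunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(uintptr_t);
template void Chunk::RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(
    uintptr_t);

int main() {
  Chunk c;
  c.RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(0x1000);
  c.RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(0x1000);  // idempotent
  return static_cast<int>(c.invalidated[OLD_TO_OLD].size());  // 1
}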
diff --git a/src/heap/memory-chunk.h b/src/heap/memory-chunk.h
index b7929ab5be..103d6d59d7 100644
--- a/src/heap/memory-chunk.h
+++ b/src/heap/memory-chunk.h
@@ -143,9 +143,8 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseInvalidatedSlots();
   template <RememberedSetType type>
-  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
-                                                            int new_size);
-  void InvalidateRecordedSlots(HeapObject object, int new_size);
+  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
+  void InvalidateRecordedSlots(HeapObject object);
   template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
   template <RememberedSetType type>
diff --git a/src/objects/shared-function-info-inl.h b/src/objects/shared-function-info-inl.h
index 75e0c88024..6fffdf6b19 100644
--- a/src/objects/shared-function-info-inl.h
+++ b/src/objects/shared-function-info-inl.h
@@ -11,7 +11,6 @@
 #include "src/common/globals.h"
 #include "src/handles/handles-inl.h"
 #include "src/heap/heap-write-barrier-inl.h"
-#include "src/heap/heap.h"
 #include "src/objects/debug-objects-inl.h"
 #include "src/objects/feedback-vector-inl.h"
 #include "src/objects/scope-info-inl.h"
@@ -827,8 +826,7 @@ void SharedFunctionInfo::ClearPreparseData() {
   Heap* heap = GetHeapFromWritableObject(data);
 
   // Swap the map.
-  heap->NotifyObjectLayoutChange(data, no_gc, InvalidateRecordedSlots::kYes,
-                                 UncompiledDataWithoutPreparseData::kSize);
+  heap->NotifyObjectLayoutChange(data, no_gc);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
                 UncompiledDataWithPreparseData::kSize);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
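The heap.h change earlier makes InvalidateRecordedSlots::kYes the default and drops the trailing size, which is what lets call sites such as ClearPreparseData shrink to NotifyObjectLayoutChange(data, no_gc). A toy sketch of that API evolution, with hypothetical Heap/Object/NoGC stand-ins:

enum class InvalidateRecordedSlots { kNo, kYes };

struct Object {};
struct NoGC {};

struct Heap {
  // Before: NotifyObjectLayoutChange(obj, no_gc, kYes, new_size).
  // After: the enum gains a default and the size parameter disappears.
  void NotifyObjectLayoutChange(
      Object object, const NoGC&,
      InvalidateRecordedSlots invalidate = InvalidateRecordedSlots::kYes) {
    // ... mark black, register object as invalidated, etc.
    (void)object;
    (void)invalidate;
  }
};

int main() {
  Heap heap;
  Object data;
  NoGC no_gc;
  heap.NotifyObjectLayoutChange(data, no_gc);  // the common case in the diff
  heap.NotifyObjectLayoutChange(data, no_gc, InvalidateRecordedSlots::kNo);
  return 0;
}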
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
-
-  if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(
-        *this, no_gc, InvalidateRecordedSlots::kYes, new_size);
-  }
-
   if (!isolate->heap()->IsLargeObject(*this)) {
     isolate->heap()->CreateFillerObjectAt(
         this->address() + new_size, size - new_size,
@@ -476,6 +475,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(*this).IsIndirect();
 
+  if (has_pointers) {
+    isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
+                                              InvalidateRecordedSlots::kYes);
+  }
+
   base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
       isolate->internalized_string_access());
   // Morph the string to an external string by replacing the map and
@@ -500,11 +504,6 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
 
-  if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(
-        *this, no_gc, InvalidateRecordedSlots::kYes, new_size);
-  }
-
   isolate->heap()->CreateFillerObjectAt(
       this->address() + new_size, size - new_size,
       has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index a24251f8da..46eb14ddff 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -219,8 +219,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
                               receiver->RawField(index.offset()));
     if (!FLAG_enable_third_party_heap) {
       MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
-      int new_size = parent_map->instance_size();
-      chunk->InvalidateRecordedSlots(*receiver, new_size);
+      chunk->InvalidateRecordedSlots(*receiver);
     }
   }
 }
diff --git a/test/cctest/heap/test-compaction.cc b/test/cctest/heap/test-compaction.cc
index f1e7565606..61325bb3b7 100644
--- a/test/cctest/heap/test-compaction.cc
+++ b/test/cctest/heap/test-compaction.cc
@@ -234,15 +234,13 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
       }
     }
     // First object is going to be evacuated.
-    HeapObject front_object = *compaction_page_handles.front();
     to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
-        front_object, front_object.Size());
+        *compaction_page_handles.front());
     // Last object is NOT going to be evacuated.
     // This happens since not all objects fit on the only other page in the
     // old space, the GC isn't allowed to allocate another page.
-    HeapObject back_object = *compaction_page_handles.back();
     to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
-        back_object, back_object.Size());
+        *compaction_page_handles.back());
     to_be_aborted_page->SetFlag(
         MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
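In both MakeExternal variants above, the notification moves from after the map swap (where it needed the freshly computed new_size) to before it, and no longer takes a size at all. An order-of-operations sketch with hypothetical stand-ins (the real code also holds the string-table mutex and special-cases large objects):

#include <cstdio>

struct Str {
  bool has_pointers;
  int size;  // current size, as if derived from the map
};

void NotifyLayoutChange(Str&) { /* register object as invalidated */ }
void SwapMapToExternal(Str& s) { s.size = 16; }  // the layout change itself
void CreateFiller(Str&, int at, int length) {
  std::printf("filler @%d len %d\n", at, length);
}

void MakeExternal(Str& s) {
  int old_size = s.size;
  // New order: notify first, while the old layout is still in place...
  if (s.has_pointers) NotifyLayoutChange(s);
  // ...then morph the string and fill the freed tail with a filler object.
  SwapMapToExternal(s);
  CreateFiller(s, s.size, old_size - s.size);
}

int main() {
  Str s{true, 32};
  MakeExternal(s);
  return 0;
}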
   for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -98,9 +96,7 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -121,18 +117,16 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(
-        byte_arrays[i], ByteArray::kHeaderSize);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // Trim byte arrays and check that the slots outside the byte arrays are
   // considered invalid if the old space page was swept.
+  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
     Address end = byte_array.address() + byte_array.Size();
     heap->RightTrimFixedArray(byte_array, byte_array.length());
-
-    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
     for (Address addr = start; addr < end; addr += kTaggedSize) {
       CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
     }
@@ -150,9 +144,7 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
   // This should be no-op because the page is marked as evacuation
   // candidate.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // All slots must still be valid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -176,9 +168,7 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
   heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // All slots must still be invalid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -371,9 +361,7 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
   }
 
   // Mark full page as free
@@ -392,9 +380,7 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
   }
 
   // Mark each object as free on page
@@ -421,8 +407,7 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
   ByteArray& invalidated = byte_arrays[1];
 
   heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
-  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
-                                                       invalidated.Size());
+  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);
 
   // Free memory at end of invalidated object
   InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
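The test updates are mechanical: every register call drops the size argument along with the local ByteArray temporary that existed only to compute it. A minimal register-then-query round trip in the same spirit, with plain addresses as hypothetical stand-ins for ByteArrays allocated on a Page:

#include <cassert>
#include <cstdint>
#include <set>

int main() {
  std::set<uintptr_t> invalidated;
  uintptr_t objects[] = {0x1000, 0x1040, 0x1080};

  // Register every second object, passing only the object itself.
  for (int i = 0; i < 3; i += 2) invalidated.insert(objects[i]);

  // Slots inside registered objects need the size-based check performed by
  // the filter; slots in unregistered objects are trivially valid.
  assert(invalidated.count(objects[0]) == 1);
  assert(invalidated.count(objects[1]) == 0);
  assert(invalidated.count(objects[2]) == 1);
  return 0;
}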