From d4a742fdf16598872944c4378e41b59f35c1f9d6 Mon Sep 17 00:00:00 2001
From: Ulan Degenbaev
Date: Tue, 1 Aug 2017 21:56:07 +0200
Subject: [PATCH] Reland "[heap] Add mechanism for tracking invalidated slots
 per memory chunk."

This reverts commit c59b81d7b8e0062afdbbdb9b72bebaf8b056ccc0.

Original change's description:
> [heap] Add mechanism for tracking invalidated slots per memory chunk.
>
> For correct slots recording in the concurrent marker, we need to resolve
> the race that happens when
> 1) the mutator is invalidating slots for double unboxing or string
>    conversions,
> 2) and the concurrent marker is recording these slots.
>
> This patch adds a data structure for tracking the invalidated objects.
> Thus we can allow the concurrent marker to record slots without
> worrying about clearing them. During the old-to-old pointer updating
> phase we re-check all slots that belong to the invalidated objects.

BUG=chromium:694255
Change-Id: Idf8927d162377a7bbdff34f81a87e52db27d6a9f
Reviewed-on: https://chromium-review.googlesource.com/596868
Reviewed-by: Michael Lippautz
Commit-Queue: Ulan Degenbaev
Cr-Commit-Position: refs/heads/master@{#47068}
---
 BUILD.gn                                   |  3 +
 src/heap/heap.cc                           |  9 +-
 src/heap/heap.h                            |  3 +-
 src/heap/invalidated-slots-inl.h           | 61 ++++++++++++++
 src/heap/invalidated-slots.cc              | 35 ++++++++
 src/heap/invalidated-slots.h               | 55 +++++++++++++
 src/heap/mark-compact.cc                   | 32 ++++++--
 src/heap/remembered-set.h                  |  4 +-
 src/heap/spaces.cc                         | 24 ++++++
 src/heap/spaces.h                          | 11 ++-
 src/objects.cc                             | 19 +++--
 src/runtime/runtime-object.cc              |  3 +-
 src/v8.gyp                                 |  3 +
 test/cctest/BUILD.gn                       |  1 +
 test/cctest/cctest.gyp                     |  1 +
 test/cctest/heap/heap-tester.h             |  1 +
 test/cctest/heap/test-invalidated-slots.cc | 96 ++++++++++++++++++++++
 17 files changed, 341 insertions(+), 20 deletions(-)
 create mode 100644 src/heap/invalidated-slots-inl.h
 create mode 100644 src/heap/invalidated-slots.cc
 create mode 100644 src/heap/invalidated-slots.h
 create mode 100644 test/cctest/heap/test-invalidated-slots.cc
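The mechanism the message describes reduces to two pieces: a per-chunk map
from the start address of each invalidated object to its size before the
layout change, and a forward-only filter that consults that map while
old-to-old slots are updated. A self-contained sketch of that shape (plain
C++ with toy names like Register and Filter, not the patch's API; unlike the
real filter it treats every slot inside an invalidated region as stale
instead of re-asking the object):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <map>

    using Address = std::uintptr_t;

    // Object start address -> object size before the layout change.
    std::map<Address, int> invalidated;

    void Register(Address object_start, int old_size) {
      int& size = invalidated[object_start];
      size = std::max(size, old_size);  // keep the larger size if re-registered
    }

    // Forward-only filter: queries must come in non-decreasing address order.
    class Filter {
     public:
      explicit Filter(const std::map<Address, int>& map)
          : it_(map.begin()), end_(map.end()) {}
      bool IsValid(Address slot) {
        // Skip invalidated regions that end at or before the slot.
        while (it_ != end_ && slot >= it_->first + it_->second) ++it_;
        // Valid if the slot lies before the next invalidated region.
        return it_ == end_ || slot < it_->first;
      }

     private:
      std::map<Address, int>::const_iterator it_;
      std::map<Address, int>::const_iterator end_;
    };

    int main() {
      Register(0x1000, 0x40);  // object at 0x1000, 0x40 bytes before the change
      Filter filter(invalidated);
      assert(filter.IsValid(0x0ff8));   // before the invalidated region
      assert(!filter.IsValid(0x1008));  // inside it
      assert(filter.IsValid(0x1040));   // past its old end
    }

Because the filter never rewinds, each invalidated region is visited at most
once no matter how many slots are queried.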
diff --git a/BUILD.gn b/BUILD.gn
index e3152ace0f..a57c152050 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1581,6 +1581,9 @@ v8_source_set("v8_base") {
     "src/heap/incremental-marking-job.h",
     "src/heap/incremental-marking.cc",
     "src/heap/incremental-marking.h",
+    "src/heap/invalidated-slots-inl.h",
+    "src/heap/invalidated-slots.cc",
+    "src/heap/invalidated-slots.h",
     "src/heap/item-parallel-job.h",
     "src/heap/local-allocator.h",
     "src/heap/mark-compact-inl.h",
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index f04204ea95..729dea5ddb 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -4605,10 +4605,17 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
   }
 }
 
-void Heap::NotifyObjectLayoutChange(HeapObject* object,
+void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
                                     const DisallowHeapAllocation&) {
+  DCHECK(InOldSpace(object) || InNewSpace(object));
   if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndPush(object);
+    if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
+      // The concurrent marker might have recorded slots for the object.
+      // Register this object as invalidated to filter out the slots.
+      MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+      chunk->RegisterObjectWithInvalidatedSlots(object, size);
+    }
   }
 #ifdef VERIFY_HEAP
   DCHECK(pending_layout_change_object_ == nullptr);
diff --git a/src/heap/heap.h b/src/heap/heap.h
index d1ab66d192..a3bee56d6a 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -1186,7 +1186,8 @@ class Heap {
 
   // The runtime uses this function to notify potentially unsafe object layout
   // changes that require special synchronization with the concurrent marker.
-  void NotifyObjectLayoutChange(HeapObject* object,
+  // The old size is the size of the object before the layout change.
+  void NotifyObjectLayoutChange(HeapObject* object, int old_size,
                                 const DisallowHeapAllocation&);
 
 #ifdef VERIFY_HEAP
diff --git a/src/heap/invalidated-slots-inl.h b/src/heap/invalidated-slots-inl.h
new file mode 100644
index 0000000000..8f71575187
--- /dev/null
+++ b/src/heap/invalidated-slots-inl.h
@@ -0,0 +1,61 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INVALIDATED_SLOTS_INL_H
+#define V8_INVALIDATED_SLOTS_INL_H
+
+#include <map>
+
+#include "src/allocation.h"
+#include "src/heap/invalidated-slots.h"
+#include "src/heap/spaces.h"
+#include "src/objects-body-descriptors-inl.h"
+#include "src/objects-body-descriptors.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+bool InvalidatedSlotsFilter::IsValid(Address slot) {
+#ifdef DEBUG
+  DCHECK_LT(slot, sentinel_);
+  // Slots must come in non-decreasing order.
+  DCHECK_LE(last_slot_, slot);
+  last_slot_ = slot;
+#endif
+  while (slot >= invalidated_end_) {
+    ++iterator_;
+    if (iterator_ != iterator_end_) {
+      // Invalidated ranges must not overlap.
+      DCHECK_LE(invalidated_end_, iterator_->first->address());
+      invalidated_start_ = iterator_->first->address();
+      invalidated_end_ = invalidated_start_ + iterator_->second;
+    } else {
+      invalidated_start_ = sentinel_;
+      invalidated_end_ = sentinel_;
+    }
+  }
+  // Now the invalidated region ends after the slot.
+  if (slot < invalidated_start_) {
+    // The invalidated region starts after the slot.
+    return true;
+  }
+  // The invalidated region includes the slot.
+  // Ask the object if the slot is valid.
+  if (invalidated_object_ == nullptr) {
+    invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
+    invalidated_object_size_ =
+        invalidated_object_->SizeFromMap(invalidated_object_->map());
+  }
+  int offset = static_cast<int>(slot - invalidated_start_);
+  DCHECK_GT(offset, 0);
+
+  return offset < invalidated_object_size_ &&
+         invalidated_object_->IsValidSlot(offset);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INVALIDATED_SLOTS_INL_H
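IsValid above has three outcomes: the slot precedes the current invalidated
region (valid), lies at or past its end (advance the iterator and re-check),
or falls inside it, in which case the object itself is asked via IsValidSlot
whether the offset still holds a tagged value. A toy model of that last step,
with IsTaggedField standing in for the body-descriptor query (illustrative
code under assumed layout, not V8's):

    #include <cassert>

    // Toy layout: an object whose first kTaggedWords words still hold tagged
    // pointers after the layout change; everything past them is raw data.
    constexpr int kWordSize = 8;
    constexpr int kTaggedWords = 2;

    bool IsTaggedField(int offset) { return offset / kWordSize < kTaggedWords; }

    // Mirrors the tail of InvalidatedSlotsFilter::IsValid: a slot inside an
    // invalidated object stays valid only if the object still treats that
    // offset as a tagged field.
    bool IsValidInsideInvalidated(int offset, int object_size) {
      return offset < object_size && IsTaggedField(offset);
    }

    int main() {
      const int size = 5 * kWordSize;
      assert(IsValidInsideInvalidated(8, size));    // still a tagged field
      assert(!IsValidInsideInvalidated(24, size));  // now raw data
      assert(!IsValidInsideInvalidated(48, size));  // past the old object end
    }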
diff --git a/src/heap/invalidated-slots.cc b/src/heap/invalidated-slots.cc
new file mode 100644
index 0000000000..85430e58bc
--- /dev/null
+++ b/src/heap/invalidated-slots.cc
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/invalidated-slots.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
+  DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
+                 chunk->owner()->identity() == OLD_SPACE);
+  InvalidatedSlots* invalidated_slots =
+      chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
+  iterator_ = invalidated_slots->begin();
+  iterator_end_ = invalidated_slots->end();
+  sentinel_ = chunk->area_end();
+  if (iterator_ != iterator_end_) {
+    invalidated_start_ = iterator_->first->address();
+    invalidated_end_ = invalidated_start_ + iterator_->second;
+  } else {
+    invalidated_start_ = sentinel_;
+    invalidated_end_ = sentinel_;
+  }
+  // These values will be lazily set when needed.
+  invalidated_object_ = nullptr;
+  invalidated_object_size_ = 0;
+#ifdef DEBUG
+  last_slot_ = chunk->area_start();
+#endif
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/invalidated-slots.h b/src/heap/invalidated-slots.h
new file mode 100644
index 0000000000..6aa4ded748
--- /dev/null
+++ b/src/heap/invalidated-slots.h
@@ -0,0 +1,55 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INVALIDATED_SLOTS_H
+#define V8_INVALIDATED_SLOTS_H
+
+#include <map>
+#include <stack>
+
+#include "src/allocation.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/bits.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapObject;
+
+// This data structure stores objects that went through object layout change
+// that potentially invalidates slots recorded concurrently. The second part
+// of each element is the size of the corresponding object before the layout
+// change.
+using InvalidatedSlots = std::map<HeapObject*, int>;
+
+// This class provides the IsValid predicate that takes into account the set
+// of invalidated objects in the given memory chunk.
+// The sequence of queried slots must be non-decreasing. This allows a fast
+// implementation with complexity O(m*log(m) + n), where
+// m is the number of invalidated objects in the memory chunk.
+// n is the number of IsValid queries.
+class InvalidatedSlotsFilter {
+ public:
+  explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
+  inline bool IsValid(Address slot);
+
+ private:
+  InvalidatedSlots::const_iterator iterator_;
+  InvalidatedSlots::const_iterator iterator_end_;
+  Address sentinel_;
+  Address invalidated_start_;
+  Address invalidated_end_;
+  HeapObject* invalidated_object_;
+  int invalidated_object_size_;
+  InvalidatedSlots empty_;
+#ifdef DEBUG
+  Address last_slot_;
+#endif
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INVALIDATED_SLOTS_H
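A detail the filter relies on: InvalidatedSlots is keyed by HeapObject*, and
std::map iterates its keys in increasing order, so invalidated regions come
out sorted by address and can be merged with the non-decreasing slot queries
in a single forward pass. That is also where the stated O(m*log(m) + n)
bound comes from: m logarithmic insertions, then a linear merge. A small
standalone check of the ordering assumption (raw pointers into one buffer,
so the comparisons are well defined):

    #include <cassert>
    #include <map>

    int main() {
      char page[1024];  // stand-in for a memory chunk
      std::map<char*, int> invalidated;
      // Register in arbitrary order; the map keeps keys sorted by address.
      invalidated[page + 512] = 64;
      invalidated[page + 0] = 128;
      invalidated[page + 256] = 32;
      char* previous = page;
      for (const auto& entry : invalidated) {
        assert(previous <= entry.first);  // address order, as the filter assumes
        previous = entry.first + entry.second;
        assert(previous <= page + sizeof(page));  // ranges stay on the chunk
      }
    }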
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 9562e1fc34..a39a6ada17 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -21,6 +21,8 @@
 #include "src/heap/concurrent-marking.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
+#include "src/heap/invalidated-slots-inl.h"
+#include "src/heap/invalidated-slots.h"
 #include "src/heap/item-parallel-job.h"
 #include "src/heap/local-allocator.h"
 #include "src/heap/mark-compact-inl.h"
@@ -3263,6 +3265,14 @@ void MarkCompactCollector::EvacuateEpilogue() {
   heap()->new_space()->set_age_mark(heap()->new_space()->top());
   // Old space. Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
+#ifdef DEBUG
+  // Old-to-old slot sets must be empty after evacuation.
+  for (Page* p : *heap()->old_space()) {
+    DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
+    DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
+    DCHECK_NULL(p->invalidated_slots());
+  }
+#endif
 }
 
 class Evacuator : public Malloced {
@@ -4130,13 +4140,21 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }
     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
-      RememberedSet<OLD_TO_OLD>::Iterate(
-          chunk_,
-          [](Address slot) {
-            return UpdateSlot<AccessMode::NON_ATOMIC>(
-                reinterpret_cast<Object**>(slot));
-          },
-          SlotSet::PREFREE_EMPTY_BUCKETS);
+      InvalidatedSlotsFilter filter(chunk_);
+      RememberedSet<OLD_TO_OLD>::Iterate(
+          chunk_,
+          [&filter](Address slot) {
+            if (!filter.IsValid(slot)) return REMOVE_SLOT;
+            return UpdateSlot<AccessMode::NON_ATOMIC>(
+                reinterpret_cast<Object**>(slot));
+          },
+          SlotSet::PREFREE_EMPTY_BUCKETS);
+    }
+    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
+        chunk_->invalidated_slots() != nullptr) {
+      // The invalidated slots are not needed after old-to-old slots were
+      // processed.
+      chunk_->ReleaseInvalidatedSlots();
     }
   }
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index f44d45852b..6ebedae078 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -120,7 +120,8 @@ class RememberedSet : public AllStatic {
     while ((chunk = it.next()) != nullptr) {
       SlotSet* slots = chunk->slot_set<type>();
       TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
-      if (slots != nullptr || typed_slots != nullptr) {
+      if (slots != nullptr || typed_slots != nullptr ||
+          chunk->invalidated_slots() != nullptr) {
         callback(chunk);
       }
     }
@@ -230,6 +231,7 @@ class RememberedSet : public AllStatic {
     while ((chunk = it.next()) != nullptr) {
       chunk->ReleaseSlotSet<OLD_TO_OLD>();
       chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
+      chunk->ReleaseInvalidatedSlots();
     }
   }
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 0a416d1373..a7c7b723f4 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -549,6 +549,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     nullptr);
   base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                     nullptr);
+  chunk->invalidated_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -1216,6 +1217,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
   ReleaseSlotSet<OLD_TO_OLD>();
   ReleaseTypedSlotSet<OLD_TO_NEW>();
   ReleaseTypedSlotSet<OLD_TO_OLD>();
+  ReleaseInvalidatedSlots();
   if (local_tracker_ != nullptr) ReleaseLocalTracker();
   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
 }
@@ -1286,6 +1288,28 @@ void MemoryChunk::ReleaseTypedSlotSet() {
   }
 }
 
+InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
+  DCHECK_NULL(invalidated_slots_);
+  invalidated_slots_ = new InvalidatedSlots();
+  return invalidated_slots_;
+}
+
+void MemoryChunk::ReleaseInvalidatedSlots() {
+  if (invalidated_slots_) {
+    delete invalidated_slots_;
+    invalidated_slots_ = nullptr;
+  }
+}
+
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
+                                                     int size) {
+  if (invalidated_slots() == nullptr) {
+    AllocateInvalidatedSlots();
+  }
+  int old_size = (*invalidated_slots())[object];
+  (*invalidated_slots())[object] = std::max(old_size, size);
+}
+
 void MemoryChunk::AllocateLocalTracker() {
   DCHECK_NULL(local_tracker_);
   local_tracker_ = new LocalArrayBufferTracker(heap());
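Two properties of the spaces.cc code above are worth calling out: the set is
allocated lazily, so chunks that never see a layout change pay nothing, and
re-registering an object keeps the maximum recorded size, so a shrinking
object cannot hide slots that were recorded for its larger layout. A
compilable toy mirroring that lifecycle (Chunk, Register, and Release are
illustrative names, not the patch's API):

    #include <algorithm>
    #include <cassert>
    #include <map>

    // Toy chunk owning a lazily allocated invalidated-object map, mirroring
    // AllocateInvalidatedSlots / ReleaseInvalidatedSlots.
    struct Chunk {
      std::map<void*, int>* invalidated_slots = nullptr;

      void Register(void* object, int size) {
        if (invalidated_slots == nullptr) {
          invalidated_slots = new std::map<void*, int>();  // allocate on demand
        }
        int& old_size = (*invalidated_slots)[object];
        old_size = std::max(old_size, size);  // re-registration keeps the max
      }
      void Release() {  // after old-to-old slots are processed, or at teardown
        delete invalidated_slots;
        invalidated_slots = nullptr;
      }
    };

    int main() {
      Chunk chunk;
      int dummy;
      assert(chunk.invalidated_slots == nullptr);  // most chunks never pay
      chunk.Register(&dummy, 32);
      chunk.Register(&dummy, 16);  // smaller re-registration is ignored
      assert(chunk.invalidated_slots->at(&dummy) == 32);
      chunk.Release();
      assert(chunk.invalidated_slots == nullptr);
    }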
"src/globals.h" #include "src/heap/heap.h" +#include "src/heap/invalidated-slots.h" #include "src/heap/marking.h" #include "src/list.h" #include "src/objects.h" @@ -354,7 +356,8 @@ class MemoryChunk { + kIntptrSize // intptr_t live_byte_count_ + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array - + kPointerSize // SkipList* skip_list_ + + kPointerSize // InvalidatedSlots* invalidated_slots_ + + kPointerSize // SkipList* skip_list_ + kPointerSize // AtomicValue high_water_mark_ + kPointerSize // base::RecursiveMutex* mutex_ + kPointerSize // base::AtomicWord concurrent_sweeping_ @@ -472,6 +475,11 @@ class MemoryChunk { template void ReleaseTypedSlotSet(); + InvalidatedSlots* AllocateInvalidatedSlots(); + void ReleaseInvalidatedSlots(); + void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size); + InvalidatedSlots* invalidated_slots() { return invalidated_slots_; } + void AllocateLocalTracker(); void ReleaseLocalTracker(); inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; } @@ -631,6 +639,7 @@ class MemoryChunk { // is ceil(size() / kPageSize). SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES]; TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES]; + InvalidatedSlots* invalidated_slots_; SkipList* skip_list_; diff --git a/src/objects.cc b/src/objects.cc index aa3ddacc12..27988c7541 100644 --- a/src/objects.cc +++ b/src/objects.cc @@ -2598,7 +2598,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { bool is_internalized = this->IsInternalizedString(); bool has_pointers = StringShape(this).IsIndirect(); if (has_pointers) { - heap->NotifyObjectLayoutChange(this, no_allocation); + heap->NotifyObjectLayoutChange(this, size, no_allocation); } // Morph the string to an external string by replacing the map and // reinitializing the fields. This won't work if the space the existing @@ -2674,7 +2674,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) { bool has_pointers = StringShape(this).IsIndirect(); if (has_pointers) { - heap->NotifyObjectLayoutChange(this, no_allocation); + heap->NotifyObjectLayoutChange(this, size, no_allocation); } // Morph the string to an external string by replacing the map and @@ -3980,7 +3980,9 @@ void MigrateFastToFast(Handle object, Handle new_map) { Heap* heap = isolate->heap(); - heap->NotifyObjectLayoutChange(*object, no_allocation); + int old_instance_size = old_map->instance_size(); + + heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation); // Copy (real) inobject properties. If necessary, stop at number_of_fields to // avoid overwriting |one_pointer_filler_map|. @@ -4014,7 +4016,7 @@ void MigrateFastToFast(Handle object, Handle new_map) { // Create filler object past the new instance size. int new_instance_size = new_map->instance_size(); - int instance_size_delta = old_map->instance_size() - new_instance_size; + int instance_size_delta = old_instance_size - new_instance_size; DCHECK(instance_size_delta >= 0); if (instance_size_delta > 0) { @@ -4096,11 +4098,12 @@ void MigrateFastToSlow(Handle object, Handle new_map, DisallowHeapAllocation no_allocation; Heap* heap = isolate->heap(); - heap->NotifyObjectLayoutChange(*object, no_allocation); + int old_instance_size = map->instance_size(); + heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation); // Resize the object in the heap if necessary. 
diff --git a/src/objects.cc b/src/objects.cc
index aa3ddacc12..27988c7541 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2598,7 +2598,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(this).IsIndirect();
   if (has_pointers) {
-    heap->NotifyObjectLayoutChange(this, no_allocation);
+    heap->NotifyObjectLayoutChange(this, size, no_allocation);
   }
   // Morph the string to an external string by replacing the map and
   // reinitializing the fields.  This won't work if the space the existing
@@ -2674,7 +2674,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool has_pointers = StringShape(this).IsIndirect();
 
   if (has_pointers) {
-    heap->NotifyObjectLayoutChange(this, no_allocation);
+    heap->NotifyObjectLayoutChange(this, size, no_allocation);
   }
 
   // Morph the string to an external string by replacing the map and
@@ -3980,7 +3980,9 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
 
   Heap* heap = isolate->heap();
 
-  heap->NotifyObjectLayoutChange(*object, no_allocation);
+  int old_instance_size = old_map->instance_size();
+
+  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
 
   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
   // avoid overwriting |one_pointer_filler_map|.
@@ -4014,7 +4016,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
 
   // Create filler object past the new instance size.
   int new_instance_size = new_map->instance_size();
-  int instance_size_delta = old_map->instance_size() - new_instance_size;
+  int instance_size_delta = old_instance_size - new_instance_size;
   DCHECK(instance_size_delta >= 0);
 
   if (instance_size_delta > 0) {
@@ -4096,11 +4098,12 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
   DisallowHeapAllocation no_allocation;
 
   Heap* heap = isolate->heap();
-  heap->NotifyObjectLayoutChange(*object, no_allocation);
+  int old_instance_size = map->instance_size();
+  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
 
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
-  int instance_size_delta = map->instance_size() - new_instance_size;
+  int instance_size_delta = old_instance_size - new_instance_size;
   DCHECK(instance_size_delta >= 0);
 
   if (instance_size_delta > 0) {
@@ -17076,11 +17079,11 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
   if (!string->IsInternalizedString()) {
     DisallowHeapAllocation no_gc;
-    isolate->heap()->NotifyObjectLayoutChange(string, no_gc);
+    int old_size = string->Size();
+    isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
     bool one_byte = internalized->IsOneByteRepresentation();
     Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
                                : isolate->factory()->thin_string_map();
-    int old_size = string->Size();
     DCHECK(old_size >= ThinString::kSize);
     string->synchronized_set_map(*map);
     ThinString* thin = ThinString::cast(string);
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 2b9d79ca48..3f4a104d44 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -160,7 +160,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
   // Zap the property to avoid keeping objects alive. Zapping is not necessary
   // for properties stored in the descriptor array.
   if (details.location() == kField) {
-    isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
+    isolate->heap()->NotifyObjectLayoutChange(*receiver, map->instance_size(),
+                                              no_allocation);
     Object* filler = isolate->heap()->one_pointer_filler_map();
     FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
     JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
diff --git a/src/v8.gyp b/src/v8.gyp
index 07ee64d8e4..0b91565a54 100644
--- a/src/v8.gyp
+++ b/src/v8.gyp
@@ -1027,6 +1027,9 @@
         'heap/incremental-marking-job.h',
         'heap/incremental-marking.cc',
         'heap/incremental-marking.h',
+        'heap/invalidated-slots-inl.h',
+        'heap/invalidated-slots.cc',
+        'heap/invalidated-slots.h',
         'heap/item-parallel-job.h',
         'heap/local-allocator.h',
         'heap/mark-compact-inl.h',
diff --git a/test/cctest/BUILD.gn b/test/cctest/BUILD.gn
index 8b6404be95..4cd9550d12 100644
--- a/test/cctest/BUILD.gn
+++ b/test/cctest/BUILD.gn
@@ -78,6 +78,7 @@ v8_executable("cctest") {
     "heap/test-concurrent-marking.cc",
     "heap/test-heap.cc",
    "heap/test-incremental-marking.cc",
+    "heap/test-invalidated-slots.cc",
     "heap/test-lab.cc",
     "heap/test-mark-compact.cc",
     "heap/test-page-promotion.cc",
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index ccf79fc450..f383374c8e 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -96,6 +96,7 @@
       'heap/test-concurrent-marking.cc',
       'heap/test-heap.cc',
       'heap/test-incremental-marking.cc',
+      'heap/test-invalidated-slots.cc',
      'heap/test-lab.cc',
       'heap/test-mark-compact.cc',
       'heap/test-page-promotion.cc',
diff --git a/test/cctest/heap/heap-tester.h b/test/cctest/heap/heap-tester.h
index 099a231ff1..51f9430271 100644
--- a/test/cctest/heap/heap-tester.h
+++ b/test/cctest/heap/heap-tester.h
@@ -16,6 +16,7 @@
   V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
   V(CompactionSpaceDivideMultiplePages)                   \
   V(CompactionSpaceDivideSinglePage)                      \
+  V(InvalidatedSlots)                                     \
   V(TestNewSpaceRefsInCopiedCode)                         \
   V(GCFlags)                                              \
   V(MarkCompactCollector)                                 \
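One detail that runs through all the objects.cc hunks: the old size must be
read before the map is swapped, because the size is derived from the map; the
MakeStringThin hunk even moves the existing int old_size = string->Size()
above the layout notification for exactly this reason. A minimal standalone
illustration of the ordering hazard (toy Map and Object types, not V8's):

    #include <cassert>

    struct Map { int instance_size; };

    struct Object {
      const Map* map;
      int Size() const { return map->instance_size; }
    };

    int main() {
      Map big{64}, small{32};
      Object o{&big};
      int old_size = o.Size();  // capture BEFORE the layout change
      o.map = &small;           // the layout change (map swap)
      assert(old_size == 64);   // correct pre-change size
      assert(o.Size() == 32);   // reading after the swap would be wrong
    }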
diff --git a/test/cctest/heap/test-invalidated-slots.cc b/test/cctest/heap/test-invalidated-slots.cc
new file mode 100644
index 0000000000..795b8d994e
--- /dev/null
+++ b/test/cctest/heap/test-invalidated-slots.cc
@@ -0,0 +1,96 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/invalidated-slots-inl.h"
+#include "src/heap/invalidated-slots.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+HEAP_TEST(InvalidatedSlots) {
+  CcTest::InitializeVM();
+  Heap* heap = CcTest::heap();
+  Isolate* isolate = heap->isolate();
+  PagedSpace* old_space = heap->old_space();
+  Page* page;
+  std::vector<ByteArray*> byte_arrays;
+  const int kLength = 256 - ByteArray::kHeaderSize;
+  const int kSize = ByteArray::SizeFor(kLength);
+  CHECK_EQ(kSize, 256);
+  // Fill a page with byte arrays.
+  {
+    AlwaysAllocateScope always_allocate(isolate);
+    heap::SimulateFullSpace(old_space);
+    ByteArray* byte_array;
+    CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
+    byte_arrays.push_back(byte_array);
+    page = Page::FromAddress(byte_array->address());
+    CHECK_EQ(page->area_size() % kSize, 0u);
+    size_t n = page->area_size() / kSize;
+    for (size_t i = 1; i < n; i++) {
+      CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
+      byte_arrays.push_back(byte_array);
+      CHECK_EQ(page, Page::FromAddress(byte_array->address()));
+    }
+  }
+  CHECK_NULL(page->invalidated_slots());
+  {
+    // Without invalidated slots on the page, the filter considers
+    // all slots as valid.
+    InvalidatedSlotsFilter filter(page);
+    for (auto byte_array : byte_arrays) {
+      Address start = byte_array->address() + ByteArray::kHeaderSize;
+      Address end = byte_array->address() + kSize;
+      for (Address addr = start; addr < end; addr += kPointerSize) {
+        CHECK(filter.IsValid(addr));
+      }
+    }
+  }
+  // Register every second byte array as invalidated.
+  for (size_t i = 0; i < byte_arrays.size(); i += 2) {
+    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i], kSize);
+  }
+  {
+    InvalidatedSlotsFilter filter(page);
+    for (size_t i = 0; i < byte_arrays.size(); i++) {
+      ByteArray* byte_array = byte_arrays[i];
+      Address start = byte_array->address() + ByteArray::kHeaderSize;
+      Address end = byte_array->address() + kSize;
+      for (Address addr = start; addr < end; addr += kPointerSize) {
+        if (i % 2 == 0) {
+          CHECK(!filter.IsValid(addr));
+        } else {
+          CHECK(filter.IsValid(addr));
+        }
+      }
+    }
+  }
+  // Register the remaining byte arrays as invalidated.
+  for (size_t i = 1; i < byte_arrays.size(); i += 2) {
+    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i], kSize);
+  }
+  {
+    InvalidatedSlotsFilter filter(page);
+    for (size_t i = 0; i < byte_arrays.size(); i++) {
+      ByteArray* byte_array = byte_arrays[i];
+      Address start = byte_array->address() + ByteArray::kHeaderSize;
+      Address end = byte_array->address() + kSize;
+      for (Address addr = start; addr < end; addr += kPointerSize) {
+        CHECK(!filter.IsValid(addr));
+      }
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8