diff --git a/BUILD.gn b/BUILD.gn
index a57c152050..e3152ace0f 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1581,9 +1581,6 @@ v8_source_set("v8_base") {
     "src/heap/incremental-marking-job.h",
     "src/heap/incremental-marking.cc",
     "src/heap/incremental-marking.h",
-    "src/heap/invalidated-slots-inl.h",
-    "src/heap/invalidated-slots.cc",
-    "src/heap/invalidated-slots.h",
     "src/heap/item-parallel-job.h",
     "src/heap/local-allocator.h",
     "src/heap/mark-compact-inl.h",
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index d40d608b3f..e8fb948675 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -4607,17 +4607,10 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
   }
 }
 
-void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
+void Heap::NotifyObjectLayoutChange(HeapObject* object,
                                     const DisallowHeapAllocation&) {
-  DCHECK(InOldSpace(object) || InNewSpace(object));
   if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndPush(object);
-    if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
-      // The concurrent marker might have recorded slots for the object.
-      // Register this object as invalidated to filter out the slots.
-      MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-      chunk->RegisterObjectWithInvalidatedSlots(object, size);
-    }
   }
 #ifdef VERIFY_HEAP
   DCHECK(pending_layout_change_object_ == nullptr);
diff --git a/src/heap/heap.h b/src/heap/heap.h
index a3bee56d6a..d1ab66d192 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -1186,8 +1186,7 @@ class Heap {
   // The runtime uses this function to notify potentially unsafe object layout
   // changes that require special synchronization with the concurrent marker.
-  // The old size is the size of the object before layout change.
-  void NotifyObjectLayoutChange(HeapObject* object, int old_size,
+  void NotifyObjectLayoutChange(HeapObject* object,
                                 const DisallowHeapAllocation&);
 
 #ifdef VERIFY_HEAP
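The contract these first hunks relax is easier to see in isolation: before an in-place layout change, the runtime marked the object black (so the concurrent marker would not re-scan it mid-mutation) and, when compacting, recorded the object's pre-change size so slots the concurrent marker had already recorded inside it could be filtered later. A minimal standalone sketch of that ordering, with simplified types and invented names rather than V8's real API:

    #include <cstdint>
    #include <map>

    using Address = std::uintptr_t;
    using InvalidatedSlots = std::map<Address, int>;  // object start -> old size

    struct Marker {
      bool IsMarking() const { return true; }
      bool IsCompacting() const { return true; }
      void MarkBlackAndPush(Address) { /* grey -> black, re-visit later */ }
    };

    struct Chunk {
      InvalidatedSlots invalidated_slots;
    };

    // Sketch of the reverted Heap::NotifyObjectLayoutChange: registration has
    // to happen *before* the caller mutates the object's map or size.
    void NotifyObjectLayoutChange(Marker& marker, Chunk& chunk, Address object,
                                  int old_size) {
      if (!marker.IsMarking()) return;
      marker.MarkBlackAndPush(object);
      if (marker.IsCompacting()) {
        int& stored = chunk.invalidated_slots[object];
        if (old_size > stored) stored = old_size;  // keep the largest old size
      }
    }

With the invalidated-slots machinery gone, the size argument has no remaining consumer, which is why the signature shrinks in both heap.cc and heap.h.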
diff --git a/src/heap/invalidated-slots-inl.h b/src/heap/invalidated-slots-inl.h
deleted file mode 100644
index 03103a61ef..0000000000
--- a/src/heap/invalidated-slots-inl.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INVALIDATED_SLOTS_INL_H
-#define V8_INVALIDATED_SLOTS_INL_H
-
-#include <map>
-
-#include "src/allocation.h"
-#include "src/heap/invalidated-slots.h"
-#include "src/heap/spaces.h"
-#include "src/objects-body-descriptors-inl.h"
-#include "src/objects-body-descriptors.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-bool InvalidatedSlotsFilter::IsValid(Address slot) {
-#ifdef DEBUG
-  DCHECK_LT(slot, sentinel_);
-  // Slots must come in non-decreasing order.
-  DCHECK_LE(last_slot_, slot);
-  last_slot_ = slot;
-#endif
-  while (slot >= invalidated_end_) {
-    ++iterator_;
-    if (iterator_ != iterator_end_) {
-      // Invalidated ranges must not overlap.
-      DCHECK_LE(invalidated_end_, iterator_->first->address());
-      invalidated_start_ = iterator_->first->address();
-      invalidated_end_ = invalidated_start_ + iterator_->second;
-    } else {
-      invalidated_start_ = sentinel_;
-      invalidated_end_ = sentinel_;
-    }
-  }
-  // Now the invalidated region ends after the slot.
-  if (slot < invalidated_start_) {
-    // The invalidated region starts after the slot.
-    return true;
-  }
-  // The invalidated region includes the slot.
-  // Ask the object if the slot is valid.
-  if (invalidated_object_ == nullptr) {
-    invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
-    invalidated_object_size_ =
-        invalidated_object_->SizeFromMap(invalidated_object_->map());
-  }
-  int offset = static_cast<int>(slot - invalidated_start_);
-  DCHECK_GT(offset, 0);
-  DCHECK_LE(invalidated_object_size_,
-            static_cast<int>(invalidated_end_ - invalidated_start_));
-
-  if (offset >= invalidated_object_size_) {
-    // A new object could have been allocated during evacuation in the free
-    // space outside the object. Since objects are not invalidated in GC pause
-    // we can return true here.
-    return true;
-  }
-  return invalidated_object_->IsValidSlot(offset);
-}
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_INVALIDATED_SLOTS_INL_H
diff --git a/src/heap/invalidated-slots.cc b/src/heap/invalidated-slots.cc
deleted file mode 100644
index 85430e58bc..0000000000
--- a/src/heap/invalidated-slots.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/invalidated-slots.h"
-#include "src/heap/spaces.h"
-
-namespace v8 {
-namespace internal {
-
-InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
-  DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
-                 chunk->owner()->identity() == OLD_SPACE);
-  InvalidatedSlots* invalidated_slots =
-      chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
-  iterator_ = invalidated_slots->begin();
-  iterator_end_ = invalidated_slots->end();
-  sentinel_ = chunk->area_end();
-  if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->first->address();
-    invalidated_end_ = invalidated_start_ + iterator_->second;
-  } else {
-    invalidated_start_ = sentinel_;
-    invalidated_end_ = sentinel_;
-  }
-  // These values will be lazily set when needed.
-  invalidated_object_ = nullptr;
-  invalidated_object_size_ = 0;
-#ifdef DEBUG
-  last_slot_ = chunk->area_start();
-#endif
-}
-
-}  // namespace internal
-}  // namespace v8
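Since std::map keeps ranges sorted by object address (building it is the m*log(m) term in the complexity quoted by the header below) and IsValid is required to be queried with non-decreasing slots, the deleted filter advances a single cursor across the ranges instead of doing a lookup per slot (the n term). A compilable stand-in for that two-pointer walk, using integer addresses and invented names; it deliberately omits the per-object IsValidSlot refinement and the trimmed-tail case:

    #include <cassert>
    #include <cstdint>
    #include <map>

    using Address = std::uintptr_t;

    class RangeFilter {
     public:
      // ranges: object start -> size; sentinel: first address past the chunk.
      RangeFilter(const std::map<Address, int>& ranges, Address sentinel)
          : it_(ranges.begin()), end_(ranges.end()), sentinel_(sentinel) {
        Prime();
      }

      // Queries must be non-decreasing, as in InvalidatedSlotsFilter::IsValid.
      bool IsValid(Address slot) {
        while (slot >= invalidated_end_) {  // advance past stale ranges
          ++it_;
          Prime();
        }
        return slot < invalidated_start_;  // valid iff before the current range
      }

     private:
      void Prime() {
        if (it_ != end_) {
          invalidated_start_ = it_->first;
          invalidated_end_ = invalidated_start_ + it_->second;
        } else {
          invalidated_start_ = invalidated_end_ = sentinel_;
        }
      }

      std::map<Address, int>::const_iterator it_, end_;
      Address sentinel_;
      Address invalidated_start_ = 0;
      Address invalidated_end_ = 0;
    };

    int main() {
      std::map<Address, int> ranges = {{100, 16}, {200, 32}};
      RangeFilter filter(ranges, /*sentinel=*/1000);
      assert(filter.IsValid(96));    // before the first range
      assert(!filter.IsValid(108));  // inside [100, 116)
      assert(filter.IsValid(116));   // between the ranges
      assert(!filter.IsValid(224));  // inside [200, 232)
      assert(filter.IsValid(500));   // past all ranges
    }

The sentinel at the chunk's area_end plays the same role as in the deleted constructor: it lets the while loop terminate without a separate end-of-map check.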
-class InvalidatedSlotsFilter {
- public:
-  explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
-  inline bool IsValid(Address slot);
-
- private:
-  InvalidatedSlots::const_iterator iterator_;
-  InvalidatedSlots::const_iterator iterator_end_;
-  Address sentinel_;
-  Address invalidated_start_;
-  Address invalidated_end_;
-  HeapObject* invalidated_object_;
-  int invalidated_object_size_;
-  InvalidatedSlots empty_;
-#ifdef DEBUG
-  Address last_slot_;
-#endif
-};
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_INVALIDATED_SLOTS_H
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index a39a6ada17..9562e1fc34 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -21,8 +21,6 @@
 #include "src/heap/concurrent-marking.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
-#include "src/heap/invalidated-slots-inl.h"
-#include "src/heap/invalidated-slots.h"
 #include "src/heap/item-parallel-job.h"
 #include "src/heap/local-allocator.h"
 #include "src/heap/mark-compact-inl.h"
@@ -3265,14 +3263,6 @@ void MarkCompactCollector::EvacuateEpilogue() {
   heap()->new_space()->set_age_mark(heap()->new_space()->top());
   // Old space. Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
-#ifdef DEBUG
-  // Old-to-old slot sets must be empty after evacuation.
-  for (Page* p : *heap()->old_space()) {
-    DCHECK_NULL((p->slot_set<OLD_TO_OLD>()));
-    DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD>()));
-    DCHECK_NULL(p->invalidated_slots());
-  }
-#endif
 }
 
 class Evacuator : public Malloced {
@@ -4140,21 +4130,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }
     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
         (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
-      InvalidatedSlotsFilter filter(chunk_);
-      RememberedSet<OLD_TO_OLD>::Iterate(
-          chunk_,
-          [&filter](Address slot) {
-            if (!filter.IsValid(slot)) return REMOVE_SLOT;
-            return UpdateSlot<AccessMode::NON_ATOMIC>(
-                reinterpret_cast<Object**>(slot));
-          },
-          SlotSet::PREFREE_EMPTY_BUCKETS);
-    }
-    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
-        chunk_->invalidated_slots() != nullptr) {
-      // The invalidated slots are not needed after old-to-old slots were
-      // processsed.
-      chunk_->ReleaseInvalidatedSlots();
+      RememberedSet<OLD_TO_OLD>::Iterate(
+          chunk_,
+          [](Address slot) {
+            return UpdateSlot<AccessMode::NON_ATOMIC>(
+                reinterpret_cast<Object**>(slot));
+          },
+          SlotSet::PREFREE_EMPTY_BUCKETS);
     }
   }
 
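The deleted call site above shows how the filter composed with RememberedSet<OLD_TO_OLD>::Iterate: a slot belonging to an invalidated object was removed instead of updated. A simplified, self-contained version of that loop, with invented types; V8's callbacks use a SlotCallbackResult enum of the same shape:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <vector>

    using Address = std::uintptr_t;
    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    // Stand-in for RememberedSet<OLD_TO_OLD>::Iterate: visit recorded slots in
    // address order, dropping the ones the callback rejects.
    void IterateSlots(std::vector<Address>& slots,
                      const std::function<SlotCallbackResult(Address)>& cb) {
      std::vector<Address> kept;
      for (Address slot : slots) {
        if (cb(slot) == KEEP_SLOT) kept.push_back(slot);
      }
      slots.swap(kept);
    }

    // Invented predicate standing in for InvalidatedSlotsFilter::IsValid.
    bool IsInsideInvalidatedObject(Address slot) {
      return slot >= 100 && slot < 116;
    }

    int main() {
      std::vector<Address> slots = {96, 104, 112, 120};
      IterateSlots(slots, [](Address slot) {
        if (IsInsideInvalidatedObject(slot)) return REMOVE_SLOT;  // filtered
        // ... a real UpdateSlot would rewrite the pointer here ...
        return KEEP_SLOT;
      });
      std::cout << slots.size() << " slots kept\n";  // prints "2 slots kept"
    }

Because slots are visited in address order, the real lambda could use the one-cursor filter sketched earlier without any extra sorting.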
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index 6ebedae078..f44d45852b 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -120,8 +120,7 @@ class RememberedSet : public AllStatic {
     while ((chunk = it.next()) != nullptr) {
       SlotSet* slots = chunk->slot_set<type>();
       TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
-      if (slots != nullptr || typed_slots != nullptr ||
-          chunk->invalidated_slots() != nullptr) {
+      if (slots != nullptr || typed_slots != nullptr) {
         callback(chunk);
       }
     }
@@ -231,7 +230,6 @@ class RememberedSet : public AllStatic {
     while ((chunk = it.next()) != nullptr) {
       chunk->ReleaseSlotSet<OLD_TO_OLD>();
       chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
-      chunk->ReleaseInvalidatedSlots();
     }
   }
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 7f2bd18b50..66196ea153 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -549,7 +549,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     nullptr);
   base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                     nullptr);
-  chunk->invalidated_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -1217,7 +1216,6 @@ void MemoryChunk::ReleaseAllocatedMemory() {
   ReleaseSlotSet<OLD_TO_OLD>();
   ReleaseTypedSlotSet<OLD_TO_NEW>();
   ReleaseTypedSlotSet<OLD_TO_OLD>();
-  ReleaseInvalidatedSlots();
   if (local_tracker_ != nullptr) ReleaseLocalTracker();
   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
 }
@@ -1288,28 +1286,6 @@ void MemoryChunk::ReleaseTypedSlotSet() {
   }
 }
 
-InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
-  DCHECK_NULL(invalidated_slots_);
-  invalidated_slots_ = new InvalidatedSlots();
-  return invalidated_slots_;
-}
-
-void MemoryChunk::ReleaseInvalidatedSlots() {
-  if (invalidated_slots_) {
-    delete invalidated_slots_;
-    invalidated_slots_ = nullptr;
-  }
-}
-
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
-                                                     int size) {
-  if (invalidated_slots() == nullptr) {
-    AllocateInvalidatedSlots();
-  }
-  int old_size = (*invalidated_slots())[object];
-  (*invalidated_slots())[object] = std::max(old_size, size);
-}
-
 void MemoryChunk::AllocateLocalTracker() {
   DCHECK_NULL(local_tracker_);
   local_tracker_ = new LocalArrayBufferTracker(heap());
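The spaces.cc hunk removes a deliberately lazy structure: a chunk paid for the map only once an object on it actually changed layout, and re-registering the same object kept the larger of the two pre-change sizes. A hedged sketch of that pattern with simplified types and invented names:

    #include <algorithm>
    #include <cstdint>
    #include <map>

    using Address = std::uintptr_t;
    using InvalidatedSlots = std::map<Address, int>;

    struct MemoryChunk {
      InvalidatedSlots* invalidated_slots_ = nullptr;  // lazily allocated

      InvalidatedSlots* AllocateInvalidatedSlots() {
        invalidated_slots_ = new InvalidatedSlots();
        return invalidated_slots_;
      }

      void ReleaseInvalidatedSlots() {
        delete invalidated_slots_;  // deleting nullptr is a no-op
        invalidated_slots_ = nullptr;
      }

      void RegisterObjectWithInvalidatedSlots(Address object, int size) {
        if (invalidated_slots_ == nullptr) AllocateInvalidatedSlots();
        // operator[] default-inserts 0, so std::max keeps the largest old
        // size across repeated layout changes of the same object.
        int old_size = (*invalidated_slots_)[object];
        (*invalidated_slots_)[object] = std::max(old_size, size);
      }

      ~MemoryChunk() { ReleaseInvalidatedSlots(); }
    };

Keeping the pointer null on untouched chunks is also what made the cheap "anything to do here?" checks in remembered-set.h possible.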
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 5e8c4822ab..6ccc25db95 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -6,7 +6,6 @@
 #define V8_HEAP_SPACES_H_
 
 #include <list>
-#include <map>
 #include <memory>
 #include <unordered_set>
 
@@ -20,7 +19,6 @@
 #include "src/flags.h"
 #include "src/globals.h"
 #include "src/heap/heap.h"
-#include "src/heap/invalidated-slots.h"
 #include "src/heap/marking.h"
 #include "src/list.h"
 #include "src/objects.h"
@@ -356,8 +354,7 @@ class MemoryChunk {
       + kIntptrSize  // intptr_t live_byte_count_
       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
-      + kPointerSize  // InvalidatedSlots* invalidated_slots_
-      + kPointerSize  // SkipList* skip_list_
+      + kPointerSize  // SkipList* skip_list_
       + kPointerSize  // AtomicValue high_water_mark_
       + kPointerSize  // base::RecursiveMutex* mutex_
       + kPointerSize  // base::AtomicWord concurrent_sweeping_
@@ -475,11 +472,6 @@ class MemoryChunk {
   template <RememberedSetType type>
   void ReleaseTypedSlotSet();
 
-  InvalidatedSlots* AllocateInvalidatedSlots();
-  void ReleaseInvalidatedSlots();
-  void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
-  InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
-
   void AllocateLocalTracker();
   void ReleaseLocalTracker();
   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
@@ -639,7 +631,6 @@ class MemoryChunk {
   // is ceil(size() / kPageSize).
   SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
   TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
-  InvalidatedSlots* invalidated_slots_;
 
   SkipList* skip_list_;
diff --git a/src/objects.cc b/src/objects.cc
index ffbcdca3fc..c4159b3195 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2598,7 +2598,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(this).IsIndirect();
   if (has_pointers) {
-    heap->NotifyObjectLayoutChange(this, size, no_allocation);
+    heap->NotifyObjectLayoutChange(this, no_allocation);
   }
   // Morph the string to an external string by replacing the map and
   // reinitializing the fields.  This won't work if the space the existing
@@ -2674,7 +2674,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool has_pointers = StringShape(this).IsIndirect();
 
   if (has_pointers) {
-    heap->NotifyObjectLayoutChange(this, size, no_allocation);
+    heap->NotifyObjectLayoutChange(this, no_allocation);
   }
 
   // Morph the string to an external string by replacing the map and
@@ -3980,9 +3980,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
 
   Heap* heap = isolate->heap();
 
-  int old_instance_size = old_map->instance_size();
-
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+  heap->NotifyObjectLayoutChange(*object, no_allocation);
 
   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
   // avoid overwriting |one_pointer_filler_map|.
@@ -4016,7 +4014,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
 
   // Create filler object past the new instance size.
   int new_instance_size = new_map->instance_size();
-  int instance_size_delta = old_instance_size - new_instance_size;
+  int instance_size_delta = old_map->instance_size() - new_instance_size;
   DCHECK(instance_size_delta >= 0);
 
   if (instance_size_delta > 0) {
@@ -4098,12 +4096,11 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
   DisallowHeapAllocation no_allocation;
 
   Heap* heap = isolate->heap();
-  int old_instance_size = map->instance_size();
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+  heap->NotifyObjectLayoutChange(*object, no_allocation);
 
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
-  int instance_size_delta = old_instance_size - new_instance_size;
+  int instance_size_delta = map->instance_size() - new_instance_size;
   DCHECK(instance_size_delta >= 0);
 
   if (instance_size_delta > 0) {
@@ -17086,11 +17083,11 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
   if (!string->IsInternalizedString()) {
     DisallowHeapAllocation no_gc;
-    int old_size = string->Size();
-    isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
+    isolate->heap()->NotifyObjectLayoutChange(string, no_gc);
     bool one_byte = internalized->IsOneByteRepresentation();
     Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
                                : isolate->factory()->thin_string_map();
+    int old_size = string->Size();
     DCHECK(old_size >= ThinString::kSize);
     string->synchronized_set_map(*map);
     ThinString* thin = ThinString::cast(string);
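In the objects.cc hunks, folding old_instance_size back into a direct old_map->instance_size() read stays correct only because the handles still reference the old map after the object's map is swapped. The filler arithmetic these call sites share is worth seeing concretely; a standalone sketch over a flat buffer, where zeroed bytes stand in for V8's filler objects:

    #include <cassert>
    #include <cstring>

    // Shrinking an object in place: the gap between old and new instance
    // sizes must be overwritten with filler so the heap stays iterable.
    // Simplified model of the MigrateFastToFast / MigrateFastToSlow math.
    void ShrinkInPlace(unsigned char* object, int old_instance_size,
                       int new_instance_size) {
      int instance_size_delta = old_instance_size - new_instance_size;
      assert(instance_size_delta >= 0);  // maps only ever shrink here
      if (instance_size_delta > 0) {
        // V8 writes a filler (e.g. one_pointer_filler_map or free space)
        // over the tail; zero bytes stand in for that in this sketch.
        std::memset(object + new_instance_size, 0, instance_size_delta);
      }
    }

    int main() {
      unsigned char object[64] = {0};
      ShrinkInPlace(object, /*old_instance_size=*/64, /*new_instance_size=*/32);
    }

The MakeStringThin hunk makes the same point in miniature: reading string->Size() after NotifyObjectLayoutChange is still safe because the map, and hence the size, is only changed by the later synchronized_set_map call.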
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 3f4a104d44..2b9d79ca48 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -160,8 +160,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
   // Zap the property to avoid keeping objects alive. Zapping is not necessary
   // for properties stored in the descriptor array.
   if (details.location() == kField) {
-    isolate->heap()->NotifyObjectLayoutChange(*receiver, map->instance_size(),
-                                              no_allocation);
+    isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
     Object* filler = isolate->heap()->one_pointer_filler_map();
     FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
     JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
diff --git a/src/v8.gyp b/src/v8.gyp
index 0b91565a54..07ee64d8e4 100644
--- a/src/v8.gyp
+++ b/src/v8.gyp
@@ -1027,9 +1027,6 @@
         'heap/incremental-marking-job.h',
         'heap/incremental-marking.cc',
         'heap/incremental-marking.h',
-        'heap/invalidated-slots-inl.h',
-        'heap/invalidated-slots.cc',
-        'heap/invalidated-slots.h',
         'heap/item-parallel-job.h',
         'heap/local-allocator.h',
         'heap/mark-compact-inl.h',
diff --git a/test/cctest/BUILD.gn b/test/cctest/BUILD.gn
index 4cd9550d12..8b6404be95 100644
--- a/test/cctest/BUILD.gn
+++ b/test/cctest/BUILD.gn
@@ -78,7 +78,6 @@ v8_executable("cctest") {
     "heap/test-concurrent-marking.cc",
    "heap/test-heap.cc",
    "heap/test-incremental-marking.cc",
-    "heap/test-invalidated-slots.cc",
    "heap/test-lab.cc",
    "heap/test-mark-compact.cc",
    "heap/test-page-promotion.cc",
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index f383374c8e..ccf79fc450 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -96,7 +96,6 @@
     'heap/test-concurrent-marking.cc',
     'heap/test-heap.cc',
     'heap/test-incremental-marking.cc',
-    'heap/test-invalidated-slots.cc',
    'heap/test-lab.cc',
    'heap/test-mark-compact.cc',
    'heap/test-page-promotion.cc',
diff --git a/test/cctest/heap/heap-tester.h b/test/cctest/heap/heap-tester.h
index 6ceb5e69b3..099a231ff1 100644
--- a/test/cctest/heap/heap-tester.h
+++ b/test/cctest/heap/heap-tester.h
@@ -16,10 +16,6 @@
   V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
   V(CompactionSpaceDivideMultiplePages)                   \
   V(CompactionSpaceDivideSinglePage)                      \
-  V(InvalidatedSlotsNoInvalidatedRanges)                  \
-  V(InvalidatedSlotsSomeInvalidatedRanges)                \
-  V(InvalidatedSlotsAllInvalidatedRanges)                 \
-  V(InvalidatedSlotsAfterTrimming)                        \
   V(TestNewSpaceRefsInCopiedCode)                         \
   V(GCFlags)                                              \
   V(MarkCompactCollector)                                 \
@@ -70,15 +66,11 @@ class HeapTester {
   HEAP_TEST_METHODS(DECLARE_STATIC)
 #undef HEAP_TEST_METHODS
 
   // test-alloc.cc
   static AllocationResult AllocateAfterFailures();
   static Handle<Object> TestAllocateAfterFailures();
 
-  // test-invalidated-slots.cc
-  static Page* AllocateByteArraysOnPage(Heap* heap,
-                                        std::vector<ByteArray*>* byte_arrays);
-
   // test-api.cc
   static void ResetWeakHandle(bool global_gc);
 };
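heap-tester.h registers heap tests through an X-macro list, which is why the revert only needs to drop four V(...) lines plus one helper declaration. A tiny standalone example of the idiom, with invented test names:

    #include <iostream>

    // X-macro list: each entry expands through the V parameter, so adding or
    // removing a test is a one-line change, as in heap-tester.h.
    #define HEAP_TEST_METHODS(V) \
      V(MarkCompactCollector)    \
      V(GCFlags)

    #define DECLARE_TEST(Name) \
      void Test##Name() { std::cout << #Name << "\n"; }
    HEAP_TEST_METHODS(DECLARE_TEST)
    #undef DECLARE_TEST

    int main() {
    #define RUN_TEST(Name) Test##Name();
      HEAP_TEST_METHODS(RUN_TEST)
    #undef RUN_TEST
    }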
diff --git a/test/cctest/heap/test-invalidated-slots.cc b/test/cctest/heap/test-invalidated-slots.cc
deleted file mode 100644
index f542c19b99..0000000000
--- a/test/cctest/heap/test-invalidated-slots.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-#include "src/heap/invalidated-slots-inl.h"
-#include "src/heap/invalidated-slots.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/heap/heap-tester.h"
-#include "test/cctest/heap/heap-utils.h"
-
-namespace v8 {
-namespace internal {
-
-Page* v8::internal::HeapTester::AllocateByteArraysOnPage(
-    Heap* heap, std::vector<ByteArray*>* byte_arrays) {
-  const int kLength = 256 - ByteArray::kHeaderSize;
-  const int kSize = ByteArray::SizeFor(kLength);
-  CHECK_EQ(kSize, 256);
-  Isolate* isolate = heap->isolate();
-  PagedSpace* old_space = heap->old_space();
-  Page* page;
-  // Fill a page with byte arrays.
-  {
-    AlwaysAllocateScope always_allocate(isolate);
-    heap::SimulateFullSpace(old_space);
-    ByteArray* byte_array;
-    CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
-    byte_arrays->push_back(byte_array);
-    page = Page::FromAddress(byte_array->address());
-    CHECK_EQ(page->area_size() % kSize, 0u);
-    size_t n = page->area_size() / kSize;
-    for (size_t i = 1; i < n; i++) {
-      CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
-      byte_arrays->push_back(byte_array);
-      CHECK_EQ(page, Page::FromAddress(byte_array->address()));
-    }
-  }
-  CHECK_NULL(page->invalidated_slots());
-  return page;
-}
-
-HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
-  CcTest::InitializeVM();
-  Heap* heap = CcTest::heap();
-  std::vector<ByteArray*> byte_arrays;
-  Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
-  InvalidatedSlotsFilter filter(page);
-  for (auto byte_array : byte_arrays) {
-    Address start = byte_array->address() + ByteArray::kHeaderSize;
-    Address end = byte_array->address() + byte_array->Size();
-    for (Address addr = start; addr < end; addr += kPointerSize) {
-      CHECK(filter.IsValid(addr));
-    }
-  }
-}
-
-HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
-  CcTest::InitializeVM();
-  Heap* heap = CcTest::heap();
-  std::vector<ByteArray*> byte_arrays;
-  Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
-  // Register every second byte arrays as invalidated.
-  for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
-                                             byte_arrays[i]->Size());
-  }
-  InvalidatedSlotsFilter filter(page);
-  for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray* byte_array = byte_arrays[i];
-    Address start = byte_array->address() + ByteArray::kHeaderSize;
-    Address end = byte_array->address() + byte_array->Size();
-    for (Address addr = start; addr < end; addr += kPointerSize) {
-      if (i % 2 == 0) {
-        CHECK(!filter.IsValid(addr));
-      } else {
-        CHECK(filter.IsValid(addr));
-      }
-    }
-  }
-}
-
-HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
-  CcTest::InitializeVM();
-  Heap* heap = CcTest::heap();
-  std::vector<ByteArray*> byte_arrays;
-  Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
-  // Register the all byte arrays as invalidated.
-  for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
-                                             byte_arrays[i]->Size());
-  }
-  InvalidatedSlotsFilter filter(page);
-  for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray* byte_array = byte_arrays[i];
-    Address start = byte_array->address() + ByteArray::kHeaderSize;
-    Address end = byte_array->address() + byte_array->Size();
-    for (Address addr = start; addr < end; addr += kPointerSize) {
-      CHECK(!filter.IsValid(addr));
-    }
-  }
-}
-
-HEAP_TEST(InvalidatedSlotsAfterTrimming) {
-  CcTest::InitializeVM();
-  Heap* heap = CcTest::heap();
-  std::vector<ByteArray*> byte_arrays;
-  Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
-  // Register the all byte arrays as invalidated.
-  for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
-                                             byte_arrays[i]->Size());
-  }
-  // Trim byte arrays and check that the slots outside the byte arrays are
-  // considered valid. Free space outside invalidated object can be reused
-  // during evacuation for allocation of the evacuated objects. That can
-  // add new valid slots to evacuation candidates.
-  InvalidatedSlotsFilter filter(page);
-  for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray* byte_array = byte_arrays[i];
-    Address start = byte_array->address() + ByteArray::kHeaderSize;
-    Address end = byte_array->address() + byte_array->Size();
-    heap->RightTrimFixedArray(byte_array, byte_array->length());
-    for (Address addr = start; addr < end; addr += kPointerSize) {
-      CHECK(filter.IsValid(addr));
-    }
-  }
-}
-
-}  // namespace internal
-}  // namespace v8
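The deleted InvalidatedSlotsAfterTrimming test pins down the subtlety also visible in the deleted IsValid: a slot that falls inside a registered range but beyond the object's current (trimmed) size is treated as valid again, because the freed tail can be reallocated during evacuation. A sketch of just that check, in the simplified model used above; in V8 the in-object case would go on to consult the object's IsValidSlot rather than always answering "invalid":

    #include <cassert>
    #include <cstdint>

    using Address = std::uintptr_t;

    // Decide a slot already known to lie in a registered range
    // [range_start, range_start + old_size): only offsets within the object's
    // current (possibly trimmed) size are really invalid; the trimmed tail may
    // hold fresh, valid slots.
    bool IsValidInsideRange(Address slot, Address range_start,
                            int current_size) {
      int offset = static_cast<int>(slot - range_start);
      return offset >= current_size;
    }

    int main() {
      // Object registered at 100 with old size 64, then trimmed to 16 bytes.
      assert(!IsValidInsideRange(108, 100, 16));  // inside the live object
      assert(IsValidInsideRange(124, 100, 16));   // in the trimmed-off tail
    }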