Revert "[heap] Add mechanism for tracking invalidated slots per memory chunk."

This reverts commit 7a5a777c97.

Reason for revert: crashing in test-api

Original change's description:
> [heap] Add mechanism for tracking invalidated slots per memory chunk.
> 
> For correct slot recording in the concurrent marker, we need to resolve
> the race that happens when
> 1) the mutator is invalidating slots for double unboxing or string
> conversions, and
> 2) the concurrent marker is recording these slots.
> 
> This patch adds a data structure for tracking the invalidated objects.
> Thus we can allow the concurrent marker to record slots without
> worrying about clearing them. During the old-to-old pointer updating
> phase we re-check all slots that belong to the invalidated objects.
> 
> BUG=chromium:694255
> 
> Change-Id: Ifc3d82918cd3b96e5a5fb7125691626a56f4ab83
> Reviewed-on: https://chromium-review.googlesource.com/591810
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#47049}

TBR=ulan@chromium.org,mlippautz@chromium.org

Change-Id: I7f4f8e8cb027b921a82e9c0a0623536af02581fb
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:694255
Reviewed-on: https://chromium-review.googlesource.com/595994
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47052}
Ulan Degenbaev 2017-08-01 18:13:20 +00:00 committed by Commit Bot
parent 5337b905ce
commit c59b81d7b8
17 changed files with 20 additions and 341 deletions
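
For context, the reverted mechanism works as follows: when the mutator changes an object's layout (double unboxing, string conversion), it records the object and its pre-change size in a per-chunk map; during the old-to-old pointer-updating phase, any recorded slot that falls inside such an object is re-checked against the object's new layout instead of being trusted. Below is a simplified, standalone C++ sketch of that idea, not the removed V8 code itself (the real InvalidatedSlots map, InvalidatedSlotsFilter, and MemoryChunk::RegisterObjectWithInvalidatedSlots appear in the hunks that follow); the Address alias, the re-check callback, and the per-query map lookup are illustrative stand-ins. The real filter additionally relies on queries arriving in non-decreasing address order, so it walks the map with a single forward iterator instead of doing a lookup per slot.

#include <cassert>
#include <cstdint>
#include <map>

using Address = uintptr_t;

// Per-chunk registry: object start address -> object size before the layout
// change (the removed V8 code keys an equivalent map by HeapObject*).
using InvalidatedSlots = std::map<Address, int>;

// Mutator side: remember an object whose layout is about to change, so slots
// that the concurrent marker recorded inside it are re-checked later instead
// of being trusted blindly.
void RegisterObjectWithInvalidatedSlots(InvalidatedSlots& invalidated,
                                        Address object_start, int size) {
  int& stored = invalidated[object_start];
  if (size > stored) stored = size;  // keep the largest pre-change size
}

// Pointer-updating side: a slot is kept if it lies outside every invalidated
// object, or if the object's new layout still reports the offset as a valid
// tagged slot.
template <typename RecheckFn>
bool IsValid(const InvalidatedSlots& invalidated, Address slot,
             RecheckFn object_says_slot_is_valid) {
  auto it = invalidated.upper_bound(slot);     // first entry starting after slot
  if (it == invalidated.begin()) return true;  // slot precedes all entries
  --it;  // last entry starting at or before the slot
  Address object_start = it->first;
  int old_size = it->second;
  if (slot >= object_start + static_cast<Address>(old_size)) return true;
  int offset = static_cast<int>(slot - object_start);
  return object_says_slot_is_valid(object_start, offset);
}

int main() {
  InvalidatedSlots invalidated;
  // An object at 0x1000 had size 64 before its layout changed.
  RegisterObjectWithInvalidatedSlots(invalidated, 0x1000, 64);
  // Pretend the new layout keeps only the first 16 bytes as tagged slots.
  auto recheck = [](Address, int offset) { return offset < 16; };
  assert(IsValid(invalidated, 0x0ff8, recheck));   // before the object
  assert(IsValid(invalidated, 0x1008, recheck));   // still valid after change
  assert(!IsValid(invalidated, 0x1020, recheck));  // invalidated slot
  assert(IsValid(invalidated, 0x1040, recheck));   // past the old object size
  return 0;
}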


@@ -1581,9 +1581,6 @@ v8_source_set("v8_base") {
     "src/heap/incremental-marking-job.h",
     "src/heap/incremental-marking.cc",
     "src/heap/incremental-marking.h",
-    "src/heap/invalidated-slots-inl.h",
-    "src/heap/invalidated-slots.cc",
-    "src/heap/invalidated-slots.h",
     "src/heap/item-parallel-job.h",
     "src/heap/local-allocator.h",
     "src/heap/mark-compact-inl.h",


@@ -4605,17 +4605,10 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
   }
 }
 
-void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
+void Heap::NotifyObjectLayoutChange(HeapObject* object,
                                     const DisallowHeapAllocation&) {
-  DCHECK(InOldSpace(object) || InNewSpace(object));
   if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndPush(object);
-    if (InOldSpace(object)) {
-      // The concurrent marker might have recorded slots for the object.
-      // Register this object as invalidated to filter out the slots.
-      MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-      chunk->RegisterObjectWithInvalidatedSlots(object, size);
-    }
   }
 #ifdef VERIFY_HEAP
   DCHECK(pending_layout_change_object_ == nullptr);


@@ -1186,8 +1186,7 @@ class Heap {
 
   // The runtime uses this function to notify potentially unsafe object layout
   // changes that require special synchronization with the concurrent marker.
-  // The old size is the size of the object before layout change.
-  void NotifyObjectLayoutChange(HeapObject* object, int old_size,
+  void NotifyObjectLayoutChange(HeapObject* object,
                                 const DisallowHeapAllocation&);
 
 #ifdef VERIFY_HEAP


@@ -1,61 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INVALIDATED_SLOTS_INL_H
-#define V8_INVALIDATED_SLOTS_INL_H
-
-#include <map>
-
-#include "src/allocation.h"
-#include "src/heap/invalidated-slots.h"
-#include "src/heap/spaces.h"
-#include "src/objects-body-descriptors-inl.h"
-#include "src/objects-body-descriptors.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-bool InvalidatedSlotsFilter::IsValid(Address slot) {
-#ifdef DEBUG
-  DCHECK_LT(slot, sentinel_);
-  // Slots must come in non-decreasing order.
-  DCHECK_LE(last_slot_, slot);
-  last_slot_ = slot;
-#endif
-  while (slot >= invalidated_end_) {
-    ++iterator_;
-    if (iterator_ != iterator_end_) {
-      // Invalidated ranges must not overlap.
-      DCHECK_LE(invalidated_end_, iterator_->first->address());
-      invalidated_start_ = iterator_->first->address();
-      invalidated_end_ = invalidated_start_ + iterator_->second;
-    } else {
-      invalidated_start_ = sentinel_;
-      invalidated_end_ = sentinel_;
-    }
-  }
-  // Now the invalidated region ends after the slot.
-  if (slot < invalidated_start_) {
-    // The invalidated region starts after the slot.
-    return true;
-  }
-  // The invalidated region includes the slot.
-  // Ask the object if the slot is valid.
-  if (invalidated_object_ == nullptr) {
-    invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
-    invalidated_object_size_ =
-        invalidated_object_->SizeFromMap(invalidated_object_->map());
-  }
-  int offset = static_cast<int>(slot - invalidated_start_);
-  DCHECK_GT(offset, 0);
-  return offset < invalidated_object_size_ &&
-         invalidated_object_->IsValidSlot(offset);
-}
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_INVALIDATED_SLOTS_INL_H


@@ -1,35 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/invalidated-slots.h"
-
-#include "src/heap/spaces.h"
-
-namespace v8 {
-namespace internal {
-
-InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
-  DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
-                 chunk->owner()->identity() == OLD_SPACE);
-  InvalidatedSlots* invalidated_slots =
-      chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
-  iterator_ = invalidated_slots->begin();
-  iterator_end_ = invalidated_slots->end();
-  sentinel_ = chunk->area_end();
-  if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->first->address();
-    invalidated_end_ = invalidated_start_ + iterator_->second;
-  } else {
-    invalidated_start_ = sentinel_;
-    invalidated_end_ = sentinel_;
-  }
-  // These values will be lazily set when needed.
-  invalidated_object_ = nullptr;
-  invalidated_object_size_ = 0;
-#ifdef DEBUG
-  last_slot_ = chunk->area_start();
-#endif
-}
-
-}  // namespace internal
-}  // namespace v8


@@ -1,55 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INVALIDATED_SLOTS_H
-#define V8_INVALIDATED_SLOTS_H
-
-#include <map>
-#include <stack>
-
-#include "src/allocation.h"
-#include "src/base/atomic-utils.h"
-#include "src/base/bits.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-class HeapObject;
-
-// This data structure stores objects that went through object layout change
-// that potentially invalidates slots recorded concurrently. The second part
-// of each element is the size of the corresponding object before the layout
-// change.
-using InvalidatedSlots = std::map<HeapObject*, int>;
-
-// This class provides IsValid predicate that takes into account the set
-// of invalidated objects in the given memory chunk.
-// The sequence of queried slots must be non-decreasing. This allows fast
-// implementation with complexity O(m*log(m) + n), where
-// m is the number of invalidated objects in the memory chunk.
-// n is the number of IsValid queries.
-class InvalidatedSlotsFilter {
- public:
-  explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
-  inline bool IsValid(Address slot);
-
- private:
-  InvalidatedSlots::const_iterator iterator_;
-  InvalidatedSlots::const_iterator iterator_end_;
-  Address sentinel_;
-  Address invalidated_start_;
-  Address invalidated_end_;
-  HeapObject* invalidated_object_;
-  int invalidated_object_size_;
-  InvalidatedSlots empty_;
-#ifdef DEBUG
-  Address last_slot_;
-#endif
-};
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_INVALIDATED_SLOTS_H


@@ -21,8 +21,6 @@
 #include "src/heap/concurrent-marking.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
-#include "src/heap/invalidated-slots-inl.h"
-#include "src/heap/invalidated-slots.h"
 #include "src/heap/item-parallel-job.h"
 #include "src/heap/local-allocator.h"
 #include "src/heap/mark-compact-inl.h"
@@ -3265,14 +3263,6 @@ void MarkCompactCollector::EvacuateEpilogue() {
   heap()->new_space()->set_age_mark(heap()->new_space()->top());
   // Old space. Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
-#ifdef DEBUG
-  // Old-to-old slot sets must be empty after evacuation.
-  for (Page* p : *heap()->old_space()) {
-    DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
-    DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
-    DCHECK_NULL(p->invalidated_slots());
-  }
-#endif
 }
 
 class Evacuator : public Malloced {
@@ -4140,21 +4130,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }
     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
         (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
-      InvalidatedSlotsFilter filter(chunk_);
       RememberedSet<OLD_TO_OLD>::Iterate(
           chunk_,
-          [&filter](Address slot) {
-            if (!filter.IsValid(slot)) return REMOVE_SLOT;
+          [](Address slot) {
             return UpdateSlot<AccessMode::NON_ATOMIC>(
                 reinterpret_cast<Object**>(slot));
           },
           SlotSet::PREFREE_EMPTY_BUCKETS);
     }
-    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
-        chunk_->invalidated_slots() != nullptr) {
-      // The invalidated slots are not needed after old-to-old slots were
-      // processed.
-      chunk_->ReleaseInvalidatedSlots();
-    }
   }


@@ -120,8 +120,7 @@ class RememberedSet : public AllStatic {
     while ((chunk = it.next()) != nullptr) {
       SlotSet* slots = chunk->slot_set<type>();
       TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
-      if (slots != nullptr || typed_slots != nullptr ||
-          chunk->invalidated_slots() != nullptr) {
+      if (slots != nullptr || typed_slots != nullptr) {
         callback(chunk);
       }
     }
@@ -231,7 +230,6 @@ class RememberedSet : public AllStatic {
     while ((chunk = it.next()) != nullptr) {
       chunk->ReleaseSlotSet<OLD_TO_OLD>();
       chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
-      chunk->ReleaseInvalidatedSlots();
     }
   }


@@ -549,7 +549,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     nullptr);
   base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                     nullptr);
-  chunk->invalidated_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -1217,7 +1216,6 @@ void MemoryChunk::ReleaseAllocatedMemory() {
   ReleaseSlotSet<OLD_TO_OLD>();
   ReleaseTypedSlotSet<OLD_TO_NEW>();
   ReleaseTypedSlotSet<OLD_TO_OLD>();
-  ReleaseInvalidatedSlots();
   if (local_tracker_ != nullptr) ReleaseLocalTracker();
   if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
 }
@@ -1288,28 +1286,6 @@ void MemoryChunk::ReleaseTypedSlotSet() {
   }
 }
 
-InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
-  DCHECK_NULL(invalidated_slots_);
-  invalidated_slots_ = new InvalidatedSlots();
-  return invalidated_slots_;
-}
-
-void MemoryChunk::ReleaseInvalidatedSlots() {
-  if (invalidated_slots_) {
-    delete invalidated_slots_;
-    invalidated_slots_ = nullptr;
-  }
-}
-
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
-                                                     int size) {
-  if (invalidated_slots() == nullptr) {
-    AllocateInvalidatedSlots();
-  }
-  int old_size = (*invalidated_slots())[object];
-  (*invalidated_slots())[object] = std::max(old_size, size);
-}
-
 void MemoryChunk::AllocateLocalTracker() {
   DCHECK_NULL(local_tracker_);
   local_tracker_ = new LocalArrayBufferTracker(heap());


@@ -6,7 +6,6 @@
 #define V8_HEAP_SPACES_H_
 
 #include <list>
-#include <map>
 #include <memory>
 #include <unordered_set>
 
@@ -20,7 +19,6 @@
 #include "src/flags.h"
 #include "src/globals.h"
 #include "src/heap/heap.h"
-#include "src/heap/invalidated-slots.h"
 #include "src/heap/marking.h"
 #include "src/list.h"
 #include "src/objects.h"
@@ -356,8 +354,7 @@ class MemoryChunk {
       + kIntptrSize  // intptr_t live_byte_count_
      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
-      + kPointerSize  // InvalidatedSlots* invalidated_slots_
       + kPointerSize  // SkipList* skip_list_
       + kPointerSize  // AtomicValue high_water_mark_
       + kPointerSize  // base::RecursiveMutex* mutex_
       + kPointerSize  // base::AtomicWord concurrent_sweeping_
@@ -475,11 +472,6 @@ class MemoryChunk {
   template <RememberedSetType type>
   void ReleaseTypedSlotSet();
 
-  InvalidatedSlots* AllocateInvalidatedSlots();
-  void ReleaseInvalidatedSlots();
-  void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
-  InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
-
   void AllocateLocalTracker();
   void ReleaseLocalTracker();
   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
@@ -639,7 +631,6 @@ class MemoryChunk {
   // is ceil(size() / kPageSize).
   SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
   TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
-  InvalidatedSlots* invalidated_slots_;
   SkipList* skip_list_;


@@ -2598,7 +2598,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(this).IsIndirect();
   if (has_pointers) {
-    heap->NotifyObjectLayoutChange(this, size, no_allocation);
+    heap->NotifyObjectLayoutChange(this, no_allocation);
   }
   // Morph the string to an external string by replacing the map and
   // reinitializing the fields. This won't work if the space the existing
@@ -2674,7 +2674,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool has_pointers = StringShape(this).IsIndirect();
   if (has_pointers) {
-    heap->NotifyObjectLayoutChange(this, size, no_allocation);
+    heap->NotifyObjectLayoutChange(this, no_allocation);
   }
 
   // Morph the string to an external string by replacing the map and
@@ -3980,9 +3980,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
   Heap* heap = isolate->heap();
 
-  int old_instance_size = old_map->instance_size();
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+  heap->NotifyObjectLayoutChange(*object, no_allocation);
 
   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
   // avoid overwriting |one_pointer_filler_map|.
@@ -4016,7 +4014,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
   // Create filler object past the new instance size.
   int new_instance_size = new_map->instance_size();
-  int instance_size_delta = old_instance_size - new_instance_size;
+  int instance_size_delta = old_map->instance_size() - new_instance_size;
   DCHECK(instance_size_delta >= 0);
   if (instance_size_delta > 0) {
@@ -4098,12 +4096,11 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
   DisallowHeapAllocation no_allocation;
   Heap* heap = isolate->heap();
 
-  int old_instance_size = map->instance_size();
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+  heap->NotifyObjectLayoutChange(*object, no_allocation);
 
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
-  int instance_size_delta = old_instance_size - new_instance_size;
+  int instance_size_delta = map->instance_size() - new_instance_size;
   DCHECK(instance_size_delta >= 0);
   if (instance_size_delta > 0) {
@@ -17074,11 +17071,11 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
   if (!string->IsInternalizedString()) {
     DisallowHeapAllocation no_gc;
-    int old_size = string->Size();
-    isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
+    isolate->heap()->NotifyObjectLayoutChange(string, no_gc);
     bool one_byte = internalized->IsOneByteRepresentation();
     Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
                                : isolate->factory()->thin_string_map();
+    int old_size = string->Size();
     DCHECK(old_size >= ThinString::kSize);
     string->synchronized_set_map(*map);
     ThinString* thin = ThinString::cast(string);


@@ -160,8 +160,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
   // Zap the property to avoid keeping objects alive. Zapping is not necessary
   // for properties stored in the descriptor array.
   if (details.location() == kField) {
-    isolate->heap()->NotifyObjectLayoutChange(*receiver, map->instance_size(),
-                                              no_allocation);
+    isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
     Object* filler = isolate->heap()->one_pointer_filler_map();
     FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
     JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);


@@ -1027,9 +1027,6 @@
         'heap/incremental-marking-job.h',
         'heap/incremental-marking.cc',
         'heap/incremental-marking.h',
-        'heap/invalidated-slots-inl.h',
-        'heap/invalidated-slots.cc',
-        'heap/invalidated-slots.h',
         'heap/item-parallel-job.h',
         'heap/local-allocator.h',
         'heap/mark-compact-inl.h',


@@ -78,7 +78,6 @@ v8_executable("cctest") {
     "heap/test-concurrent-marking.cc",
     "heap/test-heap.cc",
     "heap/test-incremental-marking.cc",
-    "heap/test-invalidated-slots.cc",
    "heap/test-lab.cc",
     "heap/test-mark-compact.cc",
     "heap/test-page-promotion.cc",


@@ -96,7 +96,6 @@
         'heap/test-concurrent-marking.cc',
         'heap/test-heap.cc',
         'heap/test-incremental-marking.cc',
-        'heap/test-invalidated-slots.cc',
         'heap/test-lab.cc',
         'heap/test-mark-compact.cc',
         'heap/test-page-promotion.cc',


@@ -16,7 +16,6 @@
   V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
   V(CompactionSpaceDivideMultiplePages) \
   V(CompactionSpaceDivideSinglePage) \
-  V(InvalidatedSlots) \
   V(TestNewSpaceRefsInCopiedCode) \
   V(GCFlags) \
   V(MarkCompactCollector) \


@@ -1,96 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-#include "src/heap/invalidated-slots-inl.h"
-#include "src/heap/invalidated-slots.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/heap/heap-tester.h"
-#include "test/cctest/heap/heap-utils.h"
-
-namespace v8 {
-namespace internal {
-
-HEAP_TEST(InvalidatedSlots) {
-  CcTest::InitializeVM();
-  Heap* heap = CcTest::heap();
-  Isolate* isolate = heap->isolate();
-  PagedSpace* old_space = heap->old_space();
-  Page* page;
-  std::vector<ByteArray*> byte_arrays;
-  const int kLength = 256 - ByteArray::kHeaderSize;
-  const int kSize = ByteArray::SizeFor(kLength);
-  CHECK_EQ(kSize, 256);
-  // Fill a page with byte arrays.
-  {
-    AlwaysAllocateScope always_allocate(isolate);
-    heap::SimulateFullSpace(old_space);
-    ByteArray* byte_array;
-    CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
-    byte_arrays.push_back(byte_array);
-    page = Page::FromAddress(byte_array->address());
-    CHECK_EQ(page->area_size() % kSize, 0u);
-    size_t n = page->area_size() / kSize;
-    for (size_t i = 1; i < n; i++) {
-      CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
-      byte_arrays.push_back(byte_array);
-      CHECK_EQ(page, Page::FromAddress(byte_array->address()));
-    }
-  }
-  CHECK_NULL(page->invalidated_slots());
-  {
-    // Without invalidated slots on the page, the filter considers
-    // all slots as valid.
-    InvalidatedSlotsFilter filter(page);
-    for (auto byte_array : byte_arrays) {
-      Address start = byte_array->address() + ByteArray::kHeaderSize;
-      Address end = byte_array->address() + kSize;
-      for (Address addr = start; addr < end; addr += kPointerSize) {
-        CHECK(filter.IsValid(addr));
-      }
-    }
-  }
-  // Register every second byte array as invalidated.
-  for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i], kSize);
-  }
-  {
-    InvalidatedSlotsFilter filter(page);
-    for (size_t i = 0; i < byte_arrays.size(); i++) {
-      ByteArray* byte_array = byte_arrays[i];
-      Address start = byte_array->address() + ByteArray::kHeaderSize;
-      Address end = byte_array->address() + kSize;
-      for (Address addr = start; addr < end; addr += kPointerSize) {
-        if (i % 2 == 0) {
-          CHECK(!filter.IsValid(addr));
-        } else {
-          CHECK(filter.IsValid(addr));
-        }
-      }
-    }
-  }
-  // Register the remaining byte arrays as invalidated.
-  for (size_t i = 1; i < byte_arrays.size(); i += 2) {
-    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i], kSize);
-  }
-  {
-    InvalidatedSlotsFilter filter(page);
-    for (size_t i = 0; i < byte_arrays.size(); i++) {
-      ByteArray* byte_array = byte_arrays[i];
-      Address start = byte_array->address() + ByteArray::kHeaderSize;
-      Address end = byte_array->address() + kSize;
-      for (Address addr = start; addr < end; addr += kPointerSize) {
-        CHECK(!filter.IsValid(addr));
-      }
-    }
-  }
-}
-
-}  // namespace internal
-}  // namespace v8
} // namespace internal
} // namespace v8