[heap] Reland move slot filtering logic into sweeper.
BUG=chromium:648568
Review-Url: https://codereview.chromium.org/2428493003
Cr-Commit-Position: refs/heads/master@{#40393}
parent 5b6e391354
commit 60cb6013d4
--- a/src/base/atomic-utils.h
+++ b/src/base/atomic-utils.h
@@ -65,6 +65,13 @@ class NoBarrierAtomicValue {
     return reinterpret_cast<base::NoBarrierAtomicValue<T>*>(address);
   }
 
+  V8_INLINE bool TrySetValue(T old_value, T new_value) {
+    return base::NoBarrier_CompareAndSwap(
+               &value_, cast_helper<T>::to_storage_type(old_value),
+               cast_helper<T>::to_storage_type(new_value)) ==
+           cast_helper<T>::to_storage_type(old_value);
+  }
+
   V8_INLINE T Value() const {
     return cast_helper<T>::to_return_type(base::NoBarrier_Load(&value_));
   }
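The new TrySetValue is a compare-and-swap: the store happens only if the slot still contains old_value, and the return value reports whether this thread's write won. A standalone sketch of the same contract using std::atomic (illustrative names, not V8 code); relaxed ordering mirrors the NoBarrier_* semantics:

#include <atomic>
#include <cassert>

// Same contract as TrySetValue above: write `desired` only if the slot
// still holds `expected`, and report whether the swap took effect.
template <typename T>
bool TrySet(std::atomic<T>& slot, T expected, T desired) {
  // compare_exchange_strong rewrites `expected` on failure; we discard that.
  return slot.compare_exchange_strong(expected, desired,
                                      std::memory_order_relaxed);
}

int main() {
  std::atomic<int> slot{1};
  assert(TrySet(slot, 1, 2));   // slot held 1, swap succeeds
  assert(!TrySet(slot, 1, 3));  // slot now holds 2, swap fails
  assert(slot.load(std::memory_order_relaxed) == 2);
}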
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -3320,9 +3320,10 @@ int MarkCompactCollector::Sweeper::RawSweep(
     Page* p, FreeListRebuildingMode free_list_mode,
     FreeSpaceTreatmentMode free_space_mode) {
   Space* space = p->owner();
+  AllocationSpace identity = space->identity();
   DCHECK_NOT_NULL(space);
-  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
-         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
+  DCHECK(free_list_mode == IGNORE_FREE_LIST || identity == OLD_SPACE ||
+         identity == CODE_SPACE || identity == MAP_SPACE);
   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
 
   // Before we sweep objects on the page, we free dead array buffers which
@@ -3351,6 +3352,8 @@ int MarkCompactCollector::Sweeper::RawSweep(
 
   LiveObjectIterator<kBlackObjects> it(p);
   HeapObject* object = NULL;
+  bool clear_slots =
+      p->old_to_new_slots() && (identity == OLD_SPACE || identity == MAP_SPACE);
   while ((object = it.Next()) != NULL) {
     DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
     Address free_end = object->address();
@@ -3368,6 +3371,11 @@ int MarkCompactCollector::Sweeper::RawSweep(
         p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                         ClearRecordedSlots::kNo);
       }
+
+      if (clear_slots) {
+        RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
+                                               SlotSet::KEEP_EMPTY_BUCKETS);
+      }
     }
     Map* map = object->synchronized_map();
     int size = object->SizeFromMap(map);
@@ -3383,9 +3391,6 @@ int MarkCompactCollector::Sweeper::RawSweep(
     free_start = free_end + size;
   }
 
-  // Clear the mark bits of that page and reset live bytes count.
-  p->ClearLiveness();
-
   if (free_start != p->area_end()) {
     CHECK_GT(p->area_end(), free_start);
     size_t size = static_cast<size_t>(p->area_end() - free_start);
@@ -3400,7 +3405,16 @@ int MarkCompactCollector::Sweeper::RawSweep(
       p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                       ClearRecordedSlots::kNo);
     }
+
+    if (clear_slots) {
+      RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
+                                             SlotSet::KEEP_EMPTY_BUCKETS);
+    }
   }
+
+  // Clear the mark bits of that page and reset live bytes count.
+  p->ClearLiveness();
+
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   if (free_list_mode == IGNORE_FREE_LIST) return 0;
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
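Taken together, the RawSweep hunks make the sweeper filter the old-to-new remembered set as a side effect of walking live objects: every gap between live objects is a garbage range, and any recorded slot inside it must be stale. Note also that ClearLiveness() moves below the final free-range handling so the slot removal above still runs while the page state is consistent. A simplified model of the loop (illustrative types, not V8's):

#include <cstdint>
#include <set>
#include <utility>
#include <vector>

// Simplified model of the sweeping loop above: walk live objects in address
// order and drop remembered-set entries that fall inside the free gaps
// between them, plus the trailing gap up to the end of the page.
struct PageModel {
  uintptr_t area_start, area_end;
  std::vector<std::pair<uintptr_t, uintptr_t>> live_objects;  // sorted [begin, end)
  std::set<uintptr_t> old_to_new;  // recorded slot addresses
};

void SweepAndFilter(PageModel& p) {
  uintptr_t free_start = p.area_start;
  auto erase_range = [&](uintptr_t begin, uintptr_t end) {
    p.old_to_new.erase(p.old_to_new.lower_bound(begin),
                       p.old_to_new.lower_bound(end));
  };
  for (const auto& obj : p.live_objects) {
    erase_range(free_start, obj.first);  // slots in garbage ranges are stale
    free_start = obj.second;             // the next gap starts after this object
  }
  erase_range(free_start, p.area_end);   // trailing free range
}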
@@ -3615,6 +3629,11 @@ class PointerUpdateJobTraits {
 
   static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
                                                        Address slot_address) {
+    // There may be concurrent action on slots in dead objects. Concurrent
+    // sweeper threads may overwrite the slot content with a free space object.
+    // Moreover, the pointed-to object may also get concurrently overwritten
+    // with a free space object. The sweeper always gets priority performing
+    // these writes.
     base::NoBarrierAtomicValue<Object*>* slot =
         base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
     Object* slot_reference = slot->Value();
@@ -3630,8 +3649,10 @@ class PointerUpdateJobTraits {
         if (map_word.ToRawValue() < Page::kPageSize) {
           return REMOVE_SLOT;
         }
-        // Update the corresponding slot.
-        slot->SetValue(map_word.ToForwardingAddress());
+        // Update the corresponding slot only if the slot content did not
+        // change in the meantime. This may happen when a concurrent sweeper
+        // thread stored a free space object at that memory location.
+        slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
       }
       // If the object was in from space before and is after executing the
       // callback in to space, the object is still live.
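The switch from SetValue to TrySetValue closes a race: a concurrent sweeper may store a free-space filler into the same slot, and an unconditional store here could resurrect a stale pointer over it. With compare-and-swap the updater's write is silently dropped whenever the sweeper got there first, which is exactly the "sweeper gets priority" rule the comment states. In miniature (illustrative values, not V8 code):

#include <atomic>
#include <cstdint>

// Miniature version of the race described above. The updater installs a
// forwarding address only if the slot is unchanged; the sweeper's filler
// write always wins.
constexpr intptr_t kOldObject = 0x1000;
constexpr intptr_t kForwarded = 0x2000;
constexpr intptr_t kFreeSpaceFiller = 0x3000;

void UpdateSlot(std::atomic<intptr_t>& slot) {
  intptr_t expected = kOldObject;
  // Dropped silently if the sweeper already replaced the slot contents.
  slot.compare_exchange_strong(expected, kForwarded,
                               std::memory_order_relaxed);
}

void SweepSlot(std::atomic<intptr_t>& slot) {
  slot.store(kFreeSpaceFiller, std::memory_order_relaxed);  // sweeper wins
}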
@@ -3816,9 +3837,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
   if (identity == NEW_SPACE) {
     RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
   } else {
-    if (identity == OLD_SPACE || identity == MAP_SPACE) {
-      RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap_, page);
-    } else {
+    if (identity == CODE_SPACE) {
       RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(heap_, page);
     }
     max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
--- a/src/heap/remembered-set.cc
+++ b/src/heap/remembered-set.cc
@@ -14,23 +14,6 @@
 namespace v8 {
 namespace internal {
 
-template <PointerDirection direction>
-void RememberedSet<direction>::ClearInvalidSlots(Heap* heap,
-                                                 MemoryChunk* chunk) {
-  STATIC_ASSERT(direction == OLD_TO_NEW);
-  DCHECK(chunk->owner()->identity() == OLD_SPACE ||
-         chunk->owner()->identity() == MAP_SPACE);
-  SlotSet* slots = GetSlotSet(chunk);
-  if (slots != nullptr) {
-    slots->Iterate(
-        [heap, chunk](Address addr) {
-          Object** slot = reinterpret_cast<Object**>(addr);
-          return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
-        },
-        SlotSet::KEEP_EMPTY_BUCKETS);
-  }
-}
-
 template <PointerDirection direction>
 void RememberedSet<direction>::ClearInvalidTypedSlots(Heap* heap,
                                                       MemoryChunk* chunk) {
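For context, the deleted ClearInvalidSlots was a separate whole-chunk pass: visit every recorded slot and drop the ones that no longer lie inside a live object. After this commit that filtering happens during sweeping itself (see the RawSweep hunks above), where the free ranges are discovered anyway, so the extra pass disappears. The shape of the removed pass, roughly (illustrative types, not V8's):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

// Outline of the removed filtering pass: keep a slot only if a predicate
// (in V8, "does this address lie inside a live object?") approves it.
void FilterSlots(std::vector<uintptr_t>& slots,
                 const std::function<bool(uintptr_t)>& is_valid_slot) {
  slots.erase(std::remove_if(slots.begin(), slots.end(),
                             [&](uintptr_t addr) { return !is_valid_slot(addr); }),
              slots.end());
}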
@@ -60,8 +43,6 @@ bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
       chunk, reinterpret_cast<Address>(slot));
 }
 
-template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap,
-                                                           MemoryChunk* chunk);
 template void RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(
     Heap* heap, MemoryChunk* chunk);
 
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -202,13 +202,8 @@ class RememberedSet {
   // slots that are not part of live objects anymore. This method must be
   // called after marking, when the whole transitive closure is known and
   // must be called before sweeping when mark bits are still intact.
-  static void ClearInvalidSlots(Heap* heap);
-
-  static void ClearInvalidSlots(Heap* heap, MemoryChunk* chunk);
   static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
-
-  static void VerifyValidSlots(Heap* heap);
 
  private:
   static SlotSet* GetSlotSet(MemoryChunk* chunk) {
     if (direction == OLD_TO_OLD) {
--- a/src/heap/slot-set.h
+++ b/src/heap/slot-set.h
@@ -80,15 +80,6 @@ class SlotSet : public Malloced {
     }
   }
 
-  void PreFreeEmptyBucket(int bucket_index) {
-    base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
-    if (bucket_ptr != nullptr) {
-      base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
-      to_be_freed_buckets_.push(bucket_ptr);
-      bucket[bucket_index].SetValue(nullptr);
-    }
-  }
-
   // The slot offsets specify a range of slots at addresses:
   // [page_start_ + start_offset ... page_start_ + end_offset).
   void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
@@ -108,12 +99,10 @@ class SlotSet : public Malloced {
     int current_cell = start_cell;
     ClearCell(current_bucket, current_cell, ~start_mask);
     current_cell++;
+    base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
     if (current_bucket < end_bucket) {
-      if (bucket[current_bucket].Value() != nullptr) {
-        while (current_cell < kCellsPerBucket) {
-          bucket[current_bucket].Value()[current_cell].SetValue(0);
-          current_cell++;
-        }
+      if (bucket_ptr != nullptr) {
+        ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
       }
       // The rest of the current bucket is cleared.
       // Move on to the next bucket.
@@ -127,17 +116,23 @@ class SlotSet : public Malloced {
         PreFreeEmptyBucket(current_bucket);
       } else if (mode == FREE_EMPTY_BUCKETS) {
         ReleaseBucket(current_bucket);
+      } else {
+        DCHECK(mode == KEEP_EMPTY_BUCKETS);
+        bucket_ptr = bucket[current_bucket].Value();
+        if (bucket_ptr) {
+          ClearBucket(bucket_ptr, 0, kCellsPerBucket);
+        }
       }
       current_bucket++;
     }
     // All buckets between start_bucket and end_bucket are cleared.
+    bucket_ptr = bucket[current_bucket].Value();
     DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
-    if (current_bucket == kBuckets ||
-        bucket[current_bucket].Value() == nullptr) {
+    if (current_bucket == kBuckets || bucket_ptr == nullptr) {
       return;
     }
     while (current_cell < end_cell) {
-      bucket[current_bucket].Value()[current_cell].SetValue(0);
+      bucket_ptr[current_cell].SetValue(0);
       current_cell++;
     }
     // All cells between start_cell and end_cell are cleared.
@@ -242,6 +237,26 @@ class SlotSet : public Malloced {
     return result;
   }
 
+  void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
+                   int end_cell) {
+    DCHECK_GE(start_cell, 0);
+    DCHECK_LE(end_cell, kCellsPerBucket);
+    int current_cell = start_cell;
+    while (current_cell < kCellsPerBucket) {
+      bucket[current_cell].SetValue(0);
+      current_cell++;
+    }
+  }
+
+  void PreFreeEmptyBucket(int bucket_index) {
+    base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
+    if (bucket_ptr != nullptr) {
+      base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+      to_be_freed_buckets_.push(bucket_ptr);
+      bucket[bucket_index].SetValue(nullptr);
+    }
+  }
+
   void ReleaseBucket(int bucket_index) {
     DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
     bucket[bucket_index].SetValue(nullptr);
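PreFreeEmptyBucket (moved here from earlier in the file) and the new ClearBucket give RemoveRange one helper per empty-bucket policy: keep the bucket but zero its cells, free it immediately, or queue it so that concurrent readers never touch freed memory. A condensed sketch of the three policies (simplified types; the PREFREE_EMPTY_BUCKETS name is assumed from the PreFreeEmptyBucket call above):

#include <cstdint>
#include <mutex>
#include <queue>

enum EmptyBucketMode { FREE_EMPTY_BUCKETS, PREFREE_EMPTY_BUCKETS, KEEP_EMPTY_BUCKETS };

struct Bucket { uint32_t cells[32] = {}; };

std::mutex to_be_freed_mutex;
std::queue<Bucket*> to_be_freed;

void HandleEmptyBucket(Bucket*& slot, EmptyBucketMode mode) {
  Bucket* ptr = slot;
  if (ptr == nullptr) return;
  if (mode == KEEP_EMPTY_BUCKETS) {
    *ptr = Bucket{};  // zero the cells, keep the allocation
  } else if (mode == FREE_EMPTY_BUCKETS) {
    slot = nullptr;
    delete ptr;  // safe only when nobody can race on this bucket
  } else {  // PREFREE_EMPTY_BUCKETS
    std::lock_guard<std::mutex> guard(to_be_freed_mutex);
    to_be_freed.push(ptr);  // actually freed later, at a safe point
    slot = nullptr;
  }
}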
--- a/test/unittests/heap/slot-set-unittest.cc
+++ b/test/unittests/heap/slot-set-unittest.cc
@@ -101,10 +101,12 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
   set.SetPageStart(0);
   uint32_t first = start == 0 ? 0 : start - kPointerSize;
   uint32_t last = end == Page::kPageSize ? end - kPointerSize : end;
+  for (const auto mode :
+       {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
   for (uint32_t i = first; i <= last; i += kPointerSize) {
     set.Insert(i);
   }
-  set.RemoveRange(start, end, SlotSet::FREE_EMPTY_BUCKETS);
+  set.RemoveRange(start, end, mode);
   if (first != start) {
     EXPECT_TRUE(set.Lookup(first));
   }
@@ -114,6 +116,7 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
   for (uint32_t i = start; i < end; i += kPointerSize) {
     EXPECT_FALSE(set.Lookup(i));
   }
+  }
 }
 
 TEST(SlotSet, RemoveRange) {
@@ -134,11 +137,14 @@ TEST(SlotSet, RemoveRange) {
   }
   SlotSet set;
   set.SetPageStart(0);
+  for (const auto mode :
+       {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
   set.Insert(Page::kPageSize / 2);
-  set.RemoveRange(0, Page::kPageSize, SlotSet::FREE_EMPTY_BUCKETS);
+  set.RemoveRange(0, Page::kPageSize, mode);
   for (uint32_t i = 0; i < Page::kPageSize; i += kPointerSize) {
     EXPECT_FALSE(set.Lookup(i));
   }
+  }
 }
 
 TEST(TypedSlotSet, Iterate) {