Revert "[heap] Remove size from invalidated slots"

This reverts commit 93063ade0f.

Reason for revert: Clusterfuzz found an issue.

Original change's description:
> [heap] Remove size from invalidated slots
> 
> Slots are always valid inside an invalidated area when outside the
> respective object's current size. This allows us to remove the size
> from the InvalidatedSlots data structure.
> 
> This change was enabled by https://crrev.com/c/1771793.
> 
> Bug: v8:9454
> Change-Id: I2b5a7234d47227cb6ad8d67de20e9b5a2028ae83
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1773242
> Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#63510}
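
For context, the reverted CL had collapsed the per-chunk invalidated-slots
bookkeeping from an object-to-size map down to a plain object set; this revert
brings the recorded size back. A minimal sketch of the two shapes, using plain
addresses as a stand-in for the real HeapObject keys with Object::Comparer
(the actual declarations appear in the invalidated-slots.h hunk further down):

#include <cstdint>
#include <map>
#include <set>

using Address = std::uintptr_t;  // stand-in for a HeapObject start address

// Shape introduced by the reverted CL: only the invalidated objects are kept.
using InvalidatedSlotsNoSize = std::set<Address>;

// Shape this revert restores: each invalidated object carries its size in
// bytes as of the layout change, bounding the invalidated region.
using InvalidatedSlotsWithSize = std::map<Address, int>;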

TBR=ulan@chromium.org,sigurds@chromium.org,tebbi@chromium.org,dinfuehr@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:9454
Change-Id: I7daf96cf50aaedd4dbdab48fd550182df94e54bf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1783106
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63535}
Dominik Inführ 2019-09-03 20:39:19 +00:00 committed by Commit Bot
parent dc51c15be1
commit d4e168a3b5
15 changed files with 205 additions and 81 deletions


@ -3712,7 +3712,8 @@ void TranslatedState::InitializeJSObjectAt(
CHECK_GE(slot->GetChildrenCount(), 2);
// Notify the concurrent marker about the layout change.
isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
isolate()->heap()->NotifyObjectLayoutChange(
*object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
// Fill the property array field.
{
@ -3771,7 +3772,8 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
}
// Notify the concurrent marker about the layout change.
isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
isolate()->heap()->NotifyObjectLayoutChange(
*object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
// Write the fields to the object.
for (int i = 1; i < slot->GetChildrenCount(); i++) {


@ -3387,19 +3387,19 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
void Heap::NotifyObjectLayoutChange(HeapObject object,
void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
const DisallowHeapAllocation&) {
if (incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
if (incremental_marking()->IsCompacting() &&
MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)
->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
}
}
if (MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)
->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {


@ -895,7 +895,8 @@ class Heap {
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
void NotifyObjectLayoutChange(HeapObject object,
// The old size is the size of the object before layout change.
void NotifyObjectLayoutChange(HeapObject object, int old_size,
const DisallowHeapAllocation&);
#ifdef VERIFY_HEAP
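
The hunks above and below all follow the restored calling convention: the
caller measures the object before mutating its layout and passes that old size
to Heap::NotifyObjectLayoutChange. A hedged, self-contained illustration of
the ordering (FakeHeap and FakeObject are invented here purely so the snippet
compiles; they are not V8 types):

#include <cassert>

struct FakeObject {
  int size_in_bytes;
  int Size() const { return size_in_bytes; }
};

struct FakeHeap {
  // Mirrors the restored signature: old_size is the object's size *before*
  // the layout change, so invalidated-slot bookkeeping can record the full
  // old extent of the object.
  void NotifyObjectLayoutChange(const FakeObject& object, int old_size) {
    assert(old_size >= object.Size());
    // ...register (object, old_size) with the chunk's invalidated slots...
  }
};

void ShrinkObject(FakeHeap& heap, FakeObject& object, int new_size) {
  int old_size = object.Size();                    // measure first
  heap.NotifyObjectLayoutChange(object, old_size);
  object.size_in_bytes = new_size;                 // only then change the layout
}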


@ -24,39 +24,42 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
DCHECK_LE(last_slot_, slot);
last_slot_ = slot;
#endif
while (slot >= invalidated_end_) {
++iterator_;
if (iterator_ != iterator_end_) {
// Invalidated ranges must not overlap.
DCHECK_LE(invalidated_end_, iterator_->first.address());
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
invalidated_object_ = HeapObject();
invalidated_object_size_ = 0;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
}
// Now the invalidated region ends after the slot.
if (slot < invalidated_start_) {
// The invalidated region starts after the slot.
return true;
}
while (slot >= next_invalidated_start_) {
NextInvalidatedObject();
// The invalidated region includes the slot.
// Ask the object if the slot is valid.
if (invalidated_object_.is_null()) {
invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
DCHECK(!invalidated_object_.IsFiller());
invalidated_object_size_ =
invalidated_object_.SizeFromMap(invalidated_object_.map());
}
HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
if (invalidated_size_ == 0) {
invalidated_size_ = invalidated_object.Size();
}
int offset = static_cast<int>(slot - invalidated_start_);
DCHECK_GT(offset, 0);
if (offset < invalidated_size_)
return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
DCHECK_LE(invalidated_object_size_,
static_cast<int>(invalidated_end_ - invalidated_start_));
NextInvalidatedObject();
return true;
}
void InvalidatedSlotsFilter::NextInvalidatedObject() {
invalidated_start_ = next_invalidated_start_;
invalidated_size_ = 0;
if (iterator_ == iterator_end_) {
next_invalidated_start_ = sentinel_;
} else {
next_invalidated_start_ = iterator_->address();
iterator_++;
if (offset >= invalidated_object_size_) {
return slots_in_free_space_are_valid_;
}
return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
}
void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
@ -69,25 +72,35 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
if (iterator_ == iterator_end_) return;
// Ignore invalidated objects that start before free region
while (invalidated_start_ < free_start) {
// Ignore invalidated objects before free region
while (free_start >= invalidated_end_) {
++iterator_;
NextInvalidatedObject();
}
// Remove all invalidated objects that start within
// free region.
while (invalidated_start_ < free_end) {
iterator_ = invalidated_slots_->erase(iterator_);
// Loop here: Free region might contain multiple invalidated objects
while (free_end > invalidated_start_) {
// Case: Free region starts before current invalidated object
if (free_start <= invalidated_start_) {
iterator_ = invalidated_slots_->erase(iterator_);
} else {
// Case: Free region starts within current invalidated object
// (Can happen for right-trimmed objects)
iterator_++;
}
NextInvalidatedObject();
}
}
void InvalidatedSlotsCleanup::NextInvalidatedObject() {
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->address();
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
}
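
Stripped of the iterator bookkeeping, the restored filter makes a three-way
decision per slot: before or past the recorded invalidated range the slot is
valid, inside the containing object's current size the object itself is asked
via IsValidSlot, and between the current size and the recorded size (now free
space) validity depends on the slots_in_free_space_are_valid_ flag. A
standalone model of that decision over plain addresses, with "ask the object"
approximated as valid:

#include <cstdint>
#include <map>

using Address = std::uintptr_t;
using InvalidatedSlots = std::map<Address, int>;  // object start -> recorded size

bool SlotIsValid(const InvalidatedSlots& invalidated, Address slot,
                 int current_object_size,  // SizeFromMap at filtering time
                 bool slots_in_free_space_are_valid) {
  // Find the invalidated object starting at or before the slot.
  auto it = invalidated.upper_bound(slot);
  if (it == invalidated.begin()) return true;  // slot precedes all invalidated objects
  --it;
  Address start = it->first;
  int recorded_size = it->second;
  if (slot >= start + recorded_size) return true;  // slot is past the invalidated range
  int offset = static_cast<int>(slot - start);
  if (offset >= current_object_size) {
    // The object shrank; the slot now points into free space.
    return slots_in_free_space_are_valid;
  }
  // Inside the object's current extent: the real filter asks the object's map
  // layout; this sketch simply treats such slots as valid.
  return true;
}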


@ -3,35 +3,52 @@
// found in the LICENSE file.
#include "src/heap/invalidated-slots.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
// The sweeper removes invalid slots and makes free space available for
// allocation. Slots for new objects can be recorded in the free space.
// Note that we cannot simply check for SweepingDone because pages in large
// object space are not swept but have SweepingDone() == true.
bool slots_in_free_space_are_valid =
chunk->SweepingDone() && chunk->InOldSpace();
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
slots_in_free_space_are_valid);
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
// Always treat these slots as valid for old-to-new for now. Invalid
// old-to-new slots are always cleared.
bool slots_in_free_space_are_valid = true;
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
slots_in_free_space_are_valid);
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
bool slots_in_free_space_are_valid) {
// Adjust slots_in_free_space_are_valid_ if more spaces are added.
DCHECK_IMPLIES(invalidated_slots != nullptr,
chunk->InOldSpace() || chunk->InLargeObjectSpace());
slots_in_free_space_are_valid_ = slots_in_free_space_are_valid;
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
iterator_ = invalidated_slots->begin();
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
// Invoke NextInvalidatedObject twice, to initialize
// invalidated_start_ to the first invalidated object and
// next_invalidated_object_ to the second one.
NextInvalidatedObject();
NextInvalidatedObject();
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
// These values will be lazily set when needed.
invalidated_object_size_ = 0;
#ifdef DEBUG
last_slot_ = chunk->area_start();
#endif
@ -52,7 +69,13 @@ InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
iterator_end_ = invalidated_slots_->end();
sentinel_ = chunk->area_end();
NextInvalidatedObject();
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
#ifdef DEBUG
last_free_ = chunk->area_start();
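
The two factory functions above differ only in how they answer "are slots
recorded in free space still valid?": for old-to-old it depends on whether the
sweeper has already run on an old-space page (new objects may already occupy
former free space), while for old-to-new invalid slots are cleared eagerly, so
the answer is unconditionally true. A condensed restatement with the chunk
predicates reduced to plain booleans:

bool OldToOldSlotsInFreeSpaceAreValid(bool sweeping_done, bool in_old_space) {
  // Large-object pages report SweepingDone() even though they are never
  // swept, hence the additional old-space check.
  return sweeping_done && in_old_space;
}

constexpr bool kOldToNewSlotsInFreeSpaceAreValid = true;  // invalid ones are cleared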


@ -5,7 +5,7 @@
#ifndef V8_HEAP_INVALIDATED_SLOTS_H_
#define V8_HEAP_INVALIDATED_SLOTS_H_
#include <set>
#include <map>
#include <stack>
#include "src/base/atomic-utils.h"
@ -20,7 +20,7 @@ namespace internal {
// that potentially invalidates slots recorded concurrently. The second part
// of each element is the size of the corresponding object before the layout
// change.
using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
// This class provides IsValid predicate that takes into account the set
// of invalidated objects in the given memory chunk.
@ -34,7 +34,8 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
InvalidatedSlots* invalidated_slots);
InvalidatedSlots* invalidated_slots,
bool slots_in_free_space_are_valid);
inline bool IsValid(Address slot);
private:
@ -42,15 +43,14 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
InvalidatedSlots::const_iterator iterator_end_;
Address sentinel_;
Address invalidated_start_;
Address next_invalidated_start_;
int invalidated_size_;
Address invalidated_end_;
HeapObject invalidated_object_;
int invalidated_object_size_;
bool slots_in_free_space_are_valid_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
#endif
private:
inline void NextInvalidatedObject();
};
class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
@ -71,6 +71,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
Address sentinel_;
Address invalidated_start_;
Address invalidated_end_;
inline void NextInvalidatedObject();
#ifdef DEBUG


@ -3420,6 +3420,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
#ifdef DEBUG
for (auto object_size : *chunk_->invalidated_slots<OLD_TO_NEW>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-new slots were
// processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
@ -3438,6 +3445,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
#ifdef DEBUG
for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-old slots were
// processsed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();


@ -440,6 +440,13 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
SlotSet::KEEP_EMPTY_BUCKETS);
if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
#ifdef DEBUG
for (auto object_size : *page->invalidated_slots<OLD_TO_NEW>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-new slots were
// processed.
page->ReleaseInvalidatedSlots<OLD_TO_NEW>();


@ -1484,12 +1484,15 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
}
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
int size);
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
int size);
template <RememberedSetType type>
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
int size) {
bool skip_slot_recording;
if (type == OLD_TO_NEW) {
@ -1506,7 +1509,27 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
AllocateInvalidatedSlots<type>();
}
invalidated_slots<type>()->insert(object);
InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
if (it != invalidated_slots->end() && it->first == object) {
// object was already inserted
CHECK_LE(size, it->second);
return;
}
it = invalidated_slots->insert(it, std::make_pair(object, size));
// prevent overlapping invalidated objects for old-to-new.
if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
HeapObject pred = (--it)->first;
int pred_size = it->second;
DCHECK_LT(pred.address(), object.address());
if (pred.address() + pred_size > object.address()) {
it->second = static_cast<int>(object.address() - pred.address());
}
}
}
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
@ -1523,6 +1546,27 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
invalidated_slots<type>()->end();
}
template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
HeapObject old_start, HeapObject new_start);
template <RememberedSetType type>
void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start) {
DCHECK_LT(old_start, new_start);
DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
MemoryChunk::FromHeapObject(new_start));
static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
auto it = invalidated_slots<type>()->find(old_start);
if (it != invalidated_slots<type>()->end()) {
int old_size = it->second;
int delta = static_cast<int>(new_start.address() - old_start.address());
invalidated_slots<type>()->erase(it);
(*invalidated_slots<type>())[new_start] = old_size - delta;
}
}
}
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
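
Two pieces of map maintenance restored in this file are easy to miss: when an
object is registered over a predecessor whose recorded range still overlaps it
(old-to-new only), the predecessor's size is clamped, and when an array is
left-trimmed the entry is re-keyed to the new start with its size reduced by
the trimmed bytes. A standalone model over plain addresses (the real code
operates on HeapObject keys inside MemoryChunk):

#include <cstdint>
#include <iterator>
#include <map>

using Address = std::uintptr_t;
using InvalidatedSlots = std::map<Address, int>;  // object start -> recorded size

// Register an invalidated object, keeping entries non-overlapping by clamping
// a predecessor that reaches into the newly inserted object.
void RegisterInvalidated(InvalidatedSlots& slots, Address object, int size) {
  auto it = slots.lower_bound(object);
  if (it != slots.end() && it->first == object) return;  // already registered
  it = slots.insert(it, {object, size});
  if (it != slots.begin()) {
    auto pred = std::prev(it);
    if (pred->first + pred->second > object) {
      pred->second = static_cast<int>(object - pred->first);
    }
  }
}

// After left-trimming, the object now starts at new_start: move the entry and
// shrink the recorded size by the trimmed delta.
void MoveInvalidated(InvalidatedSlots& slots, Address old_start, Address new_start) {
  auto it = slots.find(old_start);
  if (it == slots.end()) return;
  int old_size = it->second;
  int delta = static_cast<int>(new_start - old_start);
  slots.erase(it);
  slots[new_start] = old_size - delta;
}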


@ -729,7 +729,12 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseInvalidatedSlots();
template <RememberedSetType type>
V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
int size);
// Updates invalidated_slots after array left-trimming.
template <RememberedSetType type>
void MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start);
template <RememberedSetType type>
bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
template <RememberedSetType type>


@ -2776,7 +2776,9 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
Heap* heap = isolate->heap();
heap->NotifyObjectLayoutChange(*object, no_allocation);
int old_instance_size = old_map->instance_size();
heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
@ -2807,7 +2809,6 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
object->SetProperties(*array);
// Create filler object past the new instance size.
int old_instance_size = old_map->instance_size();
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK_GE(instance_size_delta, 0);
@ -2890,10 +2891,10 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
DisallowHeapAllocation no_allocation;
Heap* heap = isolate->heap();
heap->NotifyObjectLayoutChange(*object, no_allocation);
int old_instance_size = map->instance_size();
heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
// Resize the object in the heap if necessary.
int old_instance_size = map->instance_size();
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK_GE(instance_size_delta, 0);


@ -613,7 +613,8 @@ void SharedFunctionInfo::ClearPreparseData() {
Heap* heap = GetHeapFromWritableObject(data);
// Swap the map.
heap->NotifyObjectLayoutChange(data, no_gc);
heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreparseData::kSize,
no_gc);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
UncompiledDataWithPreparseData::kSize);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==


@ -113,7 +113,7 @@ void String::MakeThin(Isolate* isolate, String internalized) {
bool has_pointers = StringShape(*this).IsIndirect();
int old_size = this->Size();
isolate->heap()->NotifyObjectLayoutChange(*this, no_gc);
isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc);
bool one_byte = internalized.IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
@ -158,7 +158,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation);
isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@ -232,7 +232,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation);
isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing


@ -132,7 +132,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// for properties stored in the descriptor array.
if (details.location() == kField) {
DisallowHeapAllocation no_allocation;
isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
isolate->heap()->NotifyObjectLayoutChange(
*receiver, receiver_map->instance_size(), no_allocation);
FieldIndex index =
FieldIndex::ForPropertyIndex(*receiver_map, details.field_index());
// Special case deleting the last out-of object property.


@ -70,7 +70,8 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register every second byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i += 2) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
@ -94,7 +95,8 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register the all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
@ -115,7 +117,8 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register the all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
}
// Trim byte arrays and check that the slots outside the byte arrays are
// considered invalid if the old space page was swept.
@ -142,7 +145,8 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
// This should be no-op because the page is marked as evacuation
// candidate.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
}
// All slots must still be valid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@ -165,7 +169,8 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
// Register the all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
}
// All slots must still be invalid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@ -354,7 +359,8 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
byte_arrays[i].Size());
}
// Mark full page as free
@ -373,7 +379,8 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
byte_arrays[i].Size());
}
// Mark each object as free on page
@ -398,9 +405,11 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
CHECK_GT(byte_arrays.size(), 1);
ByteArray& invalidated = byte_arrays[1];
int invalidated_size = invalidated.Size();
heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
invalidated_size);
// Free memory at end of invalidated object
InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
@ -409,6 +418,8 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
// After cleanup the invalidated object should be smaller
InvalidatedSlots* invalidated_slots = page->invalidated_slots<OLD_TO_NEW>();
CHECK_GE((*invalidated_slots)[HeapObject::FromAddress(invalidated.address())],
invalidated.Size());
CHECK_EQ(invalidated_slots->size(), 1);
}