Reland of Remove slots that point to unboxed doubles from the StoreBuffer/SlotsBuffer.
The problem is that a tagged slot could become a double slot after migrating an object to another map with "shifted" fields (for example, as a result of generalizing an immutable data property to a data field). This CL also adds useful machinery that helps trigger incremental write barriers.

BUG=chromium:454297, chromium:465273
LOG=Y

Review URL: https://codereview.chromium.org/988363002

Cr-Commit-Position: refs/heads/master@{#27141}
parent 2bd426cdd5
commit 6d0677d845
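To make the failure mode from the commit message concrete before the diff: below is a minimal stand-alone sketch (all names invented; this is not V8 code) of a remembered-set entry that goes stale once the field it points at is reused for raw double bits.

    // Stand-alone illustration of the bug class this commit guards against:
    // a slot recorded while a field held a tagged pointer survives a layout
    // change that turns the same field into a raw (unboxed) double, so a
    // later GC pass would read double bits as a "pointer".
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    struct FakeObject {
      // Holds either a tagged pointer or raw double bits, depending on the
      // object's current layout descriptor ("map").
      uint64_t field;
    };

    int main() {
      std::vector<uint64_t*> remembered_slots;  // stands in for a store buffer

      FakeObject target;  // some other heap object
      FakeObject obj;
      obj.field = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&target));
      remembered_slots.push_back(&obj.field);  // write barrier records the slot

      // "Migration" to a layout where this field is an unboxed double.
      double boom_value = 3.14159;
      std::memcpy(&obj.field, &boom_value, sizeof(boom_value));

      // Unless &obj.field is removed from remembered_slots, a GC walking the
      // recorded slots would treat the double's bit pattern as a pointer.
      for (uint64_t* slot : remembered_slots) {
        std::printf("recorded slot now holds 0x%llx (not a valid pointer)\n",
                    static_cast<unsigned long long>(*slot));
      }
      return 0;
    }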
@@ -763,6 +763,11 @@ DEFINE_BOOL(stress_compaction, false,
            "stress the GC compactor to flush out bugs (implies "
            "--force_marking_deque_overflows)")

DEFINE_BOOL(manual_evacuation_candidates_selection, false,
            "Test mode only flag. It allows an unit test to select evacuation "
            "candidates pages (requires --stress_compaction).")


//
// Dev shell flags
//
@@ -721,6 +721,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {

  int count = 0;
  int fragmentation = 0;
  int page_number = 0;
  Candidate* least = NULL;

  PageIterator it(space);
@@ -735,9 +736,16 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
    CHECK(p->slots_buffer() == NULL);

    if (FLAG_stress_compaction) {
      if (FLAG_manual_evacuation_candidates_selection) {
        if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
          p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
          fragmentation = 1;
        }
      } else {
        unsigned int counter = space->heap()->ms_count();
        uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
        if ((counter & 1) == (page_number & 1)) fragmentation = 1;
        page_number++;
      }
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= over_reserved) {
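As an aside, here is a stand-alone sketch (FakePage and ShouldStressEvacuate are invented names, not V8 code) of the two stress-compaction selection modes shown above: explicit per-page selection driven by the new test-only flag, versus the ms_count parity heuristic.

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kPageSizeBits = 20;  // assumed 1 MB pages, for illustration

    struct FakePage {
      uintptr_t address;
      bool force_evacuation_candidate_for_testing;
    };

    // Returns true if the page should be treated as an evacuation candidate
    // when compaction is being stressed.
    bool ShouldStressEvacuate(const FakePage& p, unsigned ms_count,
                              bool manual_selection) {
      if (manual_selection) {
        // Test-only mode: only pages explicitly flagged by a test are selected.
        return p.force_evacuation_candidate_for_testing;
      }
      // Default stress mode: pick roughly half of the pages, alternating
      // between GC cycles based on the parity of the page number.
      uintptr_t page_number = p.address >> kPageSizeBits;
      return (ms_count & 1) == (page_number & 1);
    }

    int main() {
      FakePage page{0x200000, true};
      std::printf("manual: %d, parity heuristic: %d\n",
                  ShouldStressEvacuate(page, 3, true),
                  ShouldStressEvacuate(page, 3, false));
      return 0;
    }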
@@ -4499,6 +4507,21 @@ bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
}


static Object* g_smi_slot = NULL;


void SlotsBuffer::RemoveSlot(SlotsBuffer* buffer, ObjectSlot slot_to_remove) {
  DCHECK_EQ(Smi::FromInt(0), g_smi_slot);
  DCHECK(!IsTypedSlot(slot_to_remove));
  while (buffer != NULL) {
    ObjectSlot* slots = buffer->slots_;
    // Remove entries by replacing them with a dummy slot containing a smi.
    std::replace(&slots[0], &slots[buffer->idx_], slot_to_remove, &g_smi_slot);
    buffer = buffer->next_;
  }
}


static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return SlotsBuffer::CODE_TARGET_SLOT;
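For illustration, a simplified stand-alone sketch (MiniSlotsBuffer and friends are invented, not V8's SlotsBuffer) of the removal strategy above: walk the chain of fixed-size buffers and overwrite every occurrence of the slot with a harmless dummy entry via std::replace, so buffer sizes and ordering stay untouched.

    #include <algorithm>
    #include <cstdio>

    using ObjectSlot = void**;  // a slot is the address of a pointer-sized field

    struct MiniSlotsBuffer {
      static const int kNumberOfElements = 4;  // tiny here; the real buffer holds 1021
      MiniSlotsBuffer* next = nullptr;
      int idx = 0;  // number of used entries
      ObjectSlot slots[kNumberOfElements];
    };

    // Dummy value standing in for the Smi-valued sentinel used by the real code.
    static void* g_dummy = nullptr;

    void RemoveSlot(MiniSlotsBuffer* buffer, ObjectSlot slot_to_remove) {
      // Walk the whole chain; overwrite matches in place so sizes stay unchanged.
      while (buffer != nullptr) {
        std::replace(&buffer->slots[0], &buffer->slots[buffer->idx],
                     slot_to_remove, static_cast<ObjectSlot>(&g_dummy));
        buffer = buffer->next;
      }
    }

    int main() {
      void* field_a = nullptr;
      void* field_b = nullptr;
      MiniSlotsBuffer b;
      b.idx = 2;
      b.slots[0] = &field_a;  // the slot we want to drop
      b.slots[1] = &field_b;  // an unrelated slot that must survive
      RemoveSlot(&b, &field_a);
      std::printf("removed: %d, kept: %d\n", b.slots[0] == &g_dummy,
                  b.slots[1] == &field_b);
      return 0;
    }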
@@ -360,6 +360,9 @@ class SlotsBuffer {
                    SlotsBuffer** buffer_address, SlotType type, Address addr,
                    AdditionMode mode);

  // Removes all occurrences of given slot from the chain of slots buffers.
  static void RemoveSlot(SlotsBuffer* buffer, ObjectSlot slot_to_remove);

  static const int kNumberOfElements = 1021;

 private:
@@ -385,6 +385,12 @@ class MemoryChunk {
    // to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at next
    // candidates selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };
@@ -247,6 +247,55 @@ void StoreBuffer::Filter(int flag) {
}


void StoreBuffer::RemoveSlots(Address start_address, Address end_address) {
  struct IsValueInRangePredicate {
    Address start_address_;
    Address end_address_;

    IsValueInRangePredicate(Address start_address, Address end_address)
        : start_address_(start_address), end_address_(end_address) {}

    bool operator()(Address addr) {
      return start_address_ <= addr && addr < end_address_;
    }
  };

  IsValueInRangePredicate predicate(start_address, end_address);
  // Some address in old space that does not move.
  const Address kRemovedSlot = heap_->undefined_value()->address();
  DCHECK(Page::FromAddress(kRemovedSlot)->NeverEvacuate());

  {
    Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
    std::replace_if(start_, top, predicate, kRemovedSlot);
  }

  if (old_buffer_is_sorted_) {
    // Remove slots from an old buffer preserving the order.
    Address* lower = std::lower_bound(old_start_, old_top_, start_address);
    if (lower != old_top_) {
      // [lower, old_top_) range contain elements that are >= |start_address|.
      Address* upper = std::lower_bound(lower, old_top_, end_address);
      // Remove [lower, upper) from the buffer.
      if (upper == old_top_) {
        // All elements in [lower, old_top_) range are < |end_address|.
        old_top_ = lower;
      } else if (lower != upper) {
        // [upper, old_top_) range contain elements that are >= |end_address|,
        // move [upper, old_top_) range to [lower, ...) and update old_top_.
        Address* new_top = lower;
        for (Address* p = upper; p < old_top_; p++) {
          *new_top++ = *p;
        }
        old_top_ = new_top;
      }
    }
  } else {
    std::replace_if(old_start_, old_top_, predicate, kRemovedSlot);
  }
}


void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
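A stand-alone sketch (invented names) of the order-preserving removal used above for the sorted part of the buffer: locate [lower, upper) with std::lower_bound and compact the surviving tail over it instead of re-sorting. The unsorted parts are handled separately by overwriting matches with a dummy address, as the std::replace_if calls above do.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Removes all entries in [start, end) from the sorted array addrs[0..size)
    // and returns the new size; relative order of survivors is preserved.
    size_t RemoveRangeSorted(uintptr_t* addrs, size_t size, uintptr_t start,
                             uintptr_t end) {
      uintptr_t* old_top = addrs + size;
      uintptr_t* lower = std::lower_bound(addrs, old_top, start);
      uintptr_t* upper = std::lower_bound(lower, old_top, end);
      // Shift the surviving tail [upper, old_top) down onto lower.
      uintptr_t* new_top = lower;
      for (uintptr_t* p = upper; p < old_top; p++) *new_top++ = *p;
      return static_cast<size_t>(new_top - addrs);
    }

    int main() {
      uintptr_t addrs[] = {0x10, 0x20, 0x30, 0x40, 0x50};
      size_t n = RemoveRangeSorted(addrs, 5, 0x20, 0x45);
      for (size_t i = 0; i < n; i++) std::printf("0x%lx ", (unsigned long)addrs[i]);
      std::printf("\n");  // expected output: 0x10 0x50
      return 0;
    }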
@@ -297,12 +346,18 @@ static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  DCHECK_NOT_NULL(cell_address);
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    // Check if the cache still points into the active part of the buffer.
    if ((start_ <= in_store_buffer_1_element_cache &&
         in_store_buffer_1_element_cache < top) ||
        (old_start_ <= in_store_buffer_1_element_cache &&
         in_store_buffer_1_element_cache < old_top_)) {
      return true;
    }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  }
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
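A stand-alone sketch (AddressBuffer and Contains are invented, not V8 code) of the cache-validation pattern added above: a memoized pointer into a growable buffer is only trusted while it still lies inside the buffer's current live range; otherwise the code falls back to a linear scan and refreshes the cache.

    #include <cstdio>

    struct AddressBuffer {
      const void** start;  // first entry
      const void** top;    // one past the last live entry
    };

    static const void** g_one_element_cache = nullptr;

    bool Contains(const AddressBuffer& buf, const void* value) {
      // Fast path: trust the cached position only if it still lies inside the
      // live range [start, top) and still holds the value we are looking for.
      if (g_one_element_cache != nullptr && buf.start <= g_one_element_cache &&
          g_one_element_cache < buf.top && *g_one_element_cache == value) {
        return true;
      }
      // Slow path: linear scan from the top, refreshing the cache on a hit.
      for (const void** p = buf.top - 1; p >= buf.start; p--) {
        if (*p == value) {
          g_one_element_cache = p;
          return true;
        }
      }
      return false;
    }

    int main() {
      int a, b;
      const void* entries[] = {&a, &b};
      AddressBuffer buf{entries, entries + 2};
      // First call scans and caches, second call hits the cache.
      std::printf("%d %d\n", Contains(buf, &b), Contains(buf, &b));
      return 0;
    }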
@@ -107,6 +107,10 @@ class StoreBuffer {
  void ClearInvalidStoreBufferEntries();
  void VerifyValidStoreBufferEntries();

  // Removes all the slots in [start_address, end_address) range from the store
  // buffer.
  void RemoveSlots(Address start_address, Address end_address);

 private:
  Heap* heap_;
@@ -1922,6 +1922,50 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
}


// Returns true if during migration from |old_map| to |new_map| "tagged"
// inobject fields are going to be replaced with unboxed double fields.
static bool ShouldClearSlotsRecorded(Map* old_map, Map* new_map,
                                     int new_number_of_fields) {
  DisallowHeapAllocation no_gc;
  int inobject = new_map->inobject_properties();
  DCHECK(inobject <= old_map->inobject_properties());

  int limit = Min(inobject, new_number_of_fields);
  for (int i = 0; i < limit; i++) {
    FieldIndex index = FieldIndex::ForPropertyIndex(new_map, i);
    if (new_map->IsUnboxedDoubleField(index) &&
        !old_map->IsUnboxedDoubleField(index)) {
      return true;
    }
  }
  return false;
}


static void RemoveOldToOldSlotsRecorded(Heap* heap, JSObject* object,
                                        FieldIndex index) {
  DisallowHeapAllocation no_gc;

  Object* old_value = object->RawFastPropertyAt(index);
  if (old_value->IsHeapObject()) {
    HeapObject* ho = HeapObject::cast(old_value);
    if (heap->InNewSpace(ho)) {
      // At this point there must be no old-to-new slots recorded for this
      // object.
      SLOW_DCHECK(
          !heap->store_buffer()->CellIsInStoreBuffer(reinterpret_cast<Address>(
              HeapObject::RawField(object, index.offset()))));
    } else {
      Page* p = Page::FromAddress(reinterpret_cast<Address>(ho));
      if (p->IsEvacuationCandidate()) {
        Object** slot = HeapObject::RawField(object, index.offset());
        SlotsBuffer::RemoveSlot(p->slots_buffer(), slot);
      }
    }
  }
}


// To migrate a fast instance to a fast map:
// - First check whether the instance needs to be rewritten. If not, simply
//   change the map.
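A minimal sketch (a vector of booleans stands in for the map's field layout; this is not V8's Map API) of the check ShouldClearSlotsRecorded performs above: recorded slots only need clearing when some in-object field that was tagged under the old layout becomes an unboxed double under the new one.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // true == field stores an unboxed double, false == field stores a tagged value.
    // Assumes old_layout has at least as many entries as new_layout, mirroring
    // the DCHECK on inobject property counts above.
    using FieldLayout = std::vector<bool>;

    bool ShouldClearRecordedSlots(const FieldLayout& old_layout,
                                  const FieldLayout& new_layout,
                                  int new_number_of_fields) {
      int limit = std::min<int>(static_cast<int>(new_layout.size()),
                                new_number_of_fields);
      for (int i = 0; i < limit; i++) {
        if (new_layout[i] && !old_layout[i]) return true;  // tagged -> double
      }
      return false;
    }

    int main() {
      FieldLayout old_layout = {false, true, false};  // tagged, double, tagged
      FieldLayout new_layout = {true, false, false};  // field 0 became a double
      std::printf("%d\n", ShouldClearRecordedSlots(old_layout, new_layout, 3));  // 1
      return 0;
    }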
@@ -2075,16 +2119,32 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
  // From here on we cannot fail and we shouldn't GC anymore.
  DisallowHeapAllocation no_allocation;

  Heap* heap = isolate->heap();

  // If we are going to put an unboxed double to the field that used to
  // contain HeapObject we should ensure that this slot is removed from
  // both StoreBuffer and respective SlotsBuffer.
  bool clear_slots_recorded =
      FLAG_unbox_double_fields && !heap->InNewSpace(object->address()) &&
      ShouldClearSlotsRecorded(*old_map, *new_map, number_of_fields);
  if (clear_slots_recorded) {
    Address obj_address = object->address();
    Address start_address = obj_address + JSObject::kHeaderSize;
    Address end_address = obj_address + old_map->instance_size();
    heap->store_buffer()->RemoveSlots(start_address, end_address);
  }

  // Copy (real) inobject properties. If necessary, stop at number_of_fields to
  // avoid overwriting |one_pointer_filler_map|.
  int limit = Min(inobject, number_of_fields);
  for (int i = 0; i < limit; i++) {
    FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
    Object* value = array->get(external + i);
    // Can't use JSObject::FastPropertyAtPut() because proper map was not set
    // yet.
    if (new_map->IsUnboxedDoubleField(index)) {
      DCHECK(value->IsMutableHeapNumber());
      if (clear_slots_recorded && !old_map->IsUnboxedDoubleField(index)) {
        RemoveOldToOldSlotsRecorded(heap, *object, index);
      }
      object->RawFastDoublePropertyAtPut(index,
                                         HeapNumber::cast(value)->value());
    } else {
@@ -2092,8 +2152,6 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
    }
  }

  Heap* heap = isolate->heap();

  // If there are properties in the new backing store, trim it to the correct
  // size and install the backing store into the object.
  if (external > 0) {
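A tiny sketch (illustrative constants only, not V8's real layout) of the address range cleared above before the fields are copied: every in-object field of the object under its old map, i.e. [object + JSObject::kHeaderSize, object + old_map->instance_size()).

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Invented sizes; the real values come from JSObject::kHeaderSize and
      // Map::instance_size().
      const uintptr_t kHeaderSize = 3 * sizeof(void*);  // e.g. map, properties, elements
      const uintptr_t kOldInstanceSize = kHeaderSize + 4 * sizeof(void*);  // 4 fields

      uintptr_t obj_address = 0x100000;
      uintptr_t start_address = obj_address + kHeaderSize;     // first in-object field
      uintptr_t end_address = obj_address + kOldInstanceSize;  // one past the last field
      std::printf("clear recorded slots in [0x%llx, 0x%llx)\n",
                  static_cast<unsigned long long>(start_address),
                  static_cast<unsigned long long>(end_address));
      return 0;
    }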
@@ -48,6 +48,12 @@ static Handle<String> MakeName(const char* str, int suffix) {
}


Handle<JSObject> GetObject(const char* name) {
  return v8::Utils::OpenHandle(
      *v8::Handle<v8::Object>::Cast(CcTest::global()->Get(v8_str(name))));
}


static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
  if (obj->IsUnboxedDoubleField(field_index)) {
    return obj->RawFastDoublePropertyAt(field_index);
@@ -1305,4 +1311,223 @@ TEST(WriteBarriersInCopyJSObject) {
  CHECK_EQ(boom_value, clone->RawFastDoublePropertyAt(index));
}


static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
                             int tagged_descriptor, int double_descriptor,
                             bool check_tagged_value = true) {
  FLAG_stress_compaction = true;
  FLAG_manual_evacuation_candidates_selection = true;
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  Heap* heap = CcTest::heap();
  PagedSpace* old_pointer_space = heap->old_pointer_space();

  // The plan: create |obj| by |map| in old space, create |obj_value| in
  // new space and ensure that write barrier is triggered when |obj_value| is
  // written to property |tagged_descriptor| of |obj|.
  // Then migrate object to |new_map| and set proper value for property
  // |double_descriptor|. Call GC and ensure that it did not crash during
  // store buffer entries updating.

  Handle<JSObject> obj;
  Handle<HeapObject> obj_value;
  {
    AlwaysAllocateScope always_allocate(isolate);
    obj = factory->NewJSObjectFromMap(map, TENURED, false);
    CHECK(old_pointer_space->Contains(*obj));

    obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS);
  }

  CHECK(heap->InNewSpace(*obj_value));

  StoreBuffer* store_buffer = heap->store_buffer();
  USE(store_buffer);
  Address slot;
  {
    FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
    int offset = index.offset();
    slot = reinterpret_cast<Address>(HeapObject::RawField(*obj, offset));
    USE(slot);
    DCHECK(!store_buffer->CellIsInStoreBuffer(slot));

    const int n = 153;
    for (int i = 0; i < n; i++) {
      obj->FastPropertyAtPut(index, *obj_value);
    }
    // Ensure that the slot was actually added to the store buffer.
    DCHECK(store_buffer->CellIsInStoreBuffer(slot));
  }

  // Migrate |obj| to |new_map| which should shift fields and put the
  // |boom_value| to the slot that was earlier recorded by write barrier.
  JSObject::MigrateToMap(obj, new_map);

  // Ensure that invalid entries were removed from the store buffer.
  DCHECK(!store_buffer->CellIsInStoreBuffer(slot));

  Address fake_object = reinterpret_cast<Address>(*obj_value) + kPointerSize;
  double boom_value = bit_cast<double>(fake_object);

  FieldIndex double_field_index =
      FieldIndex::ForDescriptor(*new_map, double_descriptor);
  CHECK(obj->IsUnboxedDoubleField(double_field_index));
  obj->RawFastDoublePropertyAtPut(double_field_index, boom_value);

  // Trigger GC to evacuate all candidates.
  CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");

  if (check_tagged_value) {
    FieldIndex tagged_field_index =
        FieldIndex::ForDescriptor(*new_map, tagged_descriptor);
    CHECK_EQ(*obj_value, obj->RawFastPropertyAt(tagged_field_index));
  }
  CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
}

static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
                                        int tagged_descriptor,
                                        int double_descriptor,
                                        bool check_tagged_value = true) {
  if (FLAG_never_compact || !FLAG_incremental_marking) return;
  FLAG_stress_compaction = true;
  FLAG_manual_evacuation_candidates_selection = true;
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  Heap* heap = CcTest::heap();
  PagedSpace* old_pointer_space = heap->old_pointer_space();

  // The plan: create |obj| by |map| in old space, create |obj_value| in
  // old space and ensure it end up in evacuation candidate page. Start
  // incremental marking and ensure that incremental write barrier is triggered
  // when |obj_value| is written to property |tagged_descriptor| of |obj|.
  // Then migrate object to |new_map| and set proper value for property
  // |double_descriptor|. Call GC and ensure that it did not crash during
  // slots buffer entries updating.

  Handle<JSObject> obj;
  Handle<HeapObject> obj_value;
  Page* ec_page;
  {
    AlwaysAllocateScope always_allocate(isolate);
    obj = factory->NewJSObjectFromMap(map, TENURED, false);
    CHECK(old_pointer_space->Contains(*obj));

    // Make sure |obj_value| is placed on an old-space evacuation candidate.
    SimulateFullSpace(old_pointer_space);
    obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
    ec_page = Page::FromAddress(obj_value->address());
    CHECK_NE(ec_page, Page::FromAddress(obj->address()));
  }

  // Heap is ready, force |ec_page| to become an evacuation candidate and
  // simulate incremental marking.
  ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
  SimulateIncrementalMarking(heap);

  // Check that everything is ready for triggering incremental write barrier
  // (i.e. that both |obj| and |obj_value| are black and the marking phase is
  // still active and |obj_value|'s page is indeed an evacuation candidate).
  IncrementalMarking* marking = heap->incremental_marking();
  CHECK(marking->IsMarking());
  CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj)));
  CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj_value)));
  CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));

  // Trigger incremental write barrier, which should add a slot to |ec_page|'s
  // slots buffer.
  {
    int slots_buffer_len = SlotsBuffer::SizeOfChain(ec_page->slots_buffer());
    FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
    const int n = SlotsBuffer::kNumberOfElements + 10;
    for (int i = 0; i < n; i++) {
      obj->FastPropertyAtPut(index, *obj_value);
    }
    // Ensure that the slot was actually added to the |ec_page|'s slots buffer.
    CHECK_EQ(slots_buffer_len + n,
             SlotsBuffer::SizeOfChain(ec_page->slots_buffer()));
  }

  // Migrate |obj| to |new_map| which should shift fields and put the
  // |boom_value| to the slot that was earlier recorded by incremental write
  // barrier.
  JSObject::MigrateToMap(obj, new_map);

  double boom_value = bit_cast<double>(UINT64_C(0xbaad0176a37c28e1));

  FieldIndex double_field_index =
      FieldIndex::ForDescriptor(*new_map, double_descriptor);
  CHECK(obj->IsUnboxedDoubleField(double_field_index));
  obj->RawFastDoublePropertyAtPut(double_field_index, boom_value);

  // Trigger GC to evacuate all candidates.
  CcTest::heap()->CollectGarbage(OLD_POINTER_SPACE, "boom");

  // Ensure that the values are still there and correct.
  CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));

  if (check_tagged_value) {
    FieldIndex tagged_field_index =
        FieldIndex::ForDescriptor(*new_map, tagged_descriptor);
    CHECK_EQ(*obj_value, obj->RawFastPropertyAt(tagged_field_index));
  }
  CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
}

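The tests above craft their "boom value" by reinterpreting a chosen 64-bit pattern as a double. A stand-alone sketch of that trick follows (BitCast here is a generic memcpy-based helper, not V8's bit_cast): if the GC ever mistook the unboxed field for a tagged slot, it would chase this garbage pointer and fail loudly.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Reinterprets the bits of one trivially copyable value as another type of
    // the same size, without violating strict aliasing.
    template <typename To, typename From>
    To BitCast(const From& from) {
      static_assert(sizeof(To) == sizeof(From), "size mismatch");
      To to;
      std::memcpy(&to, &from, sizeof(to));
      return to;
    }

    int main() {
      double boom = BitCast<double>(UINT64_C(0xbaad0176a37c28e1));
      std::printf("boom bits round-trip: 0x%llx\n",
                  static_cast<unsigned long long>(BitCast<uint64_t>(boom)));
      return 0;
    }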
enum WriteBarrierKind { OLD_TO_OLD_WRITE_BARRIER, OLD_TO_NEW_WRITE_BARRIER };
static void TestWriteBarrierObjectShiftFieldsRight(
    WriteBarrierKind write_barrier_kind) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  v8::HandleScope scope(CcTest::isolate());

  Handle<HeapType> any_type = HeapType::Any(isolate);

  CompileRun("function func() { return 1; }");

  Handle<JSObject> func = GetObject("func");

  Handle<Map> map = Map::Create(isolate, 10);
  map = Map::CopyWithConstant(map, MakeName("prop", 0), func, NONE,
                              INSERT_TRANSITION).ToHandleChecked();
  map = Map::CopyWithField(map, MakeName("prop", 1), any_type, NONE,
                           Representation::Double(),
                           INSERT_TRANSITION).ToHandleChecked();
  map = Map::CopyWithField(map, MakeName("prop", 2), any_type, NONE,
                           Representation::Tagged(),
                           INSERT_TRANSITION).ToHandleChecked();

  // Shift fields right by turning constant property to a field.
  Handle<Map> new_map = Map::ReconfigureProperty(
      map, 0, kData, NONE, Representation::Tagged(), any_type, FORCE_FIELD);

  if (write_barrier_kind == OLD_TO_NEW_WRITE_BARRIER) {
    TestWriteBarrier(map, new_map, 2, 1);
  } else {
    CHECK_EQ(OLD_TO_OLD_WRITE_BARRIER, write_barrier_kind);
    TestIncrementalWriteBarrier(map, new_map, 2, 1);
  }
}


TEST(WriteBarrierObjectShiftFieldsRight) {
  TestWriteBarrierObjectShiftFieldsRight(OLD_TO_NEW_WRITE_BARRIER);
}


TEST(IncrementalWriteBarrierObjectShiftFieldsRight) {
  TestWriteBarrierObjectShiftFieldsRight(OLD_TO_OLD_WRITE_BARRIER);
}


// TODO(ishell): add respective tests for property kind reconfiguring from
// accessor field to double, once accessor fields are supported by
// Map::ReconfigureProperty().


// TODO(ishell): add respective tests for fast property removal case once
// Map::ReconfigureProperty() supports that.

#endif