Revert of Remove slots that point to unboxed doubles from the StoreBuffer/SlotsBuffer. (patchset #4 id:230001 of https://codereview.chromium.org/957273002/)

Reason for revert:
It caused a lot of Canary crashes.

Original issue's description:
> Remove slots that point to unboxed doubles from the StoreBuffer/SlotsBuffer.
>
> The problem is that a tagged slot could become a double slot after migrating an object to another map with "shifted" fields (for example, as a result of generalizing an immutable data property to a data field).
> This CL also adds useful machinery that helps trigger incremental write barriers.
>
> BUG=chromium:454297
> LOG=Y
>
> Committed: https://crrev.com/9633ebabd405c264d33f603f8798c31f59418dcd
> Cr-Commit-Position: refs/heads/master@{#27054}

TBR=verwaest@chromium.org,hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:454297

Review URL: https://codereview.chromium.org/991793002

Cr-Commit-Position: refs/heads/master@{#27063}
Author: ishell@chromium.org
Date: 2015-03-09 11:10:36 +01:00
Parent: 5abc724e82
Commit: 67a02321c2
8 changed files with 10 additions and 389 deletions
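
As context for the reverted hunks that follow, here is a minimal, self-contained C++ sketch of the failure mode the original CL described. This is not V8 code: the types and names below (Object, remembered_slots, dummy target) are invented for illustration, and the model assumes 8-byte in-object fields. It only shows why a slot recorded as holding a tagged pointer becomes unsafe to treat as a pointer once a layout shift stores raw double bits in it.

// Toy model (not V8 code; all names are made up for this sketch).
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

struct Object {
  uint64_t field[2];  // two 8-byte in-object fields
};

int main() {
  Object target{};  // stands in for some other heap object
  Object obj{};

  // Both fields initially hold "tagged pointers" to |target|; a write-barrier
  // analogue records the slot addresses in a remembered set.
  obj.field[0] = reinterpret_cast<uintptr_t>(&target);
  obj.field[1] = reinterpret_cast<uintptr_t>(&target);
  std::vector<uint64_t*> remembered_slots = {&obj.field[0], &obj.field[1]};

  // "Map migration with shifted fields": field[1] now stores an unboxed
  // double. The slot address is unchanged, but its contents are no longer a
  // pointer.
  double unboxed = 3.25;
  std::memcpy(&obj.field[1], &unboxed, sizeof unboxed);

  // A collector that still trusts the remembered set would treat the double
  // bits in field[1] as an object address when updating pointers.
  for (uint64_t* slot : remembered_slots) {
    std::cout << "recorded slot " << static_cast<void*>(slot) << " holds 0x"
              << std::hex << *slot << std::dec << "\n";
  }
  // The reverted CL's approach was to purge such slots from the StoreBuffer /
  // SlotsBuffer before the unboxed double value is written.
  return 0;
}
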


@@ -765,11 +765,6 @@ DEFINE_BOOL(stress_compaction, false,
"stress the GC compactor to flush out bugs (implies "
"--force_marking_deque_overflows)")
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
//
// Dev shell flags
//


@@ -712,7 +712,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int count = 0;
int fragmentation = 0;
int page_number = 0;
Candidate* least = NULL;
PageIterator it(space);
@@ -727,16 +726,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
CHECK(p->slots_buffer() == NULL);
if (FLAG_stress_compaction) {
if (FLAG_manual_evacuation_candidates_selection) {
if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
fragmentation = 1;
}
} else {
unsigned int counter = space->heap()->ms_count();
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
page_number++;
}
unsigned int counter = space->heap()->ms_count();
uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
} else if (mode == REDUCE_MEMORY_FOOTPRINT) {
// Don't try to release too many pages.
if (estimated_release >= over_reserved) {
@@ -4370,21 +4362,6 @@ bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
}
static Object* g_smi_slot = NULL;
void SlotsBuffer::RemoveSlot(SlotsBuffer* buffer, ObjectSlot slot_to_remove) {
DCHECK_EQ(Smi::FromInt(0), g_smi_slot);
DCHECK(!IsTypedSlot(slot_to_remove));
while (buffer != NULL) {
ObjectSlot* slots = buffer->slots_;
// Remove entries by replacing them with a dummy slot containing a smi.
std::replace(&slots[0], &slots[buffer->idx_], slot_to_remove, &g_smi_slot);
buffer = buffer->next_;
}
}
static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
if (RelocInfo::IsCodeTarget(rmode)) {
return SlotsBuffer::CODE_TARGET_SLOT;

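The reverted RemoveSlot above neutralizes matching entries rather than compacting the buffers. A rough stand-alone model of that strategy, with invented types (Buffer, dummy_smi) and no V8 dependencies, might look like the sketch below: walk the chain of buffers and std::replace every occurrence of the slot with a dummy entry, so existing buffer indices stay valid.

// Simplified model (assumed types; not V8's SlotsBuffer).
#include <algorithm>
#include <array>
#include <iostream>

struct Buffer {
  std::array<int*, 4> slots{};  // recorded slot pointers
  int idx = 0;                  // number of used entries
  Buffer* next = nullptr;       // older buffers in the chain
};

int dummy_smi = 0;  // stands in for the dummy smi-holding slot

void RemoveSlot(Buffer* buffer, int* slot_to_remove) {
  for (; buffer != nullptr; buffer = buffer->next) {
    int** begin = buffer->slots.data();
    // Overwrite matching entries in place instead of compacting the buffer.
    std::replace(begin, begin + buffer->idx, slot_to_remove, &dummy_smi);
  }
}

int main() {
  int a = 1, b = 2;
  Buffer older, newer;
  older.slots = {&a, &b, nullptr, nullptr}; older.idx = 2;
  newer.slots = {&b, &a, nullptr, nullptr}; newer.idx = 2;
  newer.next = &older;
  RemoveSlot(&newer, &b);  // every occurrence of &b becomes &dummy_smi
  std::cout << (newer.slots[0] == &dummy_smi)
            << (older.slots[1] == &dummy_smi) << "\n";  // prints "11"
  return 0;
}
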

@@ -360,9 +360,6 @@ class SlotsBuffer {
SlotsBuffer** buffer_address, SlotType type, Address addr,
AdditionMode mode);
// Removes all occurrences of given slot from the chain of slots buffers.
static void RemoveSlot(SlotsBuffer* buffer, ObjectSlot slot_to_remove);
static const int kNumberOfElements = 1021;
private:


@@ -385,12 +385,6 @@ class MemoryChunk {
// to grey transition is performed in the value.
HAS_PROGRESS_BAR,
// This flag is intended to be used for testing. Works only when both
// FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
// are set. It forces the page to become an evacuation candidate at next
// candidates selection cycle.
FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};


@@ -247,55 +247,6 @@ void StoreBuffer::Filter(int flag) {
}
void StoreBuffer::RemoveSlots(Address start_address, Address end_address) {
struct IsValueInRangePredicate {
Address start_address_;
Address end_address_;
IsValueInRangePredicate(Address start_address, Address end_address)
: start_address_(start_address), end_address_(end_address) {}
bool operator()(Address addr) {
return start_address_ <= addr && addr < end_address_;
}
};
IsValueInRangePredicate predicate(start_address, end_address);
// Some address in old space that does not move.
const Address kRemovedSlot = heap_->undefined_value()->address();
DCHECK(Page::FromAddress(kRemovedSlot)->NeverEvacuate());
{
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
std::replace_if(start_, top, predicate, kRemovedSlot);
}
if (old_buffer_is_sorted_) {
// Remove slots from an old buffer preserving the order.
Address* lower = std::lower_bound(old_start_, old_top_, start_address);
if (lower != old_top_) {
// [lower, old_top_) range contain elements that are >= |start_address|.
Address* upper = std::lower_bound(lower, old_top_, end_address);
// Remove [lower, upper) from the buffer.
if (upper == old_top_) {
// All elements in [lower, old_top_) range are < |end_address|.
old_top_ = lower;
} else if (lower != upper) {
// [upper, old_top_) range contain elements that are >= |end_address|,
// move [upper, old_top_) range to [lower, ...) and update old_top_.
Address* new_top = lower;
for (Address* p = upper; p < old_top_; p++) {
*new_top++ = *p;
}
old_top_ = new_top;
}
}
} else {
std::replace_if(old_start_, old_top_, predicate, kRemovedSlot);
}
}
void StoreBuffer::SortUniq() {
Compact();
if (old_buffer_is_sorted_) return;
@@ -346,18 +297,12 @@ static Address* in_store_buffer_1_element_cache = NULL;
bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
DCHECK_NOT_NULL(cell_address);
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
if (!FLAG_enable_slow_asserts) return true;
if (in_store_buffer_1_element_cache != NULL &&
*in_store_buffer_1_element_cache == cell_address) {
// Check if the cache still points into the active part of the buffer.
if ((start_ <= in_store_buffer_1_element_cache &&
in_store_buffer_1_element_cache < top) ||
(old_start_ <= in_store_buffer_1_element_cache &&
in_store_buffer_1_element_cache < old_top_)) {
return true;
}
return true;
}
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
for (Address* current = top - 1; current >= start_; current--) {
if (*current == cell_address) {
in_store_buffer_1_element_cache = current;

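To make the sorted-buffer branch of the reverted RemoveSlots above easier to follow, here is a simplified, self-contained model. It is not V8's StoreBuffer: Address is just an integer alias, the buffer is a std::vector, and the helper name is invented. It shows the same idea of using std::lower_bound twice to locate the recorded slots in [start_address, end_address) and then closing the gap while preserving order; the unsorted case in the diff instead overwrites matching entries with std::replace_if.

// Simplified model (assumed types; not V8 code).
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

using Address = std::uintptr_t;

// Removes every recorded address in [start, end) from a sorted buffer,
// preserving the order of the remaining entries.
void RemoveSlotsInRange(std::vector<Address>* sorted_buffer, Address start,
                        Address end) {
  auto lower = std::lower_bound(sorted_buffer->begin(), sorted_buffer->end(),
                                start);                 // first entry >= start
  auto upper = std::lower_bound(lower, sorted_buffer->end(), end);
  // erase() shifts [upper, end()) left, like the manual copy loop in the diff.
  sorted_buffer->erase(lower, upper);
}

int main() {
  std::vector<Address> buffer = {0x1000, 0x1010, 0x2000, 0x2008, 0x3000};
  RemoveSlotsInRange(&buffer, 0x2000, 0x2010);  // drops 0x2000 and 0x2008
  for (Address a : buffer) std::cout << std::hex << a << "\n";
  return 0;
}
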

@@ -105,11 +105,6 @@ class StoreBuffer {
void Filter(int flag);
// Removes all the slots in [start_address, end_address) range from the store
// buffer.
void RemoveSlots(Address start_address, Address end_address);
private:
Heap* heap_;


@@ -1918,50 +1918,6 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
}
// Returns true if during migration from |old_map| to |new_map| "tagged"
// inobject fields are going to be replaced with unboxed double fields.
static bool ShouldClearSlotsRecorded(Map* old_map, Map* new_map,
int new_number_of_fields) {
DisallowHeapAllocation no_gc;
int inobject = new_map->inobject_properties();
DCHECK(inobject <= old_map->inobject_properties());
int limit = Min(inobject, new_number_of_fields);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(new_map, i);
if (new_map->IsUnboxedDoubleField(index) &&
!old_map->IsUnboxedDoubleField(index)) {
return true;
}
}
return false;
}
static void RemoveOldToOldSlotsRecorded(Heap* heap, JSObject* object,
FieldIndex index) {
DisallowHeapAllocation no_gc;
Object* old_value = object->RawFastPropertyAt(index);
if (old_value->IsHeapObject()) {
HeapObject* ho = HeapObject::cast(old_value);
if (heap->InNewSpace(ho)) {
// At this point there must be no old-to-new slots recorded for this
// object.
SLOW_DCHECK(
!heap->store_buffer()->CellIsInStoreBuffer(reinterpret_cast<Address>(
HeapObject::RawField(object, index.offset()))));
} else {
Page* p = Page::FromAddress(reinterpret_cast<Address>(ho));
if (p->IsEvacuationCandidate()) {
Object** slot = HeapObject::RawField(object, index.offset());
SlotsBuffer::RemoveSlot(p->slots_buffer(), slot);
}
}
}
}
// To migrate a fast instance to a fast map:
// - First check whether the instance needs to be rewritten. If not, simply
// change the map.
@@ -2115,31 +2071,16 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
Heap* heap = isolate->heap();
// If we are going to put an unboxed double to the field that used to
// contain HeapObject we should ensure that this slot is removed from
// both StoreBuffer and respective SlotsBuffer.
bool clear_slots_recorded =
FLAG_unbox_double_fields && !heap->InNewSpace(object->address()) &&
ShouldClearSlotsRecorded(*old_map, *new_map, number_of_fields);
if (clear_slots_recorded) {
Address obj_address = object->address();
Address end_address = obj_address + old_map->instance_size();
heap->store_buffer()->RemoveSlots(obj_address, end_address);
}
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
Object* value = array->get(external + i);
// Can't use JSObject::FastPropertyAtPut() because proper map was not set
// yet.
if (new_map->IsUnboxedDoubleField(index)) {
DCHECK(value->IsMutableHeapNumber());
if (clear_slots_recorded && !old_map->IsUnboxedDoubleField(index)) {
RemoveOldToOldSlotsRecorded(heap, *object, index);
}
object->RawFastDoublePropertyAtPut(index,
HeapNumber::cast(value)->value());
} else {
@@ -2147,6 +2088,8 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
}
Heap* heap = isolate->heap();
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
if (external > 0) {


@@ -48,12 +48,6 @@ static Handle<String> MakeName(const char* str, int suffix) {
}
Handle<JSObject> GetObject(const char* name) {
return v8::Utils::OpenHandle(
*v8::Handle<v8::Object>::Cast(CcTest::global()->Get(v8_str(name))));
}
static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
if (obj->IsUnboxedDoubleField(field_index)) {
return obj->RawFastDoublePropertyAt(field_index);
@@ -1311,223 +1305,4 @@ TEST(WriteBarriersInCopyJSObject) {
CHECK_EQ(boom_value, clone->RawFastDoublePropertyAt(index));
}
static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
int tagged_descriptor, int double_descriptor,
bool check_tagged_value = true) {
FLAG_stress_compaction = true;
FLAG_manual_evacuation_candidates_selection = true;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
PagedSpace* old_pointer_space = heap->old_pointer_space();
// The plan: create |obj| by |map| in old space, create |obj_value| in
// new space and ensure that write barrier is triggered when |obj_value| is
// written to property |tagged_descriptor| of |obj|.
// Then migrate object to |new_map| and set proper value for property
// |double_descriptor|. Call GC and ensure that it did not crash during
// store buffer entries updating.
Handle<JSObject> obj;
Handle<HeapObject> obj_value;
{
AlwaysAllocateScope always_allocate(isolate);
obj = factory->NewJSObjectFromMap(map, TENURED, false);
CHECK(old_pointer_space->Contains(*obj));
obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS);
}
CHECK(heap->InNewSpace(*obj_value));
StoreBuffer* store_buffer = heap->store_buffer();
USE(store_buffer);
Address slot;
{
FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
int offset = index.offset();
slot = reinterpret_cast<Address>(HeapObject::RawField(*obj, offset));
USE(slot);
DCHECK(!store_buffer->CellIsInStoreBuffer(slot));
const int n = 153;
for (int i = 0; i < n; i++) {
obj->FastPropertyAtPut(index, *obj_value);
}
// Ensure that the slot was actually added to the store buffer.
DCHECK(store_buffer->CellIsInStoreBuffer(slot));
}
// Migrate |obj| to |new_map| which should shift fields and put the
// |boom_value| to the slot that was earlier recorded by write barrier.
JSObject::MigrateToMap(obj, new_map);
// Ensure that invalid entries were removed from the store buffer.
DCHECK(!store_buffer->CellIsInStoreBuffer(slot));
Address fake_object = reinterpret_cast<Address>(*obj_value) + kPointerSize;
double boom_value = bit_cast<double>(fake_object);
FieldIndex double_field_index =
FieldIndex::ForDescriptor(*new_map, double_descriptor);
CHECK(obj->IsUnboxedDoubleField(double_field_index));
obj->RawFastDoublePropertyAtPut(double_field_index, boom_value);
// Trigger GC to evacuate all candidates.
CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");
if (check_tagged_value) {
FieldIndex tagged_field_index =
FieldIndex::ForDescriptor(*new_map, tagged_descriptor);
CHECK_EQ(*obj_value, obj->RawFastPropertyAt(tagged_field_index));
}
CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
}
static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
int tagged_descriptor,
int double_descriptor,
bool check_tagged_value = true) {
if (FLAG_never_compact || !FLAG_incremental_marking) return;
FLAG_stress_compaction = true;
FLAG_manual_evacuation_candidates_selection = true;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
PagedSpace* old_pointer_space = heap->old_pointer_space();
// The plan: create |obj| by |map| in old space, create |obj_value| in
// old space and ensure it end up in evacuation candidate page. Start
// incremental marking and ensure that incremental write barrier is triggered
// when |obj_value| is written to property |tagged_descriptor| of |obj|.
// Then migrate object to |new_map| and set proper value for property
// |double_descriptor|. Call GC and ensure that it did not crash during
// slots buffer entries updating.
Handle<JSObject> obj;
Handle<HeapObject> obj_value;
Page* ec_page;
{
AlwaysAllocateScope always_allocate(isolate);
obj = factory->NewJSObjectFromMap(map, TENURED, false);
CHECK(old_pointer_space->Contains(*obj));
// Make sure |obj_value| is placed on an old-space evacuation candidate.
SimulateFullSpace(old_pointer_space);
obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
ec_page = Page::FromAddress(obj_value->address());
CHECK_NE(ec_page, Page::FromAddress(obj->address()));
}
// Heap is ready, force |ec_page| to become an evacuation candidate and
// simulate incremental marking.
ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
SimulateIncrementalMarking(heap);
// Check that everything is ready for triggering incremental write barrier
// (i.e. that both |obj| and |obj_value| are black and the marking phase is
// still active and |obj_value|'s page is indeed an evacuation candidate).
IncrementalMarking* marking = heap->incremental_marking();
CHECK(marking->IsMarking());
CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj)));
CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj_value)));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
// Trigger incremental write barrier, which should add a slot to |ec_page|'s
// slots buffer.
{
int slots_buffer_len = SlotsBuffer::SizeOfChain(ec_page->slots_buffer());
FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
const int n = SlotsBuffer::kNumberOfElements + 10;
for (int i = 0; i < n; i++) {
obj->FastPropertyAtPut(index, *obj_value);
}
// Ensure that the slot was actually added to the |ec_page|'s slots buffer.
CHECK_EQ(slots_buffer_len + n,
SlotsBuffer::SizeOfChain(ec_page->slots_buffer()));
}
// Migrate |obj| to |new_map| which should shift fields and put the
// |boom_value| to the slot that was earlier recorded by incremental write
// barrier.
JSObject::MigrateToMap(obj, new_map);
double boom_value = bit_cast<double>(UINT64_C(0xbaad0176a37c28e1));
FieldIndex double_field_index =
FieldIndex::ForDescriptor(*new_map, double_descriptor);
CHECK(obj->IsUnboxedDoubleField(double_field_index));
obj->RawFastDoublePropertyAtPut(double_field_index, boom_value);
// Trigger GC to evacuate all candidates.
CcTest::heap()->CollectGarbage(OLD_POINTER_SPACE, "boom");
// Ensure that the values are still there and correct.
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
if (check_tagged_value) {
FieldIndex tagged_field_index =
FieldIndex::ForDescriptor(*new_map, tagged_descriptor);
CHECK_EQ(*obj_value, obj->RawFastPropertyAt(tagged_field_index));
}
CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
}
enum WriteBarrierKind { OLD_TO_OLD_WRITE_BARRIER, OLD_TO_NEW_WRITE_BARRIER };
static void TestWriteBarrierObjectShiftFieldsRight(
WriteBarrierKind write_barrier_kind) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::HandleScope scope(CcTest::isolate());
Handle<HeapType> any_type = HeapType::Any(isolate);
CompileRun("function func() { return 1; }");
Handle<JSObject> func = GetObject("func");
Handle<Map> map = Map::Create(isolate, 10);
map = Map::CopyWithConstant(map, MakeName("prop", 0), func, NONE,
INSERT_TRANSITION).ToHandleChecked();
map = Map::CopyWithField(map, MakeName("prop", 1), any_type, NONE,
Representation::Double(),
INSERT_TRANSITION).ToHandleChecked();
map = Map::CopyWithField(map, MakeName("prop", 2), any_type, NONE,
Representation::Tagged(),
INSERT_TRANSITION).ToHandleChecked();
// Shift fields right by turning constant property to a field.
Handle<Map> new_map = Map::ReconfigureProperty(
map, 0, kData, NONE, Representation::Tagged(), any_type, FORCE_FIELD);
if (write_barrier_kind == OLD_TO_NEW_WRITE_BARRIER) {
TestWriteBarrier(map, new_map, 2, 1);
} else {
CHECK_EQ(OLD_TO_OLD_WRITE_BARRIER, write_barrier_kind);
TestIncrementalWriteBarrier(map, new_map, 2, 1);
}
}
TEST(WriteBarrierObjectShiftFieldsRight) {
TestWriteBarrierObjectShiftFieldsRight(OLD_TO_NEW_WRITE_BARRIER);
}
TEST(IncrementalWriteBarrierObjectShiftFieldsRight) {
TestWriteBarrierObjectShiftFieldsRight(OLD_TO_OLD_WRITE_BARRIER);
}
// TODO(ishell): add respective tests for property kind reconfiguring from
// accessor field to double, once accessor fields are supported by
// Map::ReconfigureProperty().
// TODO(ishell): add respective tests for fast property removal case once
// Map::ReconfigureProperty() supports that.
#endif