Avoid issuing write barriers for unboxed double fields in Heap::CopyJSObject().
Review URL: https://codereview.chromium.org/880043003

Cr-Commit-Position: refs/heads/master@{#26394}

parent 4e9e1eedf1
commit ba8409d2f6
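The idea behind the change, as a minimal standalone sketch (plain C++, not the V8 code itself; FieldLayout and the RecordWrites stub below are hypothetical stand-ins for V8's LayoutDescriptorHelper and Heap::RecordWrites): walk the cloned object's body in homogeneous regions and record write-barrier slots only for the tagged regions, skipping the regions that hold unboxed doubles.

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr int kPointerSize = static_cast<int>(sizeof(void*));

// Hypothetical stand-in for V8's LayoutDescriptorHelper: one flag per
// pointer-sized word of the object body, true if the word holds a tagged
// pointer, false if it holds part of an unboxed double.
struct FieldLayout {
  std::vector<bool> tagged;
  bool IsTagged(int word) const { return tagged[word]; }
  int words() const { return static_cast<int>(tagged.size()); }
};

// Hypothetical stand-in for Heap::RecordWrites(): records `count` slots
// starting at `offset` in the store buffer.
static void RecordWrites(int offset, int count) {
  std::printf("record %d slot(s) at offset %d\n", count, offset);
}

// Walk the body in homogeneous regions and issue barrier records only for
// the tagged regions; unboxed double regions are skipped entirely.
static void RecordTaggedRegionsOnly(const FieldLayout& layout,
                                    int header_size) {
  for (int word = 0; word < layout.words();) {
    int end_of_region = word;
    while (end_of_region < layout.words() &&
           layout.IsTagged(end_of_region) == layout.IsTagged(word)) {
      ++end_of_region;
    }
    if (layout.IsTagged(word)) {
      RecordWrites(header_size + word * kPointerSize, end_of_region - word);
    }
    word = end_of_region;  // No barrier records for the raw double words.
  }
}

int main() {
  // Example body layout: tagged, double, double, tagged, tagged.
  FieldLayout layout{{true, false, false, true, true}};
  RecordTaggedRegionsOnly(layout, /*header_size=*/3 * kPointerSize);
  return 0;
}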
@@ -3906,9 +3906,33 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
     }
     Address clone_address = clone->address();
     CopyBlock(clone_address, source->address(), object_size);
-    // Update write barrier for all fields that lie beyond the header.
-    RecordWrites(clone_address, JSObject::kHeaderSize,
-                 (object_size - JSObject::kHeaderSize) / kPointerSize);
+
+    // Update write barrier for all tagged fields that lie beyond the header.
+    const int start_offset = JSObject::kHeaderSize;
+    const int end_offset = object_size;
+
+#if V8_DOUBLE_FIELDS_UNBOXING
+    LayoutDescriptorHelper helper(map);
+    bool has_only_tagged_fields = helper.all_fields_tagged();
+
+    if (!has_only_tagged_fields) {
+      for (int offset = start_offset; offset < end_offset;) {
+        int end_of_region_offset;
+        if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+          RecordWrites(clone_address, offset,
+                       (end_of_region_offset - offset) / kPointerSize);
+        }
+        offset = end_of_region_offset;
+      }
+    } else {
+#endif
+      // Object has only tagged fields.
+      RecordWrites(clone_address, start_offset,
+                   (end_offset - start_offset) / kPointerSize);
+#if V8_DOUBLE_FIELDS_UNBOXING
+    }
+#endif
+
   } else {
     wb_mode = SKIP_WRITE_BARRIER;
 
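The regression test in the next hunk relies on crafting a double whose bit pattern is indistinguishable from a new-space address: if a write barrier had recorded that slot as tagged, a new-space GC scanning the store buffer could treat the bits as a pointer and rewrite them. A minimal standalone sketch of that bit-pattern trick (plain C++, no V8 types; the address value is arbitrary and used only for illustration):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Pretend these are the bits of an address inside the new space
  // (arbitrary value, chosen only for the illustration).
  std::uint64_t fake_new_space_address = 0x00003ff5a2b80008ULL;

  // Store the raw bits as a double, the way an unboxed double field does
  // (this mirrors the bit_cast<double>() in the test below).
  double boom_value;
  static_assert(sizeof(double) == sizeof(std::uint64_t), "64-bit doubles");
  std::memcpy(&boom_value, &fake_new_space_address, sizeof(boom_value));

  // The double's bits are indistinguishable from the "pointer": a GC that
  // believed this slot was tagged could rewrite it and corrupt the value.
  std::uint64_t reread;
  std::memcpy(&reread, &boom_value, sizeof(reread));
  std::printf("double carries the same bits: %s\n",
              reread == fake_new_space_address ? "yes" : "no");
  return 0;
}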
@@ -1223,4 +1223,89 @@ TEST(StoreBufferScanOnScavenge) {
   CHECK_EQ(boom_value, GetDoubleFieldValue(*obj, field_index));
 }
+
+
+static int LenFromSize(int size) {
+  return (size - FixedArray::kHeaderSize) / kPointerSize;
+}
+
+
+TEST(WriteBarriersInCopyJSObject) {
+  FLAG_max_semi_space_size = 1;  // Ensure new space is not growing.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  TestHeap* heap = CcTest::test_heap();
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  // The plan: create a JSObject which contains an unboxed double value that
+  // looks like a reference to an object in new space.
+  // Then clone this object (forcing it to go into old space) and check that
+  // the value of the unboxed double property of the cloned object was not
+  // corrupted by GC.
+
+  // Step 1: prepare a map for the object. We add an unboxed double property
+  // to it. Create a map with a single in-object property.
+  Handle<Map> my_map = Map::Create(isolate, 1);
+  Handle<String> name = isolate->factory()->InternalizeUtf8String("foo");
+  my_map = Map::CopyWithField(my_map, name, HeapType::Any(isolate), NONE,
+                              Representation::Double(),
+                              INSERT_TRANSITION).ToHandleChecked();
+  my_map->set_pre_allocated_property_fields(1);
+  int n_properties = my_map->InitialPropertiesLength();
+  CHECK_GE(n_properties, 0);
+
+  int object_size = my_map->instance_size();
+
+  // Step 2: allocate a lot of objects so as to almost fill new space: we need
+  // just enough room left to allocate the JSObject and thus fill the new space.
+
+  int allocation_amount =
+      Min(FixedArray::kMaxSize, Page::kMaxRegularHeapObjectSize + kPointerSize);
+  int allocation_len = LenFromSize(allocation_amount);
+  NewSpace* new_space = heap->new_space();
+  Address* top_addr = new_space->allocation_top_address();
+  Address* limit_addr = new_space->allocation_limit_address();
+  while ((*limit_addr - *top_addr) > allocation_amount) {
+    CHECK(!heap->always_allocate());
+    Object* array = heap->AllocateFixedArray(allocation_len).ToObjectChecked();
+    CHECK(new_space->Contains(array));
+  }
+
+  // Step 3: now allocate fixed array and JSObject to fill the whole new space.
+  int to_fill = static_cast<int>(*limit_addr - *top_addr - object_size);
+  int fixed_array_len = LenFromSize(to_fill);
+  CHECK(fixed_array_len < FixedArray::kMaxLength);
+
+  CHECK(!heap->always_allocate());
+  Object* array = heap->AllocateFixedArray(fixed_array_len).ToObjectChecked();
+  CHECK(new_space->Contains(array));
+
+  Object* object = heap->AllocateJSObjectFromMap(*my_map).ToObjectChecked();
+  CHECK(new_space->Contains(object));
+  JSObject* jsobject = JSObject::cast(object);
+  CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
+  CHECK_EQ(0, jsobject->properties()->length());
+
+  // Construct a double value that looks like a pointer to the new space
+  // object and store it into the object.
+  Address fake_object = reinterpret_cast<Address>(array) + kPointerSize;
+  double boom_value = bit_cast<double>(fake_object);
+  FieldIndex index = FieldIndex::ForDescriptor(*my_map, 0);
+  jsobject->RawFastDoublePropertyAtPut(index, boom_value);
+
+  CHECK_EQ(0, static_cast<int>(*limit_addr - *top_addr));
+
+  // Step 4: clone jsobject, but force always-allocate first to create the
+  // clone in old pointer space.
+  AlwaysAllocateScope aa_scope(isolate);
+  Object* clone_obj = heap->CopyJSObject(jsobject).ToObjectChecked();
+  Handle<JSObject> clone(JSObject::cast(clone_obj));
+  CHECK(heap->old_pointer_space()->Contains(clone->address()));
+
+  CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");
+
+  // The value in the cloned object should not be corrupted by GC.
+  CHECK_EQ(boom_value, clone->RawFastDoublePropertyAt(index));
+}
+
 
 #endif