The layout descriptor must be trimmed whenever the corresponding descriptor array is trimmed, so that the two stay in sync.

BUG=chromium:470804
LOG=Y

Review URL: https://codereview.chromium.org/1033273005

Cr-Commit-Position: refs/heads/master@{#27528}
Author: ishell
Date: 2015-03-30 10:03:41 -07:00 (committed by Commit bot)
Commit: 3cb9f132ba (parent 8baa09c4fe)
6 changed files with 296 additions and 61 deletions


@@ -2610,6 +2610,13 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
descriptors->Sort();
if (FLAG_unbox_double_fields) {
LayoutDescriptor* layout_descriptor = map->layout_descriptor();
layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
number_of_own_descriptors);
SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
}
}
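
The hook above is where the GC keeps the two structures in sync. As an aside (not part of the patch), a minimal standalone C++ model of the invariant being restored; all names (ToyMap, field_is_double) are illustrative only:

#include <cassert>
#include <vector>

// Toy model (not V8 code): a layout bit vector that mirrors a descriptor
// list which the GC can right-trim. If only the descriptor list shrinks,
// stale "double" bits survive in the layout's tail.
struct ToyMap {
  std::vector<bool> field_is_double;  // one entry per in-object field
  int number_of_own_descriptors = 0;

  void TrimDescriptors(int new_count) {
    assert(new_count <= number_of_own_descriptors);
    number_of_own_descriptors = new_count;
    // Analogue of LayoutDescriptor::Trim: shrink the layout as well,
    // otherwise the two structures go out of sync.
    field_is_double.resize(new_count);
  }
};

int main() {
  ToyMap map;
  map.field_is_double = {false, false, true, true};  // two trailing doubles
  map.number_of_own_descriptors = 4;
  map.TrimDescriptors(2);
  assert(map.field_is_double.size() == 2);  // layout trimmed in lock-step
  return 0;
}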


@@ -20,18 +20,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
// The whole bit vector fits into a smi.
return handle(LayoutDescriptor::FromSmi(Smi::FromInt(0)), isolate);
}
- length = (length + kNumberOfBits - 1) / kNumberOfBits;
- DCHECK(length > 0);
- if (SmiValuesAre32Bits() && (length & 1)) {
- // On 64-bit systems if the length is odd then the half-word space would be
- // lost anyway (due to alignment and the fact that we are allocating
- // uint32-typed array), so we increase the length of allocated array
- // to utilize that "lost" space which could also help to avoid layout
- // descriptor reallocations.
- ++length;
- }
+ length = GetSlowModeBackingStoreLength(length);
return Handle<LayoutDescriptor>::cast(
isolate->factory()->NewFixedTypedArray(length, kExternalUint32Array));
}
@@ -154,6 +143,77 @@ LayoutDescriptor* LayoutDescriptor::cast_gc_safe(Object* object) {
}
int LayoutDescriptor::GetSlowModeBackingStoreLength(int length) {
length = (length + kNumberOfBits - 1) / kNumberOfBits;
DCHECK_LT(0, length);
if (SmiValuesAre32Bits() && (length & 1)) {
// On 64-bit systems if the length is odd then the half-word space would be
// lost anyway (due to alignment and the fact that we are allocating
// uint32-typed array), so we increase the length of allocated array
// to utilize that "lost" space which could also help to avoid layout
// descriptor reallocations.
++length;
}
return length;
}
int LayoutDescriptor::CalculateCapacity(Map* map, DescriptorArray* descriptors,
int num_descriptors) {
int inobject_properties = map->inobject_properties();
if (inobject_properties == 0) return 0;
DCHECK_LE(num_descriptors, descriptors->number_of_descriptors());
int layout_descriptor_length;
const int kMaxWordsPerField = kDoubleSize / kPointerSize;
if (num_descriptors <= kSmiValueSize / kMaxWordsPerField) {
// Even in the "worst" case (all fields are doubles) it would fit into
// a Smi, so no need to calculate length.
layout_descriptor_length = kSmiValueSize;
} else {
layout_descriptor_length = 0;
for (int i = 0; i < num_descriptors; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (!InobjectUnboxedField(inobject_properties, details)) continue;
int field_index = details.field_index();
int field_width_in_words = details.field_width_in_words();
layout_descriptor_length =
Max(layout_descriptor_length, field_index + field_width_in_words);
}
}
layout_descriptor_length = Min(layout_descriptor_length, inobject_properties);
return layout_descriptor_length;
}
LayoutDescriptor* LayoutDescriptor::Initialize(
LayoutDescriptor* layout_descriptor, Map* map, DescriptorArray* descriptors,
int num_descriptors) {
DisallowHeapAllocation no_allocation;
int inobject_properties = map->inobject_properties();
for (int i = 0; i < num_descriptors; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (!InobjectUnboxedField(inobject_properties, details)) {
DCHECK(details.location() != kField ||
layout_descriptor->IsTagged(details.field_index()));
continue;
}
int field_index = details.field_index();
layout_descriptor = layout_descriptor->SetRawData(field_index);
if (details.field_width_in_words() > 1) {
layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
}
}
return layout_descriptor;
}
// LayoutDescriptorHelper is a helper class for querying whether the in-object
// property at a given offset is a double or not.
LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
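
As a worked example of the rounding performed by GetSlowModeBackingStoreLength above, here is a standalone sketch (not V8 code; kBitsPerWord stands in for kNumberOfBits, and the 32-bit-Smi condition is passed explicitly instead of calling SmiValuesAre32Bits()):

#include <cassert>

// Round a bit count up to whole 32-bit backing-store words, then up to an
// even word count when Smis are 32 bits wide (64-bit targets), so the
// alignment padding of the uint32-typed array is not wasted.
int SlowModeWords(int bit_length, bool smi_values_are_32_bits) {
  const int kBitsPerWord = 32;  // illustrative stand-in for kNumberOfBits
  int words = (bit_length + kBitsPerWord - 1) / kBitsPerWord;
  if (smi_values_are_32_bits && (words & 1)) ++words;  // use the padding word
  return words;
}

int main() {
  assert(SlowModeWords(33, true) == 2);   // 2 words, already even
  assert(SlowModeWords(70, true) == 4);   // 3 words rounded up to 4
  assert(SlowModeWords(70, false) == 3);  // 31-bit Smis: no even rounding
  return 0;
}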


@@ -19,55 +19,22 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(
Isolate* isolate = descriptors->GetIsolate();
if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);
- int inobject_properties = map->inobject_properties();
- if (inobject_properties == 0) return handle(FastPointerLayout(), isolate);
+ int layout_descriptor_length =
+ CalculateCapacity(*map, *descriptors, num_descriptors);
- DCHECK(num_descriptors <= descriptors->number_of_descriptors());
- int layout_descriptor_length;
- const int kMaxWordsPerField = kDoubleSize / kPointerSize;
- if (num_descriptors <= kSmiValueSize / kMaxWordsPerField) {
- // Even in the "worst" case (all fields are doubles) it would fit into
- // a Smi, so no need to calculate length.
- layout_descriptor_length = kSmiValueSize;
- } else {
- layout_descriptor_length = 0;
- for (int i = 0; i < num_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (!InobjectUnboxedField(inobject_properties, details)) continue;
- int field_index = details.field_index();
- int field_width_in_words = details.field_width_in_words();
- layout_descriptor_length =
- Max(layout_descriptor_length, field_index + field_width_in_words);
- }
- if (layout_descriptor_length == 0) {
- // No double fields were found, use fast pointer layout.
- return handle(FastPointerLayout(), isolate);
- }
+ if (layout_descriptor_length == 0) {
+ // No double fields were found, use fast pointer layout.
+ return handle(FastPointerLayout(), isolate);
}
- layout_descriptor_length = Min(layout_descriptor_length, inobject_properties);
// Initially, layout descriptor corresponds to an object with all fields
// tagged.
Handle<LayoutDescriptor> layout_descriptor_handle =
LayoutDescriptor::New(isolate, layout_descriptor_length);
- DisallowHeapAllocation no_allocation;
- LayoutDescriptor* layout_descriptor = *layout_descriptor_handle;
+ LayoutDescriptor* layout_descriptor = Initialize(
+ *layout_descriptor_handle, *map, *descriptors, num_descriptors);
- for (int i = 0; i < num_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (!InobjectUnboxedField(inobject_properties, details)) continue;
- int field_index = details.field_index();
- layout_descriptor = layout_descriptor->SetRawData(field_index);
- if (details.field_width_in_words() > 1) {
- layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
- }
- }
return handle(layout_descriptor, isolate);
}
@@ -258,13 +225,44 @@ bool LayoutDescriptorHelper::IsTagged(
}
- bool LayoutDescriptor::IsConsistentWithMap(Map* map) {
+ LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
+ DescriptorArray* descriptors,
+ int num_descriptors) {
+ DisallowHeapAllocation no_allocation;
+ // Fast mode descriptors are never shared and therefore always fully
+ // correspond to their map.
+ if (!IsSlowLayout()) return this;
+ int layout_descriptor_length =
+ CalculateCapacity(map, descriptors, num_descriptors);
+ // It must not become a fast-mode descriptor here: otherwise it would already
+ // have to be the fast pointer layout descriptor, but it is in slow mode now.
+ DCHECK_LT(kSmiValueSize, layout_descriptor_length);
+ // Trim, clean and reinitialize this slow-mode layout descriptor.
+ int array_length = GetSlowModeBackingStoreLength(layout_descriptor_length);
+ int current_length = length();
+ if (current_length != array_length) {
+ DCHECK_LT(array_length, current_length);
+ int delta = current_length - array_length;
+ heap->RightTrimFixedArray<Heap::FROM_GC>(this, delta);
+ }
+ memset(DataPtr(), 0, DataSize());
+ LayoutDescriptor* layout_descriptor =
+ Initialize(this, map, descriptors, num_descriptors);
+ DCHECK_EQ(this, layout_descriptor);
+ return layout_descriptor;
+ }
+ bool LayoutDescriptor::IsConsistentWithMap(Map* map, bool check_tail) {
if (FLAG_unbox_double_fields) {
DescriptorArray* descriptors = map->instance_descriptors();
int nof_descriptors = map->NumberOfOwnDescriptors();
int last_field_index = 0;
for (int i = 0; i < nof_descriptors; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != DATA) continue;
if (details.location() != kField) continue;
FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
bool tagged_expected =
!field_index.is_inobject() || !details.representation().IsDouble();
@@ -273,6 +271,15 @@ bool LayoutDescriptor::IsConsistentWithMap(Map* map) {
DCHECK_EQ(tagged_expected, tagged_actual);
if (tagged_actual != tagged_expected) return false;
}
last_field_index =
Max(last_field_index,
details.field_index() + details.field_width_in_words());
}
if (check_tail) {
int n = capacity();
for (int i = last_field_index; i < n; i++) {
DCHECK(IsTagged(i));
}
}
}
return true;
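
Putting the new Trim path in one picture: the slow-mode backing store is right-trimmed to the recomputed capacity, zeroed, and re-initialized, so no stale bits from a previously shared tail survive. A hedged standalone sketch of that sequence (plain C++, not the V8 heap API; word size and bit indices are illustrative):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Standalone model of the trim-clean-reinitialize sequence.
struct ToyLayout {
  std::vector<uint32_t> words;  // slow-mode backing store, 32 bits per word

  void Trim(int new_bit_capacity, const std::vector<int>& double_field_bits) {
    int new_words = (new_bit_capacity + 31) / 32;
    words.resize(new_words);                    // right-trim the store
    std::fill(words.begin(), words.end(), 0u);  // drop stale bits
    for (int bit : double_field_bits) {         // re-run initialization
      words[bit / 32] |= uint32_t{1} << (bit % 32);
    }
  }
};

int main() {
  ToyLayout layout;
  layout.words = {0u, 0x00000002u, 0xFFFFFFFFu};  // "dirty" shared tail word
  layout.Trim(64, {33});                          // keep one double at bit 33
  assert(layout.words.size() == 2);
  assert(layout.words[0] == 0u);
  assert(layout.words[1] == (uint32_t{1} << 1));  // only bit 33 remains set
  return 0;
}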


@@ -70,7 +70,14 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
V8_INLINE static LayoutDescriptor* FastPointerLayout();
// Check that this layout descriptor corresponds to the given map.
- bool IsConsistentWithMap(Map* map);
+ bool IsConsistentWithMap(Map* map, bool check_tail = false);
+ // Trims this layout descriptor to the given number of descriptors. This
+ // happens only when the corresponding descriptor array is trimmed.
+ // Only slow-mode layout descriptors are actually trimmed, and trimming
+ // never turns them back into fast-mode descriptors.
+ LayoutDescriptor* Trim(Heap* heap, Map* map, DescriptorArray* descriptors,
+ int num_descriptors);
#ifdef OBJECT_PRINT
// For our gdb macros, we should perhaps change these in the future.
@@ -94,6 +101,21 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
V8_INLINE static bool InobjectUnboxedField(int inobject_properties,
PropertyDetails details);
// Calculates minimal layout descriptor capacity required for given
// |map|, |descriptors| and |num_descriptors|.
V8_INLINE static int CalculateCapacity(Map* map, DescriptorArray* descriptors,
int num_descriptors);
// Calculates the length of the slow-mode backing store array from the given
// layout descriptor length.
V8_INLINE static int GetSlowModeBackingStoreLength(int length);
// Fills in clean |layout_descriptor| according to given |map|, |descriptors|
// and |num_descriptors|.
V8_INLINE static LayoutDescriptor* Initialize(
LayoutDescriptor* layout_descriptor, Map* map,
DescriptorArray* descriptors, int num_descriptors);
static Handle<LayoutDescriptor> EnsureCapacity(
Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
int new_capacity);
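
The two new private helpers declared above split the logic that used to live inline in LayoutDescriptor::New: CalculateCapacity finds the highest bit any unboxed double field needs, and Initialize sets exactly those bits. A standalone sketch of that split (illustrative ToyField type; assumes one word per double, as on 64-bit targets):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Standalone model of the CalculateCapacity / Initialize split.
struct ToyField { int index; int width_in_words; bool is_unboxed_double; };

int Capacity(const std::vector<ToyField>& fields, int inobject_properties) {
  int length = 0;
  for (const ToyField& f : fields) {
    if (!f.is_unboxed_double || f.index >= inobject_properties) continue;
    length = std::max(length, f.index + f.width_in_words);
  }
  return std::min(length, inobject_properties);
}

std::vector<uint32_t> Initialize(const std::vector<ToyField>& fields,
                                 int capacity_in_bits) {
  std::vector<uint32_t> words((capacity_in_bits + 31) / 32, 0u);
  for (const ToyField& f : fields) {
    if (!f.is_unboxed_double) continue;
    for (int w = 0; w < f.width_in_words; w++) {  // two words on 32-bit targets
      int bit = f.index + w;
      words[bit / 32] |= uint32_t{1} << (bit % 32);
    }
  }
  return words;
}

int main() {
  // Two tagged fields, then a one-word double at index 2.
  std::vector<ToyField> fields = {{0, 1, false}, {1, 1, false}, {2, 1, true}};
  assert(Capacity(fields, 8) == 3);         // bits 0..2 are needed
  std::vector<uint32_t> layout = Initialize(fields, Capacity(fields, 8));
  assert(layout.size() == 1);
  assert(layout[0] == (uint32_t{1} << 2));  // only the double's bit is set
  return 0;
}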


@@ -30,7 +30,7 @@ static void InitializeVerifiedMapDescriptors(
Map* map, DescriptorArray* descriptors,
LayoutDescriptor* layout_descriptor) {
map->InitializeDescriptors(descriptors, layout_descriptor);
- CHECK(layout_descriptor->IsConsistentWithMap(map));
+ CHECK(layout_descriptor->IsConsistentWithMap(map, true));
}
@@ -230,7 +230,7 @@ TEST(LayoutDescriptorBasicSlow) {
}
CHECK(layout_desc->IsSlowLayout());
CHECK(!layout_desc->IsFastPointerLayout());
- CHECK(layout_descriptor->IsConsistentWithMap(*map));
+ CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
}
}
@@ -644,7 +644,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
map->InitializeDescriptors(*descriptors, *layout_descriptor);
}
Handle<LayoutDescriptor> layout_descriptor(map->layout_descriptor(), isolate);
- CHECK(layout_descriptor->IsConsistentWithMap(*map));
+ CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
return layout_descriptor;
}
@@ -913,6 +913,92 @@ TEST(Regress436816) {
}
TEST(DescriptorArrayTrimming) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
const int kFieldCount = 128;
const int kSplitFieldIndex = 32;
const int kTrimmedLayoutDescriptorLength = 64;
Handle<HeapType> any_type = HeapType::Any(isolate);
Handle<Map> map = Map::Create(isolate, kFieldCount);
for (int i = 0; i < kSplitFieldIndex; i++) {
map = Map::CopyWithField(map, MakeName("prop", i), any_type, NONE,
Representation::Smi(),
INSERT_TRANSITION).ToHandleChecked();
}
map = Map::CopyWithField(map, MakeName("dbl", kSplitFieldIndex), any_type,
NONE, Representation::Double(),
INSERT_TRANSITION).ToHandleChecked();
CHECK(map->layout_descriptor()->IsConsistentWithMap(*map, true));
CHECK(map->layout_descriptor()->IsSlowLayout());
CHECK(map->owns_descriptors());
CHECK_EQ(2, map->layout_descriptor()->length());
{
// Add transitions to double fields.
v8::HandleScope scope(CcTest::isolate());
Handle<Map> tmp_map = map;
for (int i = kSplitFieldIndex + 1; i < kFieldCount; i++) {
tmp_map = Map::CopyWithField(tmp_map, MakeName("dbl", i), any_type, NONE,
Representation::Double(),
INSERT_TRANSITION).ToHandleChecked();
CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
}
// Check that descriptors are shared.
CHECK(tmp_map->owns_descriptors());
CHECK_EQ(map->instance_descriptors(), tmp_map->instance_descriptors());
CHECK_EQ(map->layout_descriptor(), tmp_map->layout_descriptor());
}
CHECK(map->layout_descriptor()->IsSlowLayout());
CHECK_EQ(4, map->layout_descriptor()->length());
// The unused tail of the layout descriptor is now "dirty" because of sharing.
CHECK(map->layout_descriptor()->IsConsistentWithMap(*map));
for (int i = kSplitFieldIndex + 1; i < kTrimmedLayoutDescriptorLength; i++) {
CHECK(!map->layout_descriptor()->IsTagged(i));
}
CHECK_LT(map->NumberOfOwnDescriptors(),
map->instance_descriptors()->number_of_descriptors());
// Call GC that should trim both |map|'s descriptor array and layout
// descriptor.
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
// The unused tail of the layout descriptor is now "clean" again.
CHECK(map->layout_descriptor()->IsConsistentWithMap(*map, true));
CHECK(map->owns_descriptors());
CHECK_EQ(map->NumberOfOwnDescriptors(),
map->instance_descriptors()->number_of_descriptors());
CHECK(map->layout_descriptor()->IsSlowLayout());
CHECK_EQ(2, map->layout_descriptor()->length());
{
// Add transitions to tagged fields.
v8::HandleScope scope(CcTest::isolate());
Handle<Map> tmp_map = map;
for (int i = kSplitFieldIndex + 1; i < kFieldCount - 1; i++) {
tmp_map = Map::CopyWithField(tmp_map, MakeName("tagged", i), any_type,
NONE, Representation::Tagged(),
INSERT_TRANSITION).ToHandleChecked();
CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
}
tmp_map = Map::CopyWithField(tmp_map, MakeString("dbl"), any_type, NONE,
Representation::Double(),
INSERT_TRANSITION).ToHandleChecked();
CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
// Check that descriptors are shared.
CHECK(tmp_map->owns_descriptors());
CHECK_EQ(map->instance_descriptors(), tmp_map->instance_descriptors());
}
CHECK(map->layout_descriptor()->IsSlowLayout());
}
TEST(DoScavenge) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -1225,7 +1311,7 @@ TEST(LayoutDescriptorSharing) {
}
Handle<LayoutDescriptor> split_layout_descriptor(
split_map->layout_descriptor(), isolate);
- CHECK(split_layout_descriptor->IsConsistentWithMap(*split_map));
+ CHECK(split_layout_descriptor->IsConsistentWithMap(*split_map, true));
CHECK(split_layout_descriptor->IsSlowLayout());
CHECK(split_map->owns_descriptors());
@@ -1238,7 +1324,7 @@ TEST(LayoutDescriptorSharing) {
// Layout descriptors should be shared with |split_map|.
CHECK(map1->owns_descriptors());
CHECK_EQ(*split_layout_descriptor, map1->layout_descriptor());
- CHECK(map1->layout_descriptor()->IsConsistentWithMap(*map1));
+ CHECK(map1->layout_descriptor()->IsConsistentWithMap(*map1, true));
Handle<Map> map2 = Map::CopyWithField(split_map, MakeString("bar"), any_type,
NONE, Representation::Tagged(),
@@ -1247,7 +1333,7 @@ TEST(LayoutDescriptorSharing) {
// Layout descriptors should not be shared with |split_map|.
CHECK(map2->owns_descriptors());
CHECK_NE(*split_layout_descriptor, map2->layout_descriptor());
- CHECK(map2->layout_descriptor()->IsConsistentWithMap(*map2));
+ CHECK(map2->layout_descriptor()->IsConsistentWithMap(*map2, true));
}
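
The length expectations in TEST(DescriptorArrayTrimming) above follow directly from the backing-store rounding rule; a standalone check of the arithmetic (assumes a 64-bit target with 32-bit Smis and one word per double):

#include <cassert>

// Same rounding as GetSlowModeBackingStoreLength, specialized to 32-bit Smis.
int SlowModeWords(int bit_length) {
  int words = (bit_length + 31) / 32;
  if (words & 1) ++words;  // round up to an even number of uint32 words
  return words;
}

int main() {
  // 128 in-object fields with doubles up to field 127: 128 layout bits.
  assert(SlowModeWords(128) == 4);  // matches CHECK_EQ(4, ...->length())
  // After trimming back to the split map (last double at field 32): 33 bits.
  assert(SlowModeWords(33) == 2);   // matches CHECK_EQ(2, ...->length())
  return 0;
}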


@@ -0,0 +1,53 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --expose-gc
function f() {
this.foo00 = 0;
this.foo01 = 0;
this.foo02 = 0;
this.foo03 = 0;
this.foo04 = 0;
this.foo05 = 0;
this.foo06 = 0;
this.foo07 = 0;
this.foo08 = 0;
this.foo09 = 0;
this.foo0a = 0;
this.foo0b = 0;
this.foo0c = 0;
this.foo0d = 0;
this.foo0e = 0;
this.foo0f = 0;
this.foo10 = 0;
this.foo11 = 0;
this.foo12 = 0;
this.foo13 = 0;
this.foo14 = 0;
this.foo15 = 0;
this.foo16 = 0;
this.foo17 = 0;
this.foo18 = 0;
this.foo19 = 0;
this.foo1a = 0;
this.foo1b = 0;
this.foo1c = 0;
this.foo1d = 0;
this.foo1e = 0;
this.foo1f = 0;
this.d = 1.3;
gc();
this.boom = 230;
this.boom = 1.4;
}
function g() {
return new f();
}
g();
g();
var o = g();
assertEquals(0, o.foo00);
assertEquals(1.4, o.boom);