Swap bitfield3 and backpointer.

Bitfield3 now has its own field on the map, while the back pointer shares a field with the descriptor array, which will become the transition array.
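Concretely, the back-pointer lookup now has two states. A minimal standalone sketch of the dispatch (stand-in types, not V8 code; the real logic is Map::GetBackPointer in objects-inl.h below):

#include <cassert>

struct Object { virtual ~Object() {} };
struct DescriptorArray;

struct Map : Object {
  // One slot, two states: either the map's DescriptorArray (which then
  // stores the back pointer in its first element), or -- when the map has
  // no descriptors -- the back pointer itself (a Map* or undefined).
  Object* instance_descriptors_or_back_pointer;
  Object* GetBackPointer();
};

struct DescriptorArray : Object {
  Object* back_pointer_storage;  // element 0 of the array in the real layout
};

Object* Map::GetBackPointer() {
  if (DescriptorArray* array = dynamic_cast<DescriptorArray*>(
          instance_descriptors_or_back_pointer)) {
    return array->back_pointer_storage;
  }
  return instance_descriptors_or_back_pointer;  // a Map or undefined
}

int main() {
  Map parent, child;
  child.instance_descriptors_or_back_pointer = &parent;  // descriptor-less map
  assert(child.GetBackPointer() == &parent);

  DescriptorArray descriptors;
  descriptors.back_pointer_storage = &parent;  // back pointer moves inside
  child.instance_descriptors_or_back_pointer = &descriptors;
  assert(child.GetBackPointer() == &parent);   // same answer in both states
}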

BUG=
TEST=

Review URL: https://chromiumcodereview.appspot.com/10692130

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
verwaest@chromium.org 2012-07-10 13:31:36 +00:00
parent 1c62d72a3c
commit 45419ee145
15 changed files with 150 additions and 156 deletions

View File

@@ -1141,7 +1141,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // We got a map in register r0. Get the enumeration cache from it.
   __ bind(&use_cache);
-  __ LoadInstanceDescriptors(r0, r1);
+  __ LoadInstanceDescriptors(r0, r1, r2);
   __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
   __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -2737,7 +2737,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
   // Look for valueOf symbol in the descriptor array, and indicate false if
   // found. The type is not checked, so if it is a transition it is a false
   // negative.
-  __ LoadInstanceDescriptors(r1, r4);
+  __ LoadInstanceDescriptors(r1, r4, r3);
   __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
   // r4: descriptor array
   // r3: length of descriptor array

View File

@@ -2381,8 +2381,9 @@ LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
 LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
   LOperand* map = UseRegister(instr->map());
+  LOperand* scratch = TempRegister();
   return AssignEnvironment(DefineAsRegister(
-      new(zone()) LForInCacheArray(map)));
+      new(zone()) LForInCacheArray(map, scratch)));
 }

View File

@@ -2177,13 +2177,15 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
 };

-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray: public LTemplateInstruction<1, 1, 1> {
  public:
-  explicit LForInCacheArray(LOperand* map) {
+  explicit LForInCacheArray(LOperand* map, LOperand* scratch) {
     inputs_[0] = map;
+    temps_[0] = scratch;
   }

   LOperand* map() { return inputs_[0]; }
+  LOperand* scratch() { return temps_[0]; }

   DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
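The template arguments of LTemplateInstruction are the result, input, and temp operand counts, so the <1, 1, 0> to <1, 1, 1> bump above is what creates the temps_[0] slot the constructor now fills. A toy version of the idea (simplified and invented, not the real V8 template):

// Toy model: operand storage sized at compile time by instruction shape.
template <int Results, int Inputs, int Temps>
struct LTemplateInstructionSketch {
  void* results_[Results];
  void* inputs_[Inputs];
  void* temps_[Temps];
};

// LForInCacheArray's new shape <1, 1, 1>: one result, one input, and the
// scratch temp consumed by CheckMap inside LoadInstanceDescriptors.
typedef LTemplateInstructionSketch<1, 1, 1> ForInCacheArrayShape;

static_assert(sizeof(ForInCacheArrayShape) == 3 * sizeof(void*),
              "one slot per operand");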

View File

@@ -5357,7 +5357,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   Register map = ToRegister(instr->map());
   Register result = ToRegister(instr->result());
-  __ LoadInstanceDescriptors(map, result);
+  Register scratch = ToRegister(instr->scratch());
+  __ LoadInstanceDescriptors(map, result, scratch);
   __ ldr(result,
          FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
   __ ldr(result,

View File

@@ -3673,13 +3673,21 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
 void MacroAssembler::LoadInstanceDescriptors(Register map,
-                                             Register descriptors) {
+                                             Register descriptors,
+                                             Register scratch) {
   ldr(descriptors,
-      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
-  Label not_smi;
-  JumpIfNotSmi(descriptors, &not_smi);
+      FieldMemOperand(map, Map::kInstanceDescriptorsOrBackPointerOffset));
+  Label ok, fail;
+  CheckMap(descriptors,
+           scratch,
+           isolate()->factory()->fixed_array_map(),
+           &fail,
+           DONT_DO_SMI_CHECK);
+  jmp(&ok);
+  bind(&fail);
   mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
-  bind(&not_smi);
+  bind(&ok);
 }
@@ -3704,8 +3712,13 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
   // check for an enum cache. Leave the map in r2 for the subsequent
   // prototype load.
   ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
-  JumpIfSmi(r3, call_runtime);
+  ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBackPointerOffset));
+  CheckMap(r3,
+           r7,
+           isolate()->factory()->fixed_array_map(),
+           call_runtime,
+           DONT_DO_SMI_CHECK);

   // Check that there is an enum cache in the non-empty instance
   // descriptors (r3). This is the case if the next enumeration
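Previously the overloaded slot held either a Smi (bit_field3) or a descriptor array, so a Smi test sufficed. Now all three possible values (descriptor array, back-pointer map, undefined) are heap objects, so the generated code classifies the value by its map instead: descriptor arrays are plain FixedArrays, hence the comparison against fixed_array_map. A standalone C++ sketch of that check (invented names; mirrors what LoadInstanceDescriptors emits):

#include <cassert>

enum class Kind { kFixedArray, kMap, kOddball };
struct HeapObject { Kind map; };

// Keep the slot value only if its map says "FixedArray" (i.e. it is a
// descriptor array); otherwise substitute the canonical empty array.
HeapObject* LoadDescriptors(HeapObject* slot, HeapObject* empty) {
  return slot->map == Kind::kFixedArray ? slot : empty;
}

int main() {
  HeapObject descriptors{Kind::kFixedArray};
  HeapObject back_pointer{Kind::kMap};
  HeapObject undefined{Kind::kOddball};
  HeapObject empty{Kind::kFixedArray};
  assert(LoadDescriptors(&descriptors, &empty) == &descriptors);
  assert(LoadDescriptors(&back_pointer, &empty) == &empty);
  assert(LoadDescriptors(&undefined, &empty) == &empty);
}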

View File

@@ -1268,7 +1268,9 @@ class MacroAssembler: public Assembler {
                             DoubleRegister temp_double_reg);

-  void LoadInstanceDescriptors(Register map, Register descriptors);
+  void LoadInstanceDescriptors(Register map,
+                               Register descriptors,
+                               Register scratch);

   // Activation support.
   void EnterFrame(StackFrame::Type type);

View File

@@ -2058,6 +2058,7 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
   reinterpret_cast<Map*>(result)->set_bit_field(0);
   reinterpret_cast<Map*>(result)->set_bit_field2(0);
+  reinterpret_cast<Map*>(result)->set_bit_field3(0);
   return result;
 }
@@ -2080,12 +2081,12 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
   map->set_instance_size(instance_size);
   map->set_inobject_properties(0);
   map->set_pre_allocated_property_fields(0);
-  map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->init_back_pointer(undefined_value());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
+  map->set_bit_field3(0);
   map->set_elements_kind(elements_kind);

   // If the map object is aligned fill the padding area with Smi 0 objects.
@@ -2219,15 +2220,12 @@ bool Heap::CreateInitialMaps() {
   set_empty_descriptor_array(DescriptorArray::cast(obj));

   // Fix the instance_descriptors for the existing maps.
-  meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
+  meta_map()->init_back_pointer(undefined_value());

-  fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
+  fixed_array_map()->init_back_pointer(undefined_value());

-  oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
+  oddball_map()->init_back_pointer(undefined_value());
@@ -4191,7 +4189,6 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   // Set up the global object as a normalized object.
   global->set_map(new_map);
-  global->map()->clear_instance_descriptors();
   global->set_properties(dictionary);

   // Make sure result is a global object with properties in dictionary.

View File

@@ -2518,12 +2518,18 @@ void MacroAssembler::Abort(const char* msg) {
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
-  mov(descriptors,
-      FieldOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
-  Label not_smi;
-  JumpIfNotSmi(descriptors, &not_smi);
+  mov(descriptors, FieldOperand(map,
+                                Map::kInstanceDescriptorsOrBackPointerOffset));
+  Label ok, fail;
+  CheckMap(descriptors,
+           isolate()->factory()->fixed_array_map(),
+           &fail,
+           DONT_DO_SMI_CHECK);
+  jmp(&ok);
+  bind(&fail);
   mov(descriptors, isolate()->factory()->empty_descriptor_array());
-  bind(&not_smi);
+  bind(&ok);
 }
@@ -2886,8 +2892,11 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
   // check for an enum cache. Leave the map in ebx for the subsequent
   // prototype load.
   mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
-  JumpIfSmi(edx, call_runtime);
+  mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBackPointerOffset));
+  CheckMap(edx,
+           isolate()->factory()->fixed_array_map(),
+           call_runtime,
+           DONT_DO_SMI_CHECK);

   // Check that there is an enum cache in the non-empty instance
   // descriptors (edx). This is the case if the next enumeration

View File

@@ -1829,20 +1829,23 @@ void Marker<T>::MarkMapContents(Map* map) {
   base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));

   Object** descriptor_array_slot =
-      HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
+      HeapObject::RawField(map, Map::kInstanceDescriptorsOrBackPointerOffset);
   Object* descriptor_array = *descriptor_array_slot;
-  if (!descriptor_array->IsSmi()) {
+  if (descriptor_array->IsDescriptorArray()) {
     MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
+  } else {
+    // Already marked by marking map->GetBackPointer().
+    ASSERT(descriptor_array->IsMap() || descriptor_array->IsUndefined());
   }

   // Mark the Object* fields of the Map. Since the descriptor array has been
   // marked already, it is fine that one of these fields contains a pointer
   // to it. But make sure to skip back pointer.
   STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
-                Map::kBackPointerOffset + kPointerSize);
+                Map::kBitField3Offset + kPointerSize);

   Object** start_slot =
       HeapObject::RawField(map, Map::kPointerFieldsBeginOffset);
-  Object** end_slot = HeapObject::RawField(map, Map::kBackPointerOffset);
+  Object** end_slot = HeapObject::RawField(map, Map::kBitField3Offset);
   for (Object** slot = start_slot; slot < end_slot; slot++) {
     Object* obj = *slot;
     if (!obj->NonFailureIsHeapObject()) continue;

View File

@@ -1896,14 +1896,17 @@ bool DescriptorArray::HasTransitionArray() {
 }

-int DescriptorArray::bit_field3_storage() {
-  Object* storage = READ_FIELD(this, kBitField3StorageOffset);
-  return Smi::cast(storage)->value();
+Object* DescriptorArray::back_pointer_storage() {
+  return READ_FIELD(this, kBackPointerStorageOffset);
 }

-void DescriptorArray::set_bit_field3_storage(int value) {
-  ASSERT(length() > kBitField3StorageIndex);
-  WRITE_FIELD(this, kBitField3StorageOffset, Smi::FromInt(value));
+void DescriptorArray::set_back_pointer_storage(Object* value,
+                                               WriteBarrierMode mode) {
+  ASSERT(length() > kBackPointerStorageIndex);
+  Heap* heap = GetHeap();
+  WRITE_FIELD(this, kBackPointerStorageOffset, value);
+  CONDITIONAL_WRITE_BARRIER(heap, this, kBackPointerStorageOffset, value, mode);
 }
@@ -3437,8 +3440,9 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {

 DescriptorArray* Map::instance_descriptors() {
-  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
-  if (object->IsSmi()) {
+  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBackPointerOffset);
+  if (!object->IsDescriptorArray()) {
+    ASSERT(object->IsMap() || object->IsUndefined());
     return GetHeap()->empty_descriptor_array();
   } else {
     return DescriptorArray::cast(object);
@@ -3446,85 +3450,60 @@ DescriptorArray* Map::instance_descriptors() {
 }

-void Map::init_instance_descriptors() {
-  WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, Smi::FromInt(0));
-}
-
-void Map::clear_instance_descriptors() {
-  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
-  if (!object->IsSmi()) {
-    WRITE_FIELD(
-        this,
-        kInstanceDescriptorsOrBitField3Offset,
-        Smi::FromInt(DescriptorArray::cast(object)->bit_field3_storage()));
-  }
-}
-
 void Map::set_instance_descriptors(DescriptorArray* value,
                                    WriteBarrierMode mode) {
-  Object* object = READ_FIELD(this,
-                              kInstanceDescriptorsOrBitField3Offset);
   Heap* heap = GetHeap();
   if (value == heap->empty_descriptor_array()) {
-    clear_instance_descriptors();
+    ClearDescriptorArray(heap, mode);
     return;
-  } else {
-    if (object->IsSmi()) {
-      value->set_bit_field3_storage(Smi::cast(object)->value());
-    } else {
-      value->set_bit_field3_storage(
-          DescriptorArray::cast(object)->bit_field3_storage());
-    }
+  }
+
+  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBackPointerOffset);
+  if (object->IsDescriptorArray()) {
+    value->set_back_pointer_storage(
+        DescriptorArray::cast(object)->back_pointer_storage());
+  } else {
+    ASSERT(object->IsMap() || object->IsUndefined());
+    value->set_back_pointer_storage(object);
   }
   ASSERT(!is_shared());
-  WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
+  WRITE_FIELD(this, kInstanceDescriptorsOrBackPointerOffset, value);
   CONDITIONAL_WRITE_BARRIER(
-      heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
+      heap, this, kInstanceDescriptorsOrBackPointerOffset, value, mode);
 }

-int Map::bit_field3() {
-  Object* object = READ_FIELD(this,
-                              kInstanceDescriptorsOrBitField3Offset);
-  if (object->IsSmi()) {
-    return Smi::cast(object)->value();
-  } else {
-    return DescriptorArray::cast(object)->bit_field3_storage();
-  }
-}
+SMI_ACCESSORS(Map, bit_field3, kBitField3Offset)

-void Map::ClearDescriptorArray() {
-  int bitfield3 = bit_field3();
+void Map::ClearDescriptorArray(Heap* heap, WriteBarrierMode mode) {
+  Object* back_pointer = GetBackPointer();
 #ifdef DEBUG
-  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
-  if (!object->IsSmi()) {
+  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBackPointerOffset);
+  if (object->IsDescriptorArray()) {
     ZapTransitions();
+  } else {
+    ASSERT(object->IsMap() || object->IsUndefined());
   }
 #endif
-  WRITE_FIELD(this,
-              kInstanceDescriptorsOrBitField3Offset,
-              Smi::FromInt(bitfield3));
+  WRITE_FIELD(this, kInstanceDescriptorsOrBackPointerOffset, back_pointer);
+  CONDITIONAL_WRITE_BARRIER(
+      heap, this, kInstanceDescriptorsOrBackPointerOffset, back_pointer, mode);
 }

-void Map::set_bit_field3(int value) {
-  ASSERT(Smi::IsValid(value));
-  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
-  if (object->IsSmi()) {
-    WRITE_FIELD(this,
-                kInstanceDescriptorsOrBitField3Offset,
-                Smi::FromInt(value));
-  } else {
-    DescriptorArray::cast(object)->set_bit_field3_storage(value);
-  }
-}
-
 Object* Map::GetBackPointer() {
-  return READ_FIELD(this, kBackPointerOffset);
+  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBackPointerOffset);
+  if (object->IsDescriptorArray()) {
+    return DescriptorArray::cast(object)->back_pointer_storage();
+  } else {
+    ASSERT(object->IsMap() || object->IsUndefined());
+    return object;
+  }
 }
@@ -3549,8 +3528,8 @@ MaybeObject* Map::AddTransition(String* key, Object* value) {
 }

-// If the map does not have a descriptor array, install a new empty
-// descriptor array that has room for a transition array.
+// If the map is using the empty descriptor array, install a new empty
+// descriptor array that will contain an elements transition.
 static MaybeObject* AllowTransitions(Map* map) {
   if (map->instance_descriptors()->MayContainTransitions()) return map;
   DescriptorArray* descriptors;
@@ -3619,13 +3598,13 @@ TransitionArray* Map::transitions() {
 }

-void Map::ClearTransitions() {
+void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) {
 #ifdef DEBUG
   ZapTransitions();
 #endif
   DescriptorArray* descriptors = instance_descriptors();
   if (descriptors->number_of_descriptors() == 0) {
-    ClearDescriptorArray();
+    ClearDescriptorArray(heap, mode);
   } else {
     descriptors->ClearTransitions();
   }
@@ -3648,17 +3627,22 @@ MaybeObject* Map::set_transitions(TransitionArray* transitions_array) {
 void Map::init_back_pointer(Object* undefined) {
   ASSERT(undefined->IsUndefined());
-  WRITE_FIELD(this, kBackPointerOffset, undefined);
+  WRITE_FIELD(this, kInstanceDescriptorsOrBackPointerOffset, undefined);
 }

 void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
-  Heap* heap = GetHeap();
   ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
   ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
          (value->IsMap() && GetBackPointer()->IsUndefined()));
-  WRITE_FIELD(this, kBackPointerOffset, value);
-  CONDITIONAL_WRITE_BARRIER(heap, this, kBackPointerOffset, value, mode);
+  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBackPointerOffset);
+  if (object->IsMap()) {
+    WRITE_FIELD(this, kInstanceDescriptorsOrBackPointerOffset, value);
+    CONDITIONAL_WRITE_BARRIER(
+        GetHeap(), this, kInstanceDescriptorsOrBackPointerOffset, value, mode);
+  } else {
+    DescriptorArray::cast(object)->set_back_pointer_storage(value);
+  }
 }
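One subtlety in SetBackPointer above: the write barrier has to target whichever object actually receives the store. While the slot still holds a map, the Map's own field is written (hence CONDITIONAL_WRITE_BARRIER on the map); once a descriptor array is installed, the store is delegated to set_back_pointer_storage, which applies the barrier to the array. A compilable toy of that dispatch (invented names, not V8 code):

#include <cassert>

struct Host { bool barrier_recorded = false; };

// Stand-in for CONDITIONAL_WRITE_BARRIER: remember which object was mutated.
void RecordWrite(Host* host) { host->barrier_recorded = true; }

// Mirrors SetBackPointer's two branches: barrier on the map while the slot
// still holds a map, barrier on the array once descriptors are installed.
void StoreBackPointer(bool slot_holds_map, Host* map, Host* array) {
  RecordWrite(slot_holds_map ? map : array);
}

int main() {
  Host map, array;
  StoreBackPointer(true, &map, &array);
  assert(map.barrier_recorded && !array.barrier_recorded);

  Host map2, array2;
  StoreBackPointer(false, &map2, &array2);
  assert(array2.barrier_recorded && !map2.barrier_recorded);
}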

View File

@@ -3411,9 +3411,7 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
                                                -instance_size_delta);
   }

   set_map(new_map);
-  new_map->clear_instance_descriptors();

   set_properties(dictionary);
@@ -4927,12 +4925,7 @@ MaybeObject* Map::CopyDropDescriptors() {
   }
   Map::cast(result)->set_prototype(prototype());
   Map::cast(result)->set_constructor(constructor());
-  // Don't copy descriptors, so map transitions always remain a forest.
-  // If we retained the same descriptors we would have two maps
-  // pointing to the same transition which is bad because the garbage
-  // collector relies on being able to reverse pointers from transitions
-  // to maps. If properties need to be retained use CopyDropTransitions.
-  Map::cast(result)->clear_instance_descriptors();
   // Please note instance_type and instance_size are set when allocated.
   Map::cast(result)->set_inobject_properties(inobject_properties());
   Map::cast(result)->set_unused_property_fields(unused_property_fields());
@@ -5824,7 +5817,6 @@ MaybeObject* DescriptorArray::Allocate(int number_of_descriptors,
     if (!maybe_array->To(&result)) return maybe_array;
   }

-  result->set(kBitField3StorageIndex, Smi::FromInt(0));
   result->set(kEnumerationIndexIndex,
               Smi::FromInt(PropertyDetails::kInitialIndex));
   result->set(kTransitionsIndex, Smi::FromInt(0));
@@ -7413,7 +7405,7 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
   if (transition_index == 0 &&
       !t->HasElementsTransition() &&
       !t->HasPrototypeTransitions()) {
-    return ClearTransitions();
+    return ClearTransitions(heap);
   }

   int trim = t->number_of_transitions() - transition_index;

View File

@@ -2514,11 +2514,7 @@ class DescriptorArray: public FixedArray {
                  kTransitionsOffset);
   }

-  // TODO(1399): It should be possible to make room for bit_field3 in the map
-  // without overloading the instance descriptors field in the map
-  // (and storing it in the DescriptorArray when the map has one).
-  inline int bit_field3_storage();
-  inline void set_bit_field3_storage(int value);
+  DECL_ACCESSORS(back_pointer_storage, Object)

   // Initialize or change the enum cache,
   // using the supplied storage for the small "bridge".
@@ -2607,7 +2603,7 @@ class DescriptorArray: public FixedArray {
   // Constant for denoting key was not found.
   static const int kNotFound = -1;

-  static const int kBitField3StorageIndex = 0;
+  static const int kBackPointerStorageIndex = 0;
   static const int kEnumerationIndexIndex = 1;
   static const int kTransitionsIndex = 2;
   static const int kFirstIndex = 3;
@@ -2619,8 +2615,8 @@ class DescriptorArray: public FixedArray {
   static const int kEnumCacheBridgeIndicesCacheIndex = 2;

   // Layout description.
-  static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
-  static const int kEnumerationIndexOffset = kBitField3StorageOffset +
+  static const int kBackPointerStorageOffset = FixedArray::kHeaderSize;
+  static const int kEnumerationIndexOffset = kBackPointerStorageOffset +
                                              kPointerSize;
   static const int kTransitionsOffset = kEnumerationIndexOffset + kPointerSize;
   static const int kFirstOffset = kTransitionsOffset + kPointerSize;
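So the only change to the DescriptorArray header is the meaning of slot 0: it now holds the stashed back pointer instead of the stashed bit_field3. The index constants above, summarized as a compilable sketch:

// First three elements of every (non-empty) DescriptorArray; real
// descriptor entries start at index 3.
enum DescriptorArrayLayout {
  kBackPointerStorageIndex = 0,  // was kBitField3StorageIndex before this CL
  kEnumerationIndexIndex = 1,
  kTransitionsIndex = 2,
  kFirstIndex = 3
};

static_assert(kFirstIndex == kTransitionsIndex + 1,
              "descriptor entries follow the three header slots");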
@@ -4676,12 +4672,8 @@ class Map: public HeapObject {
   inline void set_bit_field2(byte value);

   // Bit field 3.
-  // TODO(1399): It should be possible to make room for bit_field3 in the map
-  // without overloading the instance descriptors field (and storing it in the
-  // DescriptorArray when the map has one).
   inline int bit_field3();
   inline void set_bit_field3(int value);
-  inline void SetOwnBitField3(int value);

   // Tells whether the object in the prototype property will be used
   // for instances created from this function. If the prototype
@@ -4812,7 +4804,8 @@ class Map: public HeapObject {
                                                         Object* value);
   MUST_USE_RESULT inline MaybeObject* set_transitions(
       TransitionArray* transitions);
-  inline void ClearTransitions();
+  inline void ClearTransitions(Heap* heap,
+                               WriteBarrierMode mode = UPDATE_WRITE_BARRIER);

   // Tells whether the map is attached to SharedFunctionInfo
   // (for inobject slack tracking).
@@ -4839,20 +4832,12 @@ class Map: public HeapObject {
   inline JSFunction* unchecked_constructor();

-  // Should only be called by the code that initializes map to set initial valid
-  // value of the instance descriptor member.
-  inline void init_instance_descriptors();
-
   // [instance descriptors]: describes the object.
   DECL_ACCESSORS(instance_descriptors, DescriptorArray)

   // Should only be called to clear a descriptor array that was only used to
   // store transitions and does not contain any live transitions anymore.
-  inline void ClearDescriptorArray();
-
-  // Sets the instance descriptor array for the map to be an empty descriptor
-  // array.
-  inline void clear_instance_descriptors();
+  inline void ClearDescriptorArray(Heap* heap, WriteBarrierMode mode);

   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, Object)
@@ -5047,23 +5032,18 @@ class Map: public HeapObject {
-  // map flags when unused (bit_field3). When the map has instance descriptors,
-  // the flags are transferred to the instance descriptor array and accessed
-  // through an extra indirection.
-  // TODO(1399): It should be possible to make room for bit_field3 in the map
-  // without overloading the instance descriptors field, but the map is
-  // currently perfectly aligned to 32 bytes and extending it at all would
-  // double its size. After the increment GC work lands, this size restriction
-  // could be loosened and bit_field3 moved directly back in the map.
-  static const int kInstanceDescriptorsOrBitField3Offset =
+  static const int kInstanceDescriptorsOrBackPointerOffset =
       kConstructorOffset + kPointerSize;
   static const int kCodeCacheOffset =
-      kInstanceDescriptorsOrBitField3Offset + kPointerSize;
-  static const int kBackPointerOffset = kCodeCacheOffset + kPointerSize;
-  static const int kPadStart = kBackPointerOffset + kPointerSize;
+      kInstanceDescriptorsOrBackPointerOffset + kPointerSize;
+  static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
+  static const int kPadStart = kBitField3Offset + kPointerSize;
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);

   // Layout of pointer fields. Heap iteration code relies on them
   // being continuously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
-  static const int kPointerFieldsEndOffset = kBackPointerOffset + kPointerSize;
+  static const int kPointerFieldsEndOffset = kBitField3Offset + kPointerSize;

   // Byte offsets within kInstanceSizesOffset.
   static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;

View File

@@ -2011,7 +2011,7 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
     TagObject(map->instance_descriptors(), "(map descriptors)");
     SetInternalReference(map, entry,
                          "descriptors", map->instance_descriptors(),
-                         Map::kInstanceDescriptorsOrBitField3Offset);
+                         Map::kInstanceDescriptorsOrBackPointerOffset);
   }
   SetInternalReference(map, entry,
                        "code_cache", map->code_cache(),

View File

@@ -2190,13 +2190,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
     DescriptorArray* new_descriptors =
         DescriptorArray::cast(descriptors_unchecked);
     // Create a new map featuring the new field descriptors array.
-    Object* map_unchecked;
-    { MaybeObject* maybe_map_unchecked = function->map()->CopyDropDescriptors();
-      if (!maybe_map_unchecked->ToObject(&map_unchecked)) {
+    Map* new_map;
+    { MaybeObject* maybe_map_unchecked =
+          function->map()->CopyDropDescriptors();
+      if (!maybe_map_unchecked->To(&new_map)) {
         return maybe_map_unchecked;
       }
     }
-    Map* new_map = Map::cast(map_unchecked);
     new_map->set_instance_descriptors(new_descriptors);
     function->set_map(new_map);
   } else {  // Dictionary properties.

View File

@@ -2853,11 +2853,17 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   movq(descriptors, FieldOperand(map,
-                                 Map::kInstanceDescriptorsOrBitField3Offset));
-  Label not_smi;
-  JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
+                                 Map::kInstanceDescriptorsOrBackPointerOffset));
+  Label ok, fail;
+  CheckMap(descriptors,
+           isolate()->factory()->fixed_array_map(),
+           &fail,
+           DONT_DO_SMI_CHECK);
+  jmp(&ok);
+  bind(&fail);
   Move(descriptors, isolate()->factory()->empty_descriptor_array());
-  bind(&not_smi);
+  bind(&ok);
 }
@@ -4457,8 +4463,12 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
   // check for an enum cache. Leave the map in rbx for the subsequent
   // prototype load.
   movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-  movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
-  JumpIfSmi(rdx, call_runtime);
+  movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBackPointerOffset));
+  CheckMap(rdx,
+           isolate()->factory()->fixed_array_map(),
+           call_runtime,
+           DONT_DO_SMI_CHECK);

   // Check that there is an enum cache in the non-empty instance
   // descriptors (rdx). This is the case if the next enumeration