diff --git a/src/x87/stub-cache-x87.cc b/src/x87/stub-cache-x87.cc
index 7e31b0bc3e..fee4871fde 100644
--- a/src/x87/stub-cache-x87.cc
+++ b/src/x87/stub-cache-x87.cc
@@ -660,12 +660,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
   // checks.
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
-  int index = lookup->GetFieldIndex().field_index();
-
-  // Adjust for the number of properties stored in the object. Even in the
-  // face of a transition we can use the old map here because the size of the
-  // object and the number of in-object properties is not going to change.
-  index -= object->map()->inobject_properties();
+  FieldIndex index = lookup->GetFieldIndex();
 
   Representation representation = lookup->representation();
   ASSERT(!representation.IsNone());
@@ -690,13 +685,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
     }
   } else if (representation.IsDouble()) {
     // Load the double storage.
-    if (index < 0) {
-      int offset = object->map()->instance_size() + (index * kPointerSize);
-      __ mov(scratch1, FieldOperand(receiver_reg, offset));
+    if (index.is_inobject()) {
+      __ mov(scratch1, FieldOperand(receiver_reg, index.offset()));
     } else {
       __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-      int offset = index * kPointerSize + FixedArray::kHeaderSize;
-      __ mov(scratch1, FieldOperand(scratch1, offset));
+      __ mov(scratch1, FieldOperand(scratch1, index.offset()));
     }
 
     // Store the value into the storage.
@@ -724,17 +717,16 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
   // TODO(verwaest): Share this code as a code stub.
   SmiCheck smi_check = representation.IsTagged()
       ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index < 0) {
+  if (index.is_inobject()) {
     // Set the property straight into the object.
-    int offset = object->map()->instance_size() + (index * kPointerSize);
-    __ mov(FieldOperand(receiver_reg, offset), value_reg);
+    __ mov(FieldOperand(receiver_reg, index.offset()), value_reg);
 
     if (!representation.IsSmi()) {
       // Update the write barrier for the array address.
       // Pass the value being stored in the now unused name_reg.
       __ mov(name_reg, value_reg);
       __ RecordWriteField(receiver_reg,
-                          offset,
+                          index.offset(),
                           name_reg,
                           scratch1,
                           EMIT_REMEMBERED_SET,
@@ -742,17 +734,16 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
     }
   } else {
     // Write to the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
     // Get the properties array (optimistically).
     __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ mov(FieldOperand(scratch1, offset), value_reg);
+    __ mov(FieldOperand(scratch1, index.offset()), value_reg);
 
     if (!representation.IsSmi()) {
       // Update the write barrier for the array address.
       // Pass the value being stored in the now unused name_reg.
       __ mov(name_reg, value_reg);
       __ RecordWriteField(scratch1,
-                          offset,
+                          index.offset(),
                           name_reg,
                           receiver_reg,
                           EMIT_REMEMBERED_SET,
@@ -971,20 +962,14 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
 
 void LoadStubCompiler::GenerateLoadField(Register reg,
                                          Handle<JSObject> holder,
-                                         PropertyIndex field,
+                                         FieldIndex field,
                                          Representation representation) {
   if (!reg.is(receiver())) __ mov(receiver(), reg);
   if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(isolate(),
-                       field.is_inobject(holder),
-                       field.translate(holder),
-                       representation);
+    LoadFieldStub stub(isolate(), field);
     GenerateTailCall(masm(), stub.GetCode());
   } else {
-    KeyedLoadFieldStub stub(isolate(),
-                            field.is_inobject(holder),
-                            field.translate(holder),
-                            representation);
+    KeyedLoadFieldStub stub(isolate(), field);
     GenerateTailCall(masm(), stub.GetCode());
   }
 }
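
Note: the arithmetic the removed lines performed inline is what the patch expects FieldIndex to encapsulate: the raw lookup index is adjusted by the map's in-object property count, a negative result means the field lives inside the object (offset relative to the instance size), and a non-negative result means it lives in the out-of-object properties array (offset relative to FixedArray::kHeaderSize). The standalone sketch below is NOT V8's FieldIndex implementation; it only illustrates that offset computation, and the class name, constructor shape, and the kPointerSize / header-size constants are assumptions chosen for the example.

// Minimal sketch (assumed names, not V8's FieldIndex): wraps the offset
// arithmetic that GenerateStoreField previously did by hand.
#include <cstdio>

namespace sketch {

constexpr int kPointerSize = 4;           // assumed 32-bit target (ia32/x87)
constexpr int kFixedArrayHeaderSize = 8;  // assumed stand-in for FixedArray::kHeaderSize

class FieldIndexSketch {
 public:
  // raw_index: field index from the lookup, before adjustment.
  // The old code subtracted the map's in-object property count and
  // treated a negative result as "stored inside the object".
  FieldIndexSketch(int raw_index, int inobject_properties, int instance_size)
      : adjusted_(raw_index - inobject_properties),
        instance_size_(instance_size) {}

  bool is_inobject() const { return adjusted_ < 0; }

  // Offset usable against the receiver (in-object) or against the
  // out-of-object properties backing store.
  int offset() const {
    return is_inobject()
               ? instance_size_ + adjusted_ * kPointerSize
               : adjusted_ * kPointerSize + kFixedArrayHeaderSize;
  }

 private:
  int adjusted_;
  int instance_size_;
};

}  // namespace sketch

int main() {
  // Example object with 2 in-object properties and a 16-byte instance size
  // (assumed values, for illustration only).
  sketch::FieldIndexSketch in_obj(/*raw_index=*/1, /*inobject_properties=*/2,
                                  /*instance_size=*/16);
  sketch::FieldIndexSketch out_obj(/*raw_index=*/3, /*inobject_properties=*/2,
                                   /*instance_size=*/16);
  std::printf("in-object=%d offset=%d\n", in_obj.is_inobject(), in_obj.offset());
  std::printf("in-object=%d offset=%d\n", out_obj.is_inobject(), out_obj.offset());
  return 0;
}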