From 75c7565d4b342db02759bdadf3a172cc803b68ee Mon Sep 17 00:00:00 2001
From: "yangguo@chromium.org"
Date: Fri, 21 Oct 2011 18:40:36 +0000
Subject: [PATCH] MIPS: Porting r9605 to arm (elements kind conversion in generated code).

Port r9690 (857eacf)

BUG=
TEST=

Review URL: http://codereview.chromium.org/8366031
Patch from Paul Lind.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9745 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/mips/code-stubs-mips.cc      |   7 +
 src/mips/codegen-mips.cc         | 257 +++++++++++++++++++++++++++++++
 src/mips/codegen-mips.h          |   1 -
 src/mips/ic-mips.cc              |  41 +++++
 src/mips/macro-assembler-mips.cc |   1 +
 src/mips/stub-cache-mips.cc      |   6 +-
 6 files changed, 309 insertions(+), 4 deletions(-)
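A note on the size computations in the new generators below: on 32-bit MIPS a
smi is the integer value shifted left by one, so shifting the smi-tagged length
left by two yields length * 8 bytes (one double per element), and shifting it
left by one yields length * 4 bytes (one tagged pointer per element); the array
header size is then added before AllocateInNewSpace is called. The host-side
C++ sketch below merely restates that arithmetic; the header-size constant is
an illustrative stand-in, not the V8 definition.

    #include <cassert>
    #include <cstdint>

    // Illustrative constants mirroring the 32-bit V8 build; kHeaderSize is an
    // assumed stand-in for FixedArray::kHeaderSize / FixedDoubleArray::kHeaderSize
    // (map word plus length word).
    const int kSmiTagSize = 1;   // smi = value << 1, low bit clear
    const int kDoubleSize = 8;   // one FixedDoubleArray element
    const int kPointerSize = 4;  // one FixedArray element (tagged pointer)
    const int kHeaderSize = 8;

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

    // Mirrors "sll(scratch, t1, 2); Addu(scratch, scratch, kHeaderSize)":
    // a smi-tagged length shifted left by two is length * 8.
    int32_t DoubleArrayBytes(int32_t smi_length) {
      return (smi_length << 2) + kHeaderSize;
    }

    // Mirrors "sll(a0, t1, 1); Addu(a0, a0, kHeaderSize)":
    // a smi-tagged length shifted left by one is length * 4.
    int32_t ObjectArrayBytes(int32_t smi_length) {
      return (smi_length << 1) + kHeaderSize;
    }

    int main() {
      int32_t length = 10;
      int32_t smi = SmiTag(length);
      assert(SmiUntag(smi) == length);
      assert(DoubleArrayBytes(smi) == length * kDoubleSize + kHeaderSize);
      assert(ObjectArrayBytes(smi) == length * kPointerSize + kHeaderSize);
      return 0;
    }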
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 7ed4f8915a..8d98dc85c4 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -7216,6 +7216,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   { a3, a1, a2, EMIT_REMEMBERED_SET },
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
   { t0, a2, a3, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { a2, a3, t5, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { t2, a2, a0, EMIT_REMEMBERED_SET },
+  { a2, t2, t5, EMIT_REMEMBERED_SET },
   // Null termination.
   { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
 };
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index ff146dd4ed..e9fe2324ea 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -30,10 +30,13 @@
 #if defined(V8_TARGET_ARCH_MIPS)

 #include "codegen.h"
+#include "macro-assembler.h"

 namespace v8 {
 namespace internal {

+#define __ ACCESS_MASM(masm)
+
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
@@ -50,6 +53,260 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->set_has_frame(false);
 }

+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  // Set transitioned map.
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required;
+  bool fpu_supported = CpuFeatures::IsSupported(FPU);
+  __ push(ra);
+
+  Register scratch = t6;
+
+  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: source FixedArray
+  // t1: number of elements (smi-tagged)
+
+  // Allocate new FixedDoubleArray.
+  __ sll(scratch, t1, 2);
+  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
+  __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+  // t2: destination FixedDoubleArray, not tagged as heap object
+  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
+  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  // Set destination FixedDoubleArray's length.
+  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+  // Update receiver's map.
+
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ Addu(a3, t2, Operand(kHeapObjectTag));
+  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(a2,
+                      JSObject::kElementsOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+
+  // Prepare for conversion loop.
+  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
+  __ sll(t2, t1, 2);
+  __ Addu(t2, t2, t3);
+  __ li(t0, Operand(kHoleNanLower32));
+  __ li(t1, Operand(kHoleNanUpper32));
+  // t0: kHoleNanLower32
+  // t1: kHoleNanUpper32
+  // t2: end of destination FixedDoubleArray, not tagged
+  // t3: begin of FixedDoubleArray element fields, not tagged
+
+  if (!fpu_supported) __ Push(a1, a0);
+
+  __ Branch(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ pop(ra);
+  __ Branch(fail);
+
+  // Convert and copy elements.
+  __ bind(&loop);
+  __ lw(t5, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+  // t5: current element
+  __ JumpIfNotSmi(t5, &convert_hole);
+
+  // Normal smi, convert to double and store.
+  __ SmiUntag(t5);
+  if (fpu_supported) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(t5, f0);
+    __ cvt_d_w(f0, f0);
+    __ sdc1(f0, MemOperand(t3));
+    __ Addu(t3, t3, kDoubleSize);
+  } else {
+    FloatingPointHelper::ConvertIntToDouble(masm,
+                                            t5,
+                                            FloatingPointHelper::kCoreRegisters,
+                                            f0,
+                                            a0,
+                                            a1,
+                                            t7,
+                                            f0);
+    __ sw(a0, MemOperand(t3));  // mantissa
+    __ sw(a1, MemOperand(t3, kIntSize));  // exponent
+    __ Addu(t3, t3, kDoubleSize);
+  }
+  __ Branch(&entry);
+
+  // Hole found, store the-hole NaN.
+  __ bind(&convert_hole);
+  __ sw(t0, MemOperand(t3));  // mantissa
+  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
+  __ Addu(t3, t3, kDoubleSize);
+
+  __ bind(&entry);
+  __ Branch(&loop, lt, t3, Operand(t2));
+
+  if (!fpu_supported) __ Pop(a1, a0);
+  __ pop(ra);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  Label entry, loop, convert_hole, gc_required;
+  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: source FixedArray
+  // t1: number of elements (smi-tagged)
+
+  // Allocate new FixedArray.
+  __ sll(a0, t1, 1);
+  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
+  __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+  // t2: destination FixedArray, not tagged as heap object
+  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
+  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  // Set destination FixedArray's length.
+  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+
+  // Prepare for conversion loop.
+  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
+  __ Addu(t2, t2, Operand(kHeapObjectTag));
+  __ sll(t1, t1, 1);
+  __ Addu(t1, a3, t1);
+  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
+  // Using offsetted addresses.
+  // a3: begin of destination FixedArray element fields, not tagged
+  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
+  // t1: end of destination FixedArray, not tagged
+  // t2: destination FixedArray
+  // t3: the-hole pointer
+  // t5: heap number map
+  __ Branch(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+  __ Branch(fail);
+
+  __ bind(&loop);
+  __ lw(a1, MemOperand(t0));
+  __ Addu(t0, t0, kDoubleSize);
+  // a1: current element's upper 32 bit
+  // t0: address of next element's upper 32 bit
+  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
+  // a2: new heap number
+  __ lw(a0, MemOperand(t0, -12));
+  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
+  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
+  __ mov(a0, a3);
+  __ sw(a2, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+  __ RecordWrite(t2,
+                 a0,
+                 a2,
+                 kRAHasBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ Branch(&entry);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ sw(t3, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+
+  __ bind(&entry);
+  __ Branch(&loop, lt, a3, Operand(t1));
+
+  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+  // Update receiver's map.
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(a2,
+                      JSObject::kElementsOffset,
+                      t2,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(ra);
+}
+
+#undef __

 } }  // namespace v8::internal
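A note on the double layout the two conversion loops above rely on: on the
little-endian MIPS configurations this code targets, the low (mantissa) word of
an IEEE-754 double sits at the lower address and the sign/exponent word at
offset +4. That is why the non-FPU path writes each element as two word stores,
and why GenerateDoubleToObject can recognize the hole by inspecting only the
upper word. The host-side sketch below just illustrates that word split; the
names are ad hoc, not V8's.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // The two 32-bit halves of a double's bit pattern. On a little-endian
    // target the "mantissa" (low) word is stored at the lower address and the
    // "exponent" (high) word, holding sign, exponent and mantissa top bits, at +4.
    struct DoubleWords {
      uint32_t mantissa;
      uint32_t exponent;
    };

    DoubleWords Split(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      DoubleWords w;
      w.mantissa = static_cast<uint32_t>(bits & 0xFFFFFFFFu);
      w.exponent = static_cast<uint32_t>(bits >> 32);
      return w;
    }

    double Join(DoubleWords w) {
      uint64_t bits = (static_cast<uint64_t>(w.exponent) << 32) | w.mantissa;
      double value;
      std::memcpy(&value, &bits, sizeof(value));
      return value;
    }

    int main() {
      DoubleWords w = Split(1.5);
      std::printf("low word: %08x  high word: %08x\n",
                  static_cast<unsigned>(w.mantissa),
                  static_cast<unsigned>(w.exponent));
      // The split round-trips exactly, which is what lets the stubs copy a
      // double as two word stores and rebuild a HeapNumber from the two words.
      std::printf("round trip: %g\n", Join(w));
      return 0;
    }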
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index b020d80575..4549509f30 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -31,7 +31,6 @@

 #include "ast.h"
-#include "code-stubs-mips.h"
 #include "ic-inl.h"

 namespace v8 {
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 42c45839b5..afa2ccf9b8 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1399,6 +1399,47 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
 }


+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a2     : receiver
+  //  -- a3     : target map
+  //  -- ra     : return address
+  // -----------------------------------
+  // Must return the modified receiver in v0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a2);
+    __ bind(&fail);
+  }
+
+  __ push(a2);
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a2     : receiver
+  //  -- a3     : target map
+  //  -- ra     : return address
+  // -----------------------------------
+  // Must return the modified receiver in v0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a2);
+    __ bind(&fail);
+  }
+
+  __ push(a2);
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index b627cad3a8..32dce660d8 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -2873,6 +2873,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
   ASSERT(!result.is(scratch1));
   ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
+  ASSERT(!object_size.is(t9));
   ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));

   // Check relative positions of allocation top and limit addresses.
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 1deaaa4ec7..9ca9cd4bfb 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1640,7 +1640,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
       __ bind(&with_write_barrier);

       __ lw(t2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ CheckFastSmiOnlyElements(t2, t2, &call_builtin);
+      __ CheckFastObjectElements(t2, t2, &call_builtin);

       // Save new length.
       __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -3293,8 +3293,8 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
       __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
     } else {
       Label next_map;
-      __ Branch(&next_map, eq, a3, Operand(map));
-      __ li(t0, Operand(Handle<Map>(transitioned_maps->at(i))));
+      __ Branch(&next_map, ne, a3, Operand(map));
+      __ li(a3, Operand(Handle<Map>(transitioned_maps->at(i))));
       __ Jump(code, RelocInfo::CODE_TARGET);
       __ bind(&next_map);
     }
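The CompileStorePolymorphic change at the end is a correctness fix rather than
new porting work: the branch previously skipped to the next candidate when the
incoming map matched, and the transition target was loaded into t0 rather than
a3, the register the transition stubs read it from. The C++ sketch below models
only the corrected control flow; the types and names are invented for
illustration and are not V8 code.

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins, invented for illustration; these are not V8 types.
    using Map = int;
    constexpr Map kNoTransition = 0;
    using Handler = void (*)(Map target_map);  // stub; receives the map "in a3"

    struct PolymorphicEntry {
      Map receiver_map;      // map this entry was compiled for
      Map transitioned_map;  // kNoTransition if the handler stores in place
      Handler handler;       // code to jump to
    };

    // Models the corrected dispatch: a non-matching map falls through to the
    // next entry (Branch(&next_map, ne, a3, Operand(map))), and a matching
    // entry hands its transition target to the stub (li(a3, ...) before the
    // Jump), which the old code inverted and loaded into the wrong register.
    bool Dispatch(const std::vector<PolymorphicEntry>& entries,
                  Map incoming_map) {
      for (const PolymorphicEntry& e : entries) {
        if (incoming_map != e.receiver_map) continue;  // bind(&next_map)
        e.handler(e.transitioned_map);
        return true;
      }
      return false;  // miss: fall back to the generic stub
    }

    void StoreWithTransition(Map target_map) {
      if (target_map != kNoTransition)
        std::printf("transition receiver to map %d, then store\n", target_map);
      else
        std::printf("store without transition\n");
    }

    int main() {
      std::vector<PolymorphicEntry> entries = {
          {/*smi-only*/ 1, /*to double*/ 2, StoreWithTransition},
          {/*double*/ 2, kNoTransition, StoreWithTransition},
      };
      Dispatch(entries, 1);  // matches the first entry and requests the transition
      return 0;
    }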