From d7431f2c6ec010725a3ae6fd1b42d62c0f7d3f8f Mon Sep 17 00:00:00 2001
From: "plind44@gmail.com"
Date: Fri, 7 Jun 2013 16:00:19 +0000
Subject: [PATCH] MIPS: Fill more branch delay slots.

TEST=
BUG=
R=plind44@gmail.com

Review URL: https://codereview.chromium.org/15967006

Patch from Dusan Milosavljevic .

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15011 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/mips/builtins-mips.cc   | 16 ++++----
 src/mips/code-stubs-mips.cc | 80 +++++++++++++++++++++----------
 src/mips/stub-cache-mips.cc | 75 ++++++++++++++--------------------
 3 files changed, 84 insertions(+), 87 deletions(-)

diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 611c74f7f8..06273caf78 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -335,9 +335,9 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
                       call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
   // Set up return value, remove receiver from stack and return.
-  __ mov(v0, a2);
   __ Addu(sp, sp, Operand(kPointerSize));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a2);
   // Check for one argument. Bail out if argument is not smi or if it is
   // negative.
@@ -378,9 +378,9 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
   __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
   // Set up return value, remove receiver and argument from stack and return.
-  __ mov(v0, a3);
   __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a3);
   // Handle construction of an array from a list of arguments.
   __ bind(&argc_two_or_more);
@@ -434,8 +434,8 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
   // a3: JSArray
   // sp[0]: receiver
   __ Addu(sp, sp, Operand(kPointerSize));
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a3);
-  __ Ret();
   __ bind(&has_non_smi_element);
   // Double values are handled by the runtime.
@@ -1372,15 +1372,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
   Label with_tos_register, unknown_state;
   __ Branch(&with_tos_register,
             ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill delay slot Addu will emit one instruction.
   __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
-  __ Ret();
   __ bind(&with_tos_register);
   __ lw(v0, MemOperand(sp, 1 * kPointerSize));
   __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill delay slot Addu will emit one instruction.
   __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
-  __ Ret();
   __ bind(&unknown_state);
   __ stop("no cases left");
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 1a00bc05e0..3d0577eb1e 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -348,8 +348,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
   // Return result. The argument function info has been popped already.
+  __ Ret(USE_DELAY_SLOT);
   __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-  __ Ret();
   __ bind(&check_optimized);
@@ -969,9 +969,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   __ sw(scratch_,
         FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
   __ sll(scratch_, the_int_, 32 - shift_distance);
+  __ Ret(USE_DELAY_SLOT);
   __ sw(scratch_,
         FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
-  __ Ret();
   __ bind(&max_negative_int);
   // The max negative int32 is stored as a positive number in the mantissa of
@@ -983,9 +983,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   __ sw(scratch_,
         FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
   __ mov(scratch_, zero_reg);
+  __ Ret(USE_DELAY_SLOT);
   __ sw(scratch_,
         FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
-  __ Ret();
 }
@@ -1023,6 +1023,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
         __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
         __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
         __ Branch(&return_equal, ne, a0, Operand(t2));
+        ASSERT(is_int16(GREATER) && is_int16(LESS));
+        __ Ret(USE_DELAY_SLOT);
         if (cc == le) {
           // undefined <= undefined should fail.
           __ li(v0, Operand(GREATER));
@@ -1030,13 +1032,13 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
           // undefined >= undefined should fail.
           __ li(v0, Operand(LESS));
         }
-        __ Ret();
       }
     }
   }
   __ bind(&return_equal);
-
+  ASSERT(is_int16(GREATER) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
   if (cc == less) {
     __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
   } else if (cc == greater) {
@@ -1044,7 +1046,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
   } else {
     __ mov(v0, zero_reg);  // Things are <=, >=, ==, === themselves.
   }
-  __ Ret();
   // For less and greater we don't have to check for NaN since the result of
   // x < x is false regardless. For the others here is some code to check
@@ -1075,13 +1076,14 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
     if (cc != eq) {
       // All-zero means Infinity means equal.
       __ Ret(eq, v0, Operand(zero_reg));
+      ASSERT(is_int16(GREATER) && is_int16(LESS));
+      __ Ret(USE_DELAY_SLOT);
       if (cc == le) {
         __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
       } else {
        __ li(v0, Operand(LESS));  // NaN >= NaN should fail.
      }
    }
-    __ Ret();
  }
   // No fall through here.
@@ -1456,12 +1458,14 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&nan);
   // NaN comparisons always fail.
   // Load whatever we need in v0 to make the comparison fail.
+  ASSERT(is_int16(GREATER) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
   if (cc == lt || cc == le) {
     __ li(v0, Operand(GREATER));
   } else {
     __ li(v0, Operand(LESS));
   }
-  __ Ret();
+
   __ bind(&not_smis);
   // At this point we know we are dealing with two different objects,
@@ -1725,6 +1729,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
   if (mode_ == UNARY_OVERWRITE) {
     __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
     __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ Ret(USE_DELAY_SLOT);
     __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
   } else {
     Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -1746,9 +1751,9 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
     __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
     __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
     __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+    __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a1);
   }
-  __ Ret();
 }
@@ -1768,8 +1773,8 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
   __ Branch(&try_float, lt, a2, Operand(zero_reg));
   // Tag the result as a smi and we're done.
+  __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
   __ SmiTag(v0, a1);
-  __ Ret();
   // Try to store the result in a heap number.
   __ bind(&try_float);
@@ -1968,8 +1973,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
         // Check that the signed result fits in a Smi.
         __ Addu(scratch2, scratch1, Operand(0x40000000));
         __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+        __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
         __ SmiTag(v0, scratch1);
-        __ Ret();
       }
       break;
     case Token::MOD: {
@@ -1991,8 +1996,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
         // Check that the signed result fits in a Smi.
         __ Addu(scratch1, scratch2, Operand(0x40000000));
         __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+        __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
        __ SmiTag(v0, scratch2);
-        __ Ret();
       }
       break;
     case Token::BIT_OR:
@@ -2026,8 +2031,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
       __ And(scratch1, v0, Operand(0xc0000000));
       __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
       // Smi tag result.
+      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
       __ SmiTag(v0);
-      __ Ret();
       break;
     case Token::SHL:
       // Remove tags from operands.
@@ -2037,8 +2042,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
       // Check that the signed result fits in a Smi.
       __ Addu(scratch2, scratch1, Operand(0x40000000));
       __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
-      __ SmiTag(v0, scratch1);
-      __ Ret();
+      __ Ret(USE_DELAY_SLOT);
+      __ SmiTag(v0, scratch1);  // SmiTag emits one instruction in delay slot.
       break;
     default:
       UNREACHABLE();
@@ -2240,8 +2245,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
       // Check that the *signed* result fits in a smi.
       __ Addu(a3, a2, Operand(0x40000000));
       __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
      __ SmiTag(v0, a2);
-      __ Ret();
       // Allocate new heap number for result.
       __ bind(&result_not_a_smi);
@@ -2520,8 +2525,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
           __ bind(&not_zero);
           // Tag the result and return.
-          __ SmiTag(v0, scratch1);
-          __ Ret();
+          __ Ret(USE_DELAY_SLOT);
+          __ SmiTag(v0, scratch1);  // SmiTag emits one instruction.
         } else {
           // DIV just falls through to allocating a heap number.
         }
@@ -2538,9 +2543,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                                  scratch2,
                                                  &call_runtime,
                                                  mode_);
+        __ sdc1(f10,
+                FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+        __ Ret(USE_DELAY_SLOT);
         __ mov(v0, heap_number_result);
-        __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
-        __ Ret();
         // A DIV operation expecting an integer result falls through
         // to type transition.
@@ -2660,8 +2666,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       // If not try to return a heap number. (We know the result is an int32.)
       __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
       // Tag the result and return.
+      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
       __ SmiTag(v0, a2);
-      __ Ret();
       __ bind(&return_heap_number);
       heap_number_result = t1;
@@ -2684,9 +2690,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       }
       // Store the result.
+      __ sdc1(double_scratch,
+              FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+      __ Ret(USE_DELAY_SLOT);
       __ mov(v0, heap_number_result);
-      __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
-      __ Ret();
       break;
     }
@@ -4124,8 +4131,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   __ subu(a3, a0, a1);
   __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(a3, fp, Operand(t3));
+  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, MemOperand(a3, kDisplacement));
-  __ Ret();
   // Arguments adaptor case: Check index (a1) against actual arguments
   // limit found in the arguments adaptor frame. Use unsigned
@@ -4138,8 +4145,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   __ subu(a3, a0, a1);
   __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(a3, a2, Operand(t3));
+  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, MemOperand(a3, kDisplacement));
-  __ Ret();
   // Slow-case: Handle non-smi or out-of-bounds access to arguments
   // by calling the runtime system.
@@ -6002,16 +6009,18 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
   __ bind(&strings_not_equal);
+  ASSERT(is_int16(NOT_EQUAL));
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
-  __ Ret();
   // Check if the length is zero.
   Label compare_chars;
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+  ASSERT(is_int16(EQUAL));
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
-  __ Ret();
   // Compare characters.
   __ bind(&compare_chars);
@@ -6021,8 +6030,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                 &strings_not_equal);
   // Characters are equal.
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
-  __ Ret();
 }
@@ -6540,14 +6549,15 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   if (GetCondition() == eq) {
     // For equality we do not care about the sign of the result.
+    __ Ret(USE_DELAY_SLOT);
     __ Subu(v0, a0, a1);
   } else {
     // Untag before subtracting to avoid handling overflow.
     __ SmiUntag(a1);
     __ SmiUntag(a0);
+    __ Ret(USE_DELAY_SLOT);
     __ Subu(v0, a1, a0);
   }
-  __ Ret();
   __ bind(&miss);
   GenerateMiss(masm);
@@ -6608,16 +6618,17 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
   __ BranchF(&fpu_lt, NULL, lt, f0, f2);
   // Otherwise it's greater, so just fall thru, and return.
+  ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(GREATER));
-  __ Ret();
   __ bind(&fpu_eq);
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(EQUAL));
-  __ Ret();
   __ bind(&fpu_lt);
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(LESS));
-  __ Ret();
   __ bind(&unordered);
   __ bind(&generic_stub);
@@ -6676,8 +6687,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   __ mov(v0, right);
   // Internalized strings are compared by identity.
   __ Ret(ne, left, Operand(right));
+  ASSERT(is_int16(EQUAL));
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
-  __ Ret();
   __ bind(&miss);
   GenerateMiss(masm);
@@ -7561,8 +7573,8 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   }
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ sll(a1, a1, kPointerSizeLog2);
+  __ Ret(USE_DELAY_SLOT);
   __ Addu(sp, sp, a1);
-  __ Ret();
 }
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 63fde97118..b2c78a38d6 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -337,8 +337,8 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
   __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
   // Load length directly from the JS array.
+  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Ret();
 }
@@ -384,8 +384,8 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                       support_wrappers ? &check_wrapper : miss);
   // Load length directly from the string.
+  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
-  __ Ret();
   if (support_wrappers) {
     // Check if the object is a JSValue wrapper.
@@ -395,8 +395,8 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
     // Unwrap the value and check if the wrapped value is a string.
     __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
     GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ Ret(USE_DELAY_SLOT);
     __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
-    __ Ret();
   }
 }
@@ -407,8 +407,8 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                  Register scratch2,
                                                  Label* miss_label) {
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, scratch1);
-  __ Ret();
 }
@@ -639,8 +639,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
   // Return the value (register v0).
   ASSERT(value_reg.is(a0));
   __ bind(&exit);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  __ Ret();
 }
@@ -715,8 +715,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
     // Return the value (register v0).
     ASSERT(value_reg.is(a0));
+    __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a0);
-    __ Ret();
     return;
   }
@@ -773,8 +773,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   // Return the value (register v0).
   ASSERT(value_reg.is(a0));
   __ bind(&exit);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  __ Ret();
 }
@@ -1706,8 +1706,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
   if (argc == 0) {
     // Nothing to do, just return the length.
     __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-    __ Drop(argc + 1);
-    __ Ret();
+    __ DropAndRet(argc + 1);
   } else {
     Label call_builtin;
     if (argc == 1) {  // Otherwise fall through to call the builtin.
@@ -1755,8 +1754,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ sw(t0, MemOperand(end_elements));
       // Check for a smi.
-      __ Drop(argc + 1);
-      __ Ret();
+      __ DropAndRet(argc + 1);
       __ bind(&check_double);
@@ -1788,8 +1786,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
       // Check for a smi.
-      __ Drop(argc + 1);
-      __ Ret();
+      __ DropAndRet(argc + 1);
       __ bind(&with_write_barrier);
@@ -1855,8 +1852,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
-      __ Drop(argc + 1);
-      __ Ret();
+      __ DropAndRet(argc + 1);
       __ bind(&attempt_to_grow_elements);
       // v0: array's length + 1.
@@ -1911,8 +1907,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
       // Elements are in new space, so write barrier is not required.
-      __ Drop(argc + 1);
-      __ Ret();
+      __ DropAndRet(argc + 1);
     }
     __ bind(&call_builtin);
     __ TailCallExternalReference(
@@ -1991,13 +1986,11 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
   // Fill with the hole.
   __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   __ bind(&return_undefined);
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   __ bind(&call_builtin);
   __ TailCallExternalReference(
@@ -2072,8 +2065,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
                                       index_out_of_range_label,
                                       STRING_INDEX_IS_NUMBER);
   generator.GenerateFast(masm());
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   StubRuntimeCallHelper call_helper;
   generator.GenerateSlow(masm(), call_helper);
@@ -2081,8 +2073,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
     __ LoadRoot(v0, Heap::kNanValueRootIndex);
-    __ Drop(argc + 1);
-    __ Ret();
+    __ DropAndRet(argc + 1);
   }
   __ bind(&miss);
@@ -2155,8 +2146,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
                                   index_out_of_range_label,
                                   STRING_INDEX_IS_NUMBER);
   generator.GenerateFast(masm());
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   StubRuntimeCallHelper call_helper;
   generator.GenerateSlow(masm(), call_helper);
@@ -2164,8 +2154,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
     __ LoadRoot(v0, Heap::kempty_stringRootIndex);
-    __ Drop(argc + 1);
-    __ Ret();
+    __ DropAndRet(argc + 1);
   }
   __ bind(&miss);
@@ -2231,8 +2220,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
   StringCharFromCodeGenerator generator(code, v0);
   generator.GenerateFast(masm());
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   StubRuntimeCallHelper call_helper;
   generator.GenerateSlow(masm(), call_helper);
@@ -2295,8 +2283,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   // If the argument is a smi, just return.
   STATIC_ASSERT(kSmiTag == 0);
   __ And(t0, v0, Operand(kSmiTagMask));
-  __ Drop(argc + 1, eq, t0, Operand(zero_reg));
-  __ Ret(eq, t0, Operand(zero_reg));
+  __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
   __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
@@ -2361,8 +2348,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   // Restore FCSR and return.
   __ ctc1(a3, FCSR);
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   __ bind(&wont_fit_smi);
   // Restore FCSR and fall to slow case.
@@ -2441,8 +2427,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   __ Branch(&slow, lt, v0, Operand(zero_reg));
   // Smi case done.
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   // Check if the argument is a heap number and load its exponent and
   // sign.
@@ -2455,8 +2440,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   Label negative_sign;
   __ And(t0, a1, Operand(HeapNumber::kSignMask));
   __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   // If the argument is negative, clear the sign, and return a new
   // number.
@@ -2467,8 +2451,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
   __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
   __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
@@ -3066,8 +3049,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, t0);
-  __ Ret();
   // Return the generated code.
   return GetICCode(kind(), Code::NORMAL, name);
@@ -3338,8 +3321,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   }
   // Entry registers are intact, a0 holds the value which is the return value.
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  __ Ret();
   if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     // a3: external array.
@@ -3406,8 +3389,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     // Entry registers are intact, a0 holds the value
     // which is the return value.
+    __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a0);
-    __ Ret();
   }
   // Slow case, key and receiver still in a0 and a1.
@@ -3568,8 +3551,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
     // Increment the length of the array.
     __ li(length_reg, Operand(Smi::FromInt(1)));
+    __ Ret(USE_DELAY_SLOT);
     __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ Ret();
     __ bind(&check_capacity);
     // Check for cow elements, in general they are not handled by this stub
@@ -3733,9 +3716,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     // Increment the length of the array.
     __ li(length_reg, Operand(Smi::FromInt(1)));
     __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ Ret(USE_DELAY_SLOT);
    __ lw(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ Ret();
     __ bind(&check_capacity);
     // Make sure that the backing store can hold additional elements.
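
Note on the pattern used throughout this patch (an illustrative sketch, not part of the change itself): on MIPS the instruction that follows a jump or branch sits in the delay slot and executes unconditionally. Passing USE_DELAY_SLOT to Ret() suppresses the padding nop after "jr ra", so the next emitted macro lands in the slot, which is only safe when that macro expands to exactly one machine instruction; that is what the is_int16 ASSERTs on GREATER/LESS/EQUAL guard for the li immediates. Only macros that already appear in the patch are used below.

    // Before: the assembler pads the delay slot of "jr ra" with a nop.
    __ mov(v0, a2);          // Set up the return value.
    __ Ret();                // jr ra; nop  (wasted slot)

    // After: the single-instruction mov executes in the delay slot.
    __ Ret(USE_DELAY_SLOT);  // jr ra; the next instruction fills the slot.
    __ mov(v0, a2);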