MIPS: Fill more branch delay slots.

TEST=
BUG=
R=plind44@gmail.com

Review URL: https://codereview.chromium.org/15967006

Patch from Dusan Milosavljevic <Dusan.Milosavljevic@rt-rk.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15011 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: plind44@gmail.com
Date:   2013-06-07 16:00:19 +0000
Commit: d7431f2c6e (parent: 5160d982ac)

3 changed files with 84 additions and 87 deletions
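
The change applies one pattern throughout: on MIPS, every jump and branch is followed by a branch delay slot that executes regardless of whether the branch is taken. A plain `__ Ret()` leaves a wasted `nop` in that slot; `__ Ret(USE_DELAY_SLOT)` tells the macro assembler to let the next emitted instruction ride in the slot instead, saving one instruction on each return path. A minimal before/after sketch of the transformation, condensed from the hunks below:

    // Before: set up the return value, then return; the assembler
    // pads the delay slot of "jr ra" with a nop.
    __ mov(v0, a2);
    __ Ret();                  // jr ra; nop in delay slot

    // After: the single-instruction mov is sunk into the delay slot.
    __ Ret(USE_DELAY_SLOT);    // jr ra; slot filled by next instruction
    __ mov(v0, a2);            // executes in the delay slot

This is only safe when the instruction after `Ret(USE_DELAY_SLOT)` expands to exactly one machine instruction. That is why the patch adds `ASSERT(is_int16(...))` before the `li` loads it sinks into delay slots: `li` with a 16-bit immediate emits a single instruction, while a wider immediate would expand to a `lui`/`ori` pair and overflow the slot.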

File 1 of 3:

@@ -335,9 +335,9 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
                      call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
   // Set up return value, remove receiver from stack and return.
-  __ mov(v0, a2);
   __ Addu(sp, sp, Operand(kPointerSize));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a2);

   // Check for one argument. Bail out if argument is not smi or if it is
   // negative.
@@ -378,9 +378,9 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
   __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
   // Set up return value, remove receiver and argument from stack and return.
-  __ mov(v0, a3);
   __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a3);

   // Handle construction of an array from a list of arguments.
   __ bind(&argc_two_or_more);
@@ -434,8 +434,8 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
   // a3: JSArray
   // sp[0]: receiver
   __ Addu(sp, sp, Operand(kPointerSize));
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a3);
-  __ Ret();

   __ bind(&has_non_smi_element);
   // Double values are handled by the runtime.
@@ -1372,15 +1372,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
   Label with_tos_register, unknown_state;
   __ Branch(&with_tos_register,
             ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill delay slot; Addu will emit one instruction.
   __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
-  __ Ret();

   __ bind(&with_tos_register);
   __ lw(v0, MemOperand(sp, 1 * kPointerSize));
   __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill delay slot; Addu will emit one instruction.
   __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
-  __ Ret();

   __ bind(&unknown_state);
   __ stop("no cases left");

File 2 of 3:

@@ -348,8 +348,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));

   // Return result. The argument function info has been popped already.
+  __ Ret(USE_DELAY_SLOT);
   __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-  __ Ret();

   __ bind(&check_optimized);
@@ -969,9 +969,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
   __ sll(scratch_, the_int_, 32 - shift_distance);
+  __ Ret(USE_DELAY_SLOT);
   __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));
-  __ Ret();

   __ bind(&max_negative_int);
   // The max negative int32 is stored as a positive number in the mantissa of
@@ -983,9 +983,9 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   __ sw(scratch_,
         FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
   __ mov(scratch_, zero_reg);
+  __ Ret(USE_DELAY_SLOT);
   __ sw(scratch_,
         FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
-  __ Ret();
 }
@@ -1023,6 +1023,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
         __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
         __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
         __ Branch(&return_equal, ne, a0, Operand(t2));
+        ASSERT(is_int16(GREATER) && is_int16(LESS));
+        __ Ret(USE_DELAY_SLOT);
         if (cc == le) {
           // undefined <= undefined should fail.
           __ li(v0, Operand(GREATER));
@@ -1030,13 +1032,13 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
           // undefined >= undefined should fail.
           __ li(v0, Operand(LESS));
         }
-        __ Ret();
       }
     }
   }

   __ bind(&return_equal);
+  ASSERT(is_int16(GREATER) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
   if (cc == less) {
     __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
   } else if (cc == greater) {
@@ -1044,7 +1046,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
   } else {
     __ mov(v0, zero_reg);  // Things are <=, >=, ==, === themselves.
   }
-  __ Ret();

   // For less and greater we don't have to check for NaN since the result of
   // x < x is false regardless. For the others here is some code to check
@@ -1075,13 +1076,14 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
     if (cc != eq) {
       // All-zero means Infinity means equal.
       __ Ret(eq, v0, Operand(zero_reg));
+      ASSERT(is_int16(GREATER) && is_int16(LESS));
+      __ Ret(USE_DELAY_SLOT);
       if (cc == le) {
         __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
       } else {
         __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
       }
     }
-    __ Ret();
   }

   // No fall through here.
@@ -1456,12 +1458,14 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&nan);
   // NaN comparisons always fail.
   // Load whatever we need in v0 to make the comparison fail.
+  ASSERT(is_int16(GREATER) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
   if (cc == lt || cc == le) {
     __ li(v0, Operand(GREATER));
   } else {
     __ li(v0, Operand(LESS));
   }
-  __ Ret();

   __ bind(&not_smis);
   // At this point we know we are dealing with two different objects,
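
One idiom in the hunks above deserves a note: `__ Ret(USE_DELAY_SLOT)` is emitted before a C++ `if`/`else`, which looks as if the return precedes the value load. It does not: `cc` is fixed when the stub is compiled, so exactly one of the two `li` instructions is ever emitted, and that single instruction becomes the delay-slot instruction of the `jr ra` produced by `Ret`. Condensed from the hunk above:

    ASSERT(is_int16(GREATER) && is_int16(LESS));  // li must stay one instruction.
    __ Ret(USE_DELAY_SLOT);
    if (cc == lt || cc == le) {
      __ li(v0, Operand(GREATER));  // emitted only for "less" stubs
    } else {
      __ li(v0, Operand(LESS));     // emitted only for "greater" stubs
    }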
@@ -1725,6 +1729,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
   if (mode_ == UNARY_OVERWRITE) {
     __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
     __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ Ret(USE_DELAY_SLOT);
     __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
   } else {
     Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -1746,9 +1751,9 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
     __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
     __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
     __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+    __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a1);
   }
-  __ Ret();
 }
@@ -1768,8 +1773,8 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
   __ Branch(&try_float, lt, a2, Operand(zero_reg));

   // Tag the result as a smi and we're done.
+  __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
   __ SmiTag(v0, a1);
-  __ Ret();

   // Try to store the result in a heap number.
   __ bind(&try_float);
@@ -1968,8 +1973,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
         // Check that the signed result fits in a Smi.
         __ Addu(scratch2, scratch1, Operand(0x40000000));
         __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+        __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
         __ SmiTag(v0, scratch1);
-        __ Ret();
       }
       break;
     case Token::MOD: {
@@ -1991,8 +1996,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
         // Check that the signed result fits in a Smi.
         __ Addu(scratch1, scratch2, Operand(0x40000000));
         __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+        __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
         __ SmiTag(v0, scratch2);
-        __ Ret();
       }
       break;
     case Token::BIT_OR:
@@ -2026,8 +2031,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
       __ And(scratch1, v0, Operand(0xc0000000));
       __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
       // Smi tag result.
+      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
       __ SmiTag(v0);
-      __ Ret();
       break;
     case Token::SHL:
       // Remove tags from operands.
@@ -2037,8 +2042,8 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
       // Check that the signed result fits in a Smi.
       __ Addu(scratch2, scratch1, Operand(0x40000000));
       __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
-      __ SmiTag(v0, scratch1);
-      __ Ret();
+      __ Ret(USE_DELAY_SLOT);
+      __ SmiTag(v0, scratch1);  // SmiTag emits one instruction in delay slot.
       break;
     default:
       UNREACHABLE();
@@ -2240,8 +2245,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
       // Check that the *signed* result fits in a smi.
       __ Addu(a3, a2, Operand(0x40000000));
       __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
       __ SmiTag(v0, a2);
-      __ Ret();

       // Allocate new heap number for result.
       __ bind(&result_not_a_smi);
@@ -2520,8 +2525,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
         __ bind(&not_zero);

         // Tag the result and return.
-        __ SmiTag(v0, scratch1);
-        __ Ret();
+        __ Ret(USE_DELAY_SLOT);
+        __ SmiTag(v0, scratch1);  // SmiTag emits one instruction.
       } else {
         // DIV just falls through to allocating a heap number.
       }
@@ -2538,9 +2543,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                                  scratch2,
                                                  &call_runtime,
                                                  mode_);
+        __ sdc1(f10,
+                FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+        __ Ret(USE_DELAY_SLOT);
         __ mov(v0, heap_number_result);
-        __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
-        __ Ret();

       // A DIV operation expecting an integer result falls through
       // to type transition.
@@ -2660,8 +2666,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       // If not try to return a heap number. (We know the result is an int32.)
       __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
       // Tag the result and return.
+      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
       __ SmiTag(v0, a2);
-      __ Ret();

       __ bind(&return_heap_number);
       heap_number_result = t1;
@@ -2684,9 +2690,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       }

      // Store the result.
+      __ sdc1(double_scratch,
+              FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+      __ Ret(USE_DELAY_SLOT);
       __ mov(v0, heap_number_result);
-      __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
-      __ Ret();
       break;
     }
@@ -4124,8 +4131,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   __ subu(a3, a0, a1);
   __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(a3, fp, Operand(t3));
+  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, MemOperand(a3, kDisplacement));
-  __ Ret();

   // Arguments adaptor case: Check index (a1) against actual arguments
   // limit found in the arguments adaptor frame. Use unsigned
@@ -4138,8 +4145,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   __ subu(a3, a0, a1);
   __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(a3, a2, Operand(t3));
+  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, MemOperand(a3, kDisplacement));
-  __ Ret();

   // Slow-case: Handle non-smi or out-of-bounds access to arguments
   // by calling the runtime system.
@@ -6002,16 +6009,18 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
   __ bind(&strings_not_equal);
+  ASSERT(is_int16(NOT_EQUAL));
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
-  __ Ret();

   // Check if the length is zero.
   Label compare_chars;
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+  ASSERT(is_int16(EQUAL));
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
-  __ Ret();

   // Compare characters.
   __ bind(&compare_chars);
@@ -6021,8 +6030,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                 &strings_not_equal);

   // Characters are equal.
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
-  __ Ret();
 }
@@ -6540,14 +6549,15 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   if (GetCondition() == eq) {
     // For equality we do not care about the sign of the result.
+    __ Ret(USE_DELAY_SLOT);
     __ Subu(v0, a0, a1);
   } else {
     // Untag before subtracting to avoid handling overflow.
     __ SmiUntag(a1);
     __ SmiUntag(a0);
+    __ Ret(USE_DELAY_SLOT);
     __ Subu(v0, a1, a0);
   }
-  __ Ret();

   __ bind(&miss);
   GenerateMiss(masm);
@@ -6608,16 +6618,17 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
   __ BranchF(&fpu_lt, NULL, lt, f0, f2);

   // Otherwise it's greater, so just fall thru, and return.
+  ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(GREATER));
-  __ Ret();

   __ bind(&fpu_eq);
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(EQUAL));
-  __ Ret();

   __ bind(&fpu_lt);
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(LESS));
-  __ Ret();

   __ bind(&unordered);
   __ bind(&generic_stub);
@@ -6676,8 +6687,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   __ mov(v0, right);
   // Internalized strings are compared by identity.
   __ Ret(ne, left, Operand(right));
+  ASSERT(is_int16(EQUAL));
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
-  __ Ret();

   __ bind(&miss);
   GenerateMiss(masm);
@@ -7561,8 +7573,8 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   }
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ sll(a1, a1, kPointerSizeLog2);
+  __ Ret(USE_DELAY_SLOT);
   __ Addu(sp, sp, a1);
-  __ Ret();
 }

File 3 of 3:

@@ -337,8 +337,8 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
   __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));

   // Load length directly from the JS array.
+  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Ret();
 }
@@ -384,8 +384,8 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                       support_wrappers ? &check_wrapper : miss);

   // Load length directly from the string.
+  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
-  __ Ret();

   if (support_wrappers) {
     // Check if the object is a JSValue wrapper.
@@ -395,8 +395,8 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
     // Unwrap the value and check if the wrapped value is a string.
     __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
     GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ Ret(USE_DELAY_SLOT);
     __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
-    __ Ret();
   }
 }
@@ -407,8 +407,8 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                  Register scratch2,
                                                  Label* miss_label) {
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, scratch1);
-  __ Ret();
 }
@@ -639,8 +639,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
   // Return the value (register v0).
   ASSERT(value_reg.is(a0));
   __ bind(&exit);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  __ Ret();
 }
@@ -715,8 +715,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
     // Return the value (register v0).
     ASSERT(value_reg.is(a0));
+    __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a0);
-    __ Ret();
     return;
   }
@@ -773,8 +773,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   // Return the value (register v0).
   ASSERT(value_reg.is(a0));
   __ bind(&exit);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  __ Ret();
 }
@@ -1706,8 +1706,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
   if (argc == 0) {
     // Nothing to do, just return the length.
     __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-    __ Drop(argc + 1);
-    __ Ret();
+    __ DropAndRet(argc + 1);
   } else {
     Label call_builtin;
     if (argc == 1) {  // Otherwise fall through to call the builtin.
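
Throughout this file, `Drop(argc + 1); Ret();` pairs collapse into a single `DropAndRet(argc + 1)`. The macro presumably performs the same delay-slot trick internally; a hypothetical sketch of its body (the real definition lives in the MIPS macro assembler and is an assumption here, not part of this patch):

    // Hypothetical expansion: pop "drop" stack slots in the return's delay slot.
    void MacroAssembler::DropAndRet(int drop) {
      Ret(USE_DELAY_SLOT);
      addiu(sp, sp, drop * kPointerSize);  // one instruction, fills the slot
    }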
@@ -1755,8 +1754,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ sw(t0, MemOperand(end_elements));

       // Check for a smi.
-      __ Drop(argc + 1);
-      __ Ret();
+      __ DropAndRet(argc + 1);

       __ bind(&check_double);
@@ -1788,8 +1786,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));

       // Check for a smi.
-      __ Drop(argc + 1);
-      __ Ret();
+      __ DropAndRet(argc + 1);

       __ bind(&with_write_barrier);
@@ -1855,8 +1852,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
-      __ Drop(argc + 1);
-      __ Ret();
+      __ DropAndRet(argc + 1);

      __ bind(&attempt_to_grow_elements);
      // v0: array's length + 1.
@@ -1911,8 +1907,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));

       // Elements are in new space, so write barrier is not required.
-      __ Drop(argc + 1);
-      __ Ret();
+      __ DropAndRet(argc + 1);
     }
     __ bind(&call_builtin);
     __ TailCallExternalReference(
@@ -1991,13 +1986,11 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
   // Fill with the hole.
   __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   __ bind(&return_undefined);
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   __ bind(&call_builtin);
   __ TailCallExternalReference(
@@ -2072,8 +2065,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
                                   index_out_of_range_label,
                                   STRING_INDEX_IS_NUMBER);
   generator.GenerateFast(masm());
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   StubRuntimeCallHelper call_helper;
   generator.GenerateSlow(masm(), call_helper);
@@ -2081,8 +2073,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
     __ LoadRoot(v0, Heap::kNanValueRootIndex);
-    __ Drop(argc + 1);
-    __ Ret();
+    __ DropAndRet(argc + 1);
   }

   __ bind(&miss);
@@ -2155,8 +2146,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
                                   index_out_of_range_label,
                                   STRING_INDEX_IS_NUMBER);
   generator.GenerateFast(masm());
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   StubRuntimeCallHelper call_helper;
   generator.GenerateSlow(masm(), call_helper);
@@ -2164,8 +2154,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
     __ LoadRoot(v0, Heap::kempty_stringRootIndex);
-    __ Drop(argc + 1);
-    __ Ret();
+    __ DropAndRet(argc + 1);
   }

   __ bind(&miss);
@@ -2231,8 +2220,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
   StringCharFromCodeGenerator generator(code, v0);
   generator.GenerateFast(masm());
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   StubRuntimeCallHelper call_helper;
   generator.GenerateSlow(masm(), call_helper);
@@ -2295,8 +2283,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   // If the argument is a smi, just return.
   STATIC_ASSERT(kSmiTag == 0);
   __ And(t0, v0, Operand(kSmiTagMask));
-  __ Drop(argc + 1, eq, t0, Operand(zero_reg));
-  __ Ret(eq, t0, Operand(zero_reg));
+  __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));

   __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
@@ -2361,8 +2348,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   // Restore FCSR and return.
   __ ctc1(a3, FCSR);

-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   __ bind(&wont_fit_smi);
   // Restore FCSR and fall to slow case.
@@ -2441,8 +2427,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   __ Branch(&slow, lt, v0, Operand(zero_reg));

   // Smi case done.
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   // Check if the argument is a heap number and load its exponent and
   // sign.
@@ -2455,8 +2440,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   Label negative_sign;
   __ And(t0, a1, Operand(HeapNumber::kSignMask));
   __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   // If the argument is negative, clear the sign, and return a new
   // number.
@@ -2467,8 +2451,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
   __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
   __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-  __ Drop(argc + 1);
-  __ Ret();
+  __ DropAndRet(argc + 1);

   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
@@ -3066,8 +3049,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, t0);
-  __ Ret();

   // Return the generated code.
   return GetICCode(kind(), Code::NORMAL, name);
@@ -3338,8 +3321,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   }

   // Entry registers are intact, a0 holds the value which is the return value.
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  __ Ret();

   if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     // a3: external array.
@@ -3406,8 +3389,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     // Entry registers are intact, a0 holds the value
     // which is the return value.
+    __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a0);
-    __ Ret();
   }

   // Slow case, key and receiver still in a0 and a1.
@@ -3568,8 +3551,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
     // Increment the length of the array.
     __ li(length_reg, Operand(Smi::FromInt(1)));
+    __ Ret(USE_DELAY_SLOT);
     __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ Ret();

     __ bind(&check_capacity);
     // Check for cow elements, in general they are not handled by this stub
@@ -3733,9 +3716,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     // Increment the length of the array.
     __ li(length_reg, Operand(Smi::FromInt(1)));
     __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ Ret(USE_DELAY_SLOT);
     __ lw(elements_reg,
           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ Ret();

     __ bind(&check_capacity);
     // Make sure that the backing store can hold additional elements.