diff --git a/src/assembler.cc b/src/assembler.cc index ad5f350816..d7b13e6a7e 100644 --- a/src/assembler.cc +++ b/src/assembler.cc @@ -736,6 +736,10 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref) : address_(table_ref.address()) {} +ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) { + return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache))); +} + ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) { return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC))); diff --git a/src/assembler.h b/src/assembler.h index d58034df0d..8162016917 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -561,6 +561,7 @@ class ExternalReference BASE_EMBEDDED { // pattern. This means that they have to be added to the // ExternalReferenceTable in serialize.cc manually. + static ExternalReference flush_icache_function(Isolate* isolate); static ExternalReference perform_gc_function(Isolate* isolate); static ExternalReference fill_heap_number_with_random_function( Isolate* isolate); diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc index 4592cb5c4e..d62b757521 100644 --- a/src/mips/code-stubs-mips.cc +++ b/src/mips/code-stubs-mips.cc @@ -615,7 +615,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, Register object, Destination destination, - FPURegister double_dst, + DoubleRegister double_dst, Register dst1, Register dst2, Register heap_number_map, @@ -651,25 +651,16 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, // Load the double value. __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); - // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). - // On MIPS a lot of things cannot be implemented the same way so right - // now it makes a lot more sense to just do things manually. - - // Save FCSR. - __ cfc1(scratch1, FCSR); - // Disable FPU exceptions. - __ ctc1(zero_reg, FCSR); - __ trunc_w_d(single_scratch, double_dst); - // Retrieve FCSR. - __ cfc1(scratch2, FCSR); - // Restore FCSR. - __ ctc1(scratch1, FCSR); - - // Check for inexact conversion or exception. - __ And(scratch2, scratch2, kFCSRFlagMask); + Register except_flag = scratch2; + __ EmitFPUTruncate(kRoundToZero, + single_scratch, + double_dst, + scratch1, + except_flag, + kCheckForInexactConversion); // Jump to not_int32 if the operation did not succeed. - __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); + __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); if (destination == kCoreRegisters) { __ Move(dst1, dst2, double_dst); @@ -706,7 +697,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3, - FPURegister double_scratch, + DoubleRegister double_scratch, Label* not_int32) { ASSERT(!dst.is(object)); ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); @@ -735,27 +726,19 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, // Load the double value. __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); - // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). - // On MIPS a lot of things cannot be implemented the same way so right - // now it makes a lot more sense to just do things manually. - - // Save FCSR. - __ cfc1(scratch1, FCSR); - // Disable FPU exceptions. 
- __ ctc1(zero_reg, FCSR); - __ trunc_w_d(double_scratch, double_scratch); - // Retrieve FCSR. - __ cfc1(scratch2, FCSR); - // Restore FCSR. - __ ctc1(scratch1, FCSR); - - // Check for inexact conversion or exception. - __ And(scratch2, scratch2, kFCSRFlagMask); + FPURegister single_scratch = double_scratch.low(); + Register except_flag = scratch2; + __ EmitFPUTruncate(kRoundToZero, + single_scratch, + double_scratch, + scratch1, + except_flag, + kCheckForInexactConversion); // Jump to not_int32 if the operation did not succeed. - __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); + __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); // Get the result in the destination register. - __ mfc1(dst, double_scratch); + __ mfc1(dst, single_scratch); } else { // Load the double value in the destination registers. @@ -884,7 +867,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( { AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( - ExternalReference::double_fp_operation(op, masm->isolate()), 4); + ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); } // Store answer in the overwritable heap number. if (!IsMipsSoftFloatABI) { @@ -1260,7 +1243,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { if (!CpuFeatures::IsSupported(FPU)) { __ push(ra); - __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments. + __ PrepareCallCFunction(0, 2, t4); if (!IsMipsSoftFloatABI) { // We are not using MIPS FPU instructions, and parameters for the runtime // function call are prepaired in a0-a3 registers, but function we are @@ -1270,19 +1253,15 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { __ Move(f12, a0, a1); __ Move(f14, a2, a3); } - __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4); + __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), + 0, 2); __ pop(ra); // Because this function returns int, result is in v0. __ Ret(); } else { CpuFeatures::Scope scope(FPU); Label equal, less_than; - __ c(EQ, D, f12, f14); - __ bc1t(&equal); - __ nop(); - - __ c(OLT, D, f12, f14); - __ bc1t(&less_than); - __ nop(); + __ BranchF(&equal, NULL, eq, f12, f14); + __ BranchF(&less_than, NULL, lt, f12, f14); // Not equal, not less, not NaN, must be greater. __ li(v0, Operand(GREATER)); @@ -1475,9 +1454,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ JumpIfSmi(probe, not_found); __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); - __ c(EQ, D, f12, f14); - __ bc1t(&load_result_from_cache); - __ nop(); // bc1t() requires explicit fill of branch delay slot. + __ BranchF(&load_result_from_cache, NULL, eq, f12, f14); __ Branch(not_found); } else { // Note that there is no cache check for non-FPU case, even though @@ -1593,9 +1570,7 @@ void CompareStub::Generate(MacroAssembler* masm) { __ li(t2, Operand(EQUAL)); // Check if either rhs or lhs is NaN. - __ c(UN, D, f12, f14); - __ bc1t(&nan); - __ nop(); + __ BranchF(NULL, &nan, eq, f12, f14); // Check if LESS condition is satisfied. If true, move conditionally // result to v0. @@ -1713,89 +1688,116 @@ void CompareStub::Generate(MacroAssembler* masm) { } -// The stub returns zero for false, and a non-zero value for true. +// The stub expects its argument in the tos_ register and returns its result in +// it, too: zero for false, and a non-zero value for true. 
void ToBooleanStub::Generate(MacroAssembler* masm) { // This stub uses FPU instructions. CpuFeatures::Scope scope(FPU); - Label false_result; - Label not_heap_number; - Register scratch0 = t5.is(tos_) ? t3 : t5; + Label patch; + const Register map = t5.is(tos_) ? t3 : t5; - // undefined -> false - __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex); - __ Branch(&false_result, eq, tos_, Operand(scratch0)); + // undefined -> false. + CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); - // Boolean -> its value - __ LoadRoot(scratch0, Heap::kFalseValueRootIndex); - __ Branch(&false_result, eq, tos_, Operand(scratch0)); - __ LoadRoot(scratch0, Heap::kTrueValueRootIndex); - // "tos_" is a register and contains a non-zero value. Hence we implicitly - // return true if the equal condition is satisfied. - __ Ret(eq, tos_, Operand(scratch0)); + // Boolean -> its value. + CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); + CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); - // Smis: 0 -> false, all other -> true - __ And(scratch0, tos_, tos_); - __ Branch(&false_result, eq, scratch0, Operand(zero_reg)); - __ And(scratch0, tos_, Operand(kSmiTagMask)); - // "tos_" is a register and contains a non-zero value. Hence we implicitly - // return true if the not equal condition is satisfied. - __ Ret(eq, scratch0, Operand(zero_reg)); + // 'null' -> false. + CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); - // 'null' -> false - __ LoadRoot(scratch0, Heap::kNullValueRootIndex); - __ Branch(&false_result, eq, tos_, Operand(scratch0)); + if (types_.Contains(SMI)) { + // Smis: 0 -> false, all other -> true + __ And(at, tos_, kSmiTagMask); + // tos_ contains the correct return value already + __ Ret(eq, at, Operand(zero_reg)); + } else if (types_.NeedsMap()) { + // If we need a map later and have a Smi -> patch. + __ JumpIfSmi(tos_, &patch); + } - // HeapNumber => false if +0, -0, or NaN. - __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - __ Branch(¬_heap_number, ne, scratch0, Operand(at)); + if (types_.NeedsMap()) { + __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset)); - __ fcmp(f12, 0.0, UEQ); + if (types_.CanBeUndetectable()) { + __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(at, at, Operand(1 << Map::kIsUndetectable)); + // Undetectable -> false. + __ movn(tos_, zero_reg, at); + __ Ret(ne, at, Operand(zero_reg)); + } + } - // "tos_" is a register, and contains a non zero value by default. - // Hence we only need to overwrite "tos_" with zero to return false for - // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. - __ movt(tos_, zero_reg); - __ Ret(); + if (types_.Contains(SPEC_OBJECT)) { + // Spec object -> true. + __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); + // tos_ contains the correct non-zero return value already. + __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); + } - __ bind(¬_heap_number); + if (types_.Contains(STRING)) { + // String value -> false iff empty. + __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); + Label skip; + __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE)); + __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); + __ Ret(); // the string length is OK as the return value + __ bind(&skip); + } - // It can be an undetectable object. - // Undetectable => false. 
- __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset)); - __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); - __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable)); + if (types_.Contains(HEAP_NUMBER)) { + // Heap number -> false iff +0, -0, or NaN. + Label not_heap_number; + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + __ Branch(¬_heap_number, ne, map, Operand(at)); + Label zero_or_nan, number; + __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset)); + __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero); + // "tos_" is a register, and contains a non zero value by default. + // Hence we only need to overwrite "tos_" with zero to return false for + // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. + __ bind(&zero_or_nan); + __ mov(tos_, zero_reg); + __ bind(&number); + __ Ret(); + __ bind(¬_heap_number); + } - // JavaScript object => true. - __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); + __ bind(&patch); + GenerateTypeTransition(masm); +} - // "tos_" is a register and contains a non-zero value. - // Hence we implicitly return true if the greater than - // condition is satisfied. - __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); - // Check for string. - __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); - // "tos_" is a register and contains a non-zero value. - // Hence we implicitly return true if the greater than - // condition is satisfied. - __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE)); +void ToBooleanStub::CheckOddball(MacroAssembler* masm, + Type type, + Heap::RootListIndex value, + bool result) { + if (types_.Contains(type)) { + // If we see an expected oddball, return its ToBoolean value tos_. + __ LoadRoot(at, value); + __ Subu(at, at, tos_); // This is a check for equality for the movz below. + // The value of a root is never NULL, so we can avoid loading a non-null + // value into tos_ when we want to return 'true'. + if (!result) { + __ movz(tos_, zero_reg, at); + } + __ Ret(eq, at, Operand(zero_reg)); + } +} - // String value => false iff empty, i.e., length is zero. - __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); - // If length is zero, "tos_" contains zero ==> false. - // If length is not zero, "tos_" contains a non-zero value ==> true. - __ Ret(); - // Return 0 in "tos_" for false. - __ bind(&false_result); - __ mov(tos_, zero_reg); - __ Ret(); +void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { + __ Move(a3, tos_); + __ li(a2, Operand(Smi::FromInt(tos_.code()))); + __ li(a1, Operand(Smi::FromInt(types_.ToByte()))); + __ Push(a3, a2, a1); + // Patch the caller to an appropriate specialized stub and return the + // operation result to the caller of the stub. + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()), + 3, + 1); } @@ -2721,26 +2723,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Otherwise return a heap number if allowed, or jump to type // transition. - // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). - // On MIPS a lot of things cannot be implemented the same way so right - // now it makes a lot more sense to just do things manually. - - // Save FCSR. - __ cfc1(scratch1, FCSR); - // Disable FPU exceptions. 
- __ ctc1(zero_reg, FCSR); - __ trunc_w_d(single_scratch, f10); - // Retrieve FCSR. - __ cfc1(scratch2, FCSR); - // Restore FCSR. - __ ctc1(scratch1, FCSR); - - // Check for inexact conversion or exception. - __ And(scratch2, scratch2, kFCSRFlagMask); + Register except_flag = scratch2; + __ EmitFPUTruncate(kRoundToZero, + single_scratch, + f10, + scratch1, + except_flag); if (result_type_ <= BinaryOpIC::INT32) { - // If scratch2 != 0, result does not fit in a 32-bit integer. - __ Branch(&transition, ne, scratch2, Operand(zero_reg)); + // If except_flag != 0, result does not fit in a 32-bit integer. + __ Branch(&transition, ne, except_flag, Operand(zero_reg)); } // Check if the result fits in a smi. @@ -3229,7 +3221,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ lw(t0, MemOperand(cache_entry, 0)); __ lw(t1, MemOperand(cache_entry, 4)); __ lw(t2, MemOperand(cache_entry, 8)); - __ Addu(cache_entry, cache_entry, 12); __ Branch(&calculate, ne, a2, Operand(t0)); __ Branch(&calculate, ne, a3, Operand(t1)); // Cache hit. Load result, cleanup and return. @@ -3263,13 +3254,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Register a0 holds precalculated cache entry address; preserve // it on the stack and pop it into register cache_entry after the // call. - __ push(cache_entry); + __ Push(cache_entry, a2, a3); GenerateCallCFunction(masm, scratch0); __ GetCFunctionDoubleResult(f4); // Try to update the cache. If we cannot allocate a // heap number, we return the result without updating. - __ pop(cache_entry); + __ Pop(cache_entry, a2, a3); __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); @@ -3323,22 +3314,25 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, __ push(ra); __ PrepareCallCFunction(2, scratch); if (IsMipsSoftFloatABI) { - __ Move(v0, v1, f4); + __ Move(a0, a1, f4); } else { __ mov_d(f12, f4); } switch (type_) { case TranscendentalCache::SIN: __ CallCFunction( - ExternalReference::math_sin_double_function(masm->isolate()), 2); + ExternalReference::math_sin_double_function(masm->isolate()), + 0, 1); break; case TranscendentalCache::COS: __ CallCFunction( - ExternalReference::math_cos_double_function(masm->isolate()), 2); + ExternalReference::math_cos_double_function(masm->isolate()), + 0, 1); break; case TranscendentalCache::LOG: __ CallCFunction( - ExternalReference::math_log_double_function(masm->isolate()), 2); + ExternalReference::math_log_double_function(masm->isolate()), + 0, 1); break; default: UNIMPLEMENTED(); @@ -3421,12 +3415,12 @@ void MathPowStub::Generate(MacroAssembler* masm) { heapnumbermap, &call_runtime); __ push(ra); - __ PrepareCallCFunction(3, scratch); + __ PrepareCallCFunction(1, 1, scratch); __ SetCallCDoubleArguments(double_base, exponent); { AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( - ExternalReference::power_double_int_function(masm->isolate()), 3); + ExternalReference::power_double_int_function(masm->isolate()), 1, 1); __ pop(ra); __ GetCFunctionDoubleResult(double_result); } @@ -3452,7 +3446,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { heapnumbermap, &call_runtime); __ push(ra); - __ PrepareCallCFunction(4, scratch); + __ PrepareCallCFunction(0, 2, scratch); // ABI (o32) for func(double a, double b): a in f12, b in f14. 
ASSERT(double_base.is(f12)); ASSERT(double_exponent.is(f14)); @@ -3460,7 +3454,9 @@ void MathPowStub::Generate(MacroAssembler* masm) { { AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), 4); + ExternalReference::power_double_double_function(masm->isolate()), + 0, + 2); __ pop(ra); __ GetCFunctionDoubleResult(double_result); } @@ -3505,9 +3501,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, if (do_gc) { // Move result passed in v0 into a0 to call PerformGC. __ mov(a0, v0); - __ PrepareCallCFunction(1, a1); + __ PrepareCallCFunction(1, 0, a1); __ CallCFunction( - ExternalReference::perform_gc_function(masm->isolate()), 1); + ExternalReference::perform_gc_function(masm->isolate()), + 1, 0); } ExternalReference scope_depth = @@ -3712,8 +3709,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { CpuFeatures::Scope scope(FPU); // Save callee-saved FPU registers. __ MultiPushFPU(kCalleeSavedFPU); + // Set up the reserved register for 0.0. + __ Move(kDoubleRegZero, 0.0); } + // Load argv in s0 register. int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; if (CpuFeatures::IsSupported(FPU)) { @@ -3870,11 +3870,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // * object: a0 or at sp + 1 * kPointerSize. // * function: a1 or at sp. // -// Inlined call site patching is a crankshaft-specific feature that is not -// implemented on MIPS. +// An inlined call site may have been generated before calling this stub. +// In this case the offset to the inline site to patch is passed on the stack, +// in the safepoint slot for register t0. void InstanceofStub::Generate(MacroAssembler* masm) { - // This is a crankshaft-specific feature that has not been implemented yet. - ASSERT(!HasCallSiteInlineCheck()); // Call site inlining and patching implies arguments in registers. ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); // ReturnTrueFalse is only implemented for inlined call sites. @@ -3888,6 +3887,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) { const Register inline_site = t5; const Register scratch = a2; + const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize; + Label slow, loop, is_instance, is_not_instance, not_js_object; if (!HasArgsInRegisters()) { @@ -3903,10 +3904,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // real lookup and update the call site cache. if (!HasCallSiteInlineCheck()) { Label miss; - __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex); - __ Branch(&miss, ne, function, Operand(t1)); - __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex); - __ Branch(&miss, ne, map, Operand(t1)); + __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex); + __ Branch(&miss, ne, function, Operand(at)); + __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex); + __ Branch(&miss, ne, map, Operand(at)); __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); __ DropAndRet(HasArgsInRegisters() ? 0 : 2); @@ -3926,7 +3927,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); } else { - UNIMPLEMENTED_MIPS(); + ASSERT(HasArgsInRegisters()); + // Patch the (relocated) inlined map check. + + // The offset was stored in t0 safepoint slot. 
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) + __ LoadFromSafepointRegisterSlot(scratch, t0); + __ Subu(inline_site, ra, scratch); + // Patch the relocated value to map. + __ PatchRelocatedValue(inline_site, scratch, map); } // Register mapping: a3 is object map and t0 is function prototype. @@ -3952,7 +3961,16 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ mov(v0, zero_reg); __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); } else { - UNIMPLEMENTED_MIPS(); + // Patch the call site to return true. + __ LoadRoot(v0, Heap::kTrueValueRootIndex); + __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // Get the boolean result location in scratch and patch it. + __ PatchRelocatedValue(inline_site, scratch, v0); + + if (!ReturnTrueFalseObject()) { + ASSERT_EQ(Smi::FromInt(0), 0); + __ mov(v0, zero_reg); + } } __ DropAndRet(HasArgsInRegisters() ? 0 : 2); @@ -3961,8 +3979,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ li(v0, Operand(Smi::FromInt(1))); __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); } else { - UNIMPLEMENTED_MIPS(); + // Patch the call site to return false. + __ LoadRoot(v0, Heap::kFalseValueRootIndex); + __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // Get the boolean result location in scratch and patch it. + __ PatchRelocatedValue(inline_site, scratch, v0); + + if (!ReturnTrueFalseObject()) { + __ li(v0, Operand(Smi::FromInt(1))); + } } + __ DropAndRet(HasArgsInRegisters() ? 0 : 2); Label object_not_null, object_not_null_or_smi; @@ -6477,39 +6504,25 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { __ Subu(a2, a0, Operand(kHeapObjectTag)); __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); - Label fpu_eq, fpu_lt, fpu_gt; - // Compare operands (test if unordered). - __ c(UN, D, f0, f2); - // Don't base result on status bits when a NaN is involved. - __ bc1t(&unordered); - __ nop(); + // Return a result of -1, 0, or 1, or use CompareStub for NaNs. + Label fpu_eq, fpu_lt; + // Test if equal, and also handle the unordered/NaN case. + __ BranchF(&fpu_eq, &unordered, eq, f0, f2); - // Test if equal. - __ c(EQ, D, f0, f2); - __ bc1t(&fpu_eq); - __ nop(); + // Test if less (unordered case is already handled). + __ BranchF(&fpu_lt, NULL, lt, f0, f2); - // Test if unordered or less (unordered case is already handled). - __ c(ULT, D, f0, f2); - __ bc1t(&fpu_lt); - __ nop(); + // Otherwise it's greater, so just fall thru, and return. + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(GREATER)); // In delay slot. - // Otherwise it's greater. - __ bc1f(&fpu_gt); - __ nop(); - - // Return a result of -1, 0, or 1. __ bind(&fpu_eq); - __ li(v0, Operand(EQUAL)); - __ Ret(); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(EQUAL)); // In delay slot. __ bind(&fpu_lt); - __ li(v0, Operand(LESS)); - __ Ret(); - - __ bind(&fpu_gt); - __ li(v0, Operand(GREATER)); - __ Ret(); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(LESS)); // In delay slot. __ bind(&unordered); } diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc index dd3282c219..be5c9bd300 100644 --- a/src/mips/macro-assembler-mips.cc +++ b/src/mips/macro-assembler-mips.cc @@ -818,6 +818,20 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) { } +void MacroAssembler::FlushICache(Register address, unsigned instructions) { + RegList saved_regs = kJSCallerSaved | ra.bit(); + MultiPush(saved_regs); + + // Save to a0 in case address == t0. 
+ Move(a0, address); + PrepareCallCFunction(2, t0); + + li(a1, instructions * kInstrSize); + CallCFunction(ExternalReference::flush_icache_function(isolate()), 2); + MultiPop(saved_regs); +} + + void MacroAssembler::Ext(Register rt, Register rs, uint16_t pos, @@ -4605,6 +4619,37 @@ void MacroAssembler::CallCFunctionHelper(Register function, #undef BRANCH_ARGS_CHECK +void MacroAssembler::PatchRelocatedValue(Register li_location, + Register scratch, + Register new_value) { + lw(scratch, MemOperand(li_location)); + // At this point scratch is a lui(at, ...) instruction. + if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, "The instruction to patch should be a lui.", + scratch, Operand(LUI)); + lw(scratch, MemOperand(li_location)); + } + srl(t9, new_value, kImm16Bits); + Ins(scratch, t9, 0, kImm16Bits); + sw(scratch, MemOperand(li_location)); + + lw(scratch, MemOperand(li_location, kInstrSize)); + // scratch is now ori(at, ...). + if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, "The instruction to patch should be an ori.", + scratch, Operand(ORI)); + lw(scratch, MemOperand(li_location, kInstrSize)); + } + Ins(scratch, new_value, 0, kImm16Bits); + sw(scratch, MemOperand(li_location, kInstrSize)); + + // Update the I-cache so the new lui and ori can be executed. + FlushICache(li_location, 2); +} + + void MacroAssembler::LoadInstanceDescriptors(Register map, Register descriptors) { lw(descriptors, diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h index 4db897b574..5f60aa1e10 100644 --- a/src/mips/macro-assembler-mips.h +++ b/src/mips/macro-assembler-mips.h @@ -577,6 +577,10 @@ class MacroAssembler: public Assembler { // into register dst. void LoadFromSafepointRegisterSlot(Register dst, Register src); + // Flush the I-cache from asm code. You should use CPU::FlushICache from C. + // Does not handle errors. + void FlushICache(Register address, unsigned instructions); + // MIPS32 R2 instruction macro. void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); @@ -1210,6 +1214,11 @@ class MacroAssembler: public Assembler { void EnterFrame(StackFrame::Type type); void LeaveFrame(StackFrame::Type type); + // Patch the relocated value (lui/ori pair). + void PatchRelocatedValue(Register li_location, + Register scratch, + Register new_value); + private: void CallCFunctionHelper(Register function, ExternalReference function_reference, diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc index 085aa86919..251c6ff5d8 100644 --- a/src/mips/stub-cache-mips.cc +++ b/src/mips/stub-cache-mips.cc @@ -2560,7 +2560,12 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object, ? CALL_AS_FUNCTION : CALL_AS_METHOD; if (V8::UseCrankshaft()) { - UNIMPLEMENTED_MIPS(); + // TODO(kasperl): For now, we always call indirectly through the + // code field in the function to allow recompilation to take effect + // without changing any of the call sites. + __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION, + NullCallWrapper(), call_kind); } else { __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET, JUMP_FUNCTION, call_kind); @@ -3837,7 +3842,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset)); // Check that the index is in range. 
- __ SmiUntag(t0, key); __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset)); // Unsigned comparison catches both negative and too-large values. __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1)); @@ -3845,7 +3849,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // Handle both smis and HeapNumbers in the fast path. Go to the // runtime for all other kinds of values. // a3: external array. - // t0: key (integer). if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) { // Double to pixel conversion is only implemented in the runtime for now. @@ -3857,7 +3860,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset)); // a3: base pointer of external storage. - // t0: key (integer). // t1: value (integer). switch (elements_kind) { @@ -3874,33 +3876,36 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ mov(v0, t1); // Value is in range 0..255. __ bind(&done); __ mov(t1, v0); - __ addu(t8, a3, t0); + + __ srl(t8, key, 1); + __ addu(t8, a3, t8); __ sb(t1, MemOperand(t8, 0)); } break; case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ addu(t8, a3, t0); + __ srl(t8, key, 1); + __ addu(t8, a3, t8); __ sb(t1, MemOperand(t8, 0)); break; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ sll(t8, t0, 1); - __ addu(t8, a3, t8); + __ addu(t8, a3, key); __ sh(t1, MemOperand(t8, 0)); break; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ sll(t8, t0, 2); + __ sll(t8, key, 1); __ addu(t8, a3, t8); __ sw(t1, MemOperand(t8, 0)); break; case EXTERNAL_FLOAT_ELEMENTS: // Perform int-to-float conversion and store to memory. + __ SmiUntag(t0, key); StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4); break; case EXTERNAL_DOUBLE_ELEMENTS: - __ sll(t8, t0, 3); + __ sll(t8, key, 2); __ addu(a3, a3, t8); // a3: effective address of the double element FloatingPointHelper::Destination destination; @@ -3930,12 +3935,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } // Entry registers are intact, a0 holds the value which is the return value. - __ mov(v0, value); + __ mov(v0, a0); __ Ret(); if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) { // a3: external array. - // t0: index (integer). __ bind(&check_heap_number); __ GetObjectType(value, t1, t2); __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE)); @@ -3943,7 +3947,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset)); // a3: base pointer of external storage. - // t0: key (integer). // The WebGL specification leaves the behavior of storing NaN and // +/-Infinity into integer arrays basically undefined. 
For more @@ -3956,11 +3959,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ cvt_s_d(f0, f0); - __ sll(t8, t0, 2); + __ sll(t8, key, 1); __ addu(t8, a3, t8); __ swc1(f0, MemOperand(t8, 0)); } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - __ sll(t8, t0, 3); + __ sll(t8, key, 2); __ addu(t8, a3, t8); __ sdc1(f0, MemOperand(t8, 0)); } else { @@ -3969,18 +3972,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ addu(t8, a3, t0); + __ srl(t8, key, 1); + __ addu(t8, a3, t8); __ sb(t3, MemOperand(t8, 0)); break; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ sll(t8, t0, 1); - __ addu(t8, a3, t8); + __ addu(t8, a3, key); __ sh(t3, MemOperand(t8, 0)); break; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ sll(t8, t0, 2); + __ sll(t8, key, 1); __ addu(t8, a3, t8); __ sw(t3, MemOperand(t8, 0)); break; @@ -3998,7 +4001,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // Entry registers are intact, a0 holds the value // which is the return value. - __ mov(v0, value); + __ mov(v0, a0); __ Ret(); } else { // FPU is not available, do manual conversions. @@ -4053,13 +4056,13 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ or_(t3, t7, t6); __ bind(&done); - __ sll(t9, a1, 2); + __ sll(t9, key, 1); __ addu(t9, a2, t9); __ sw(t3, MemOperand(t9, 0)); // Entry registers are intact, a0 holds the value which is the return // value. - __ mov(v0, value); + __ mov(v0, a0); __ Ret(); __ bind(&nan_or_infinity_or_zero); @@ -4077,6 +4080,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // t8: effective address of destination element. __ sw(t4, MemOperand(t8, 0)); __ sw(t3, MemOperand(t8, Register::kSizeInBytes)); + __ mov(v0, a0); __ Ret(); } else { bool is_signed_type = IsElementTypeSigned(elements_kind); @@ -4139,18 +4143,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ addu(t8, a3, t0); + __ srl(t8, key, 1); + __ addu(t8, a3, t8); __ sb(t3, MemOperand(t8, 0)); break; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ sll(t8, t0, 1); - __ addu(t8, a3, t8); + __ addu(t8, a3, key); __ sh(t3, MemOperand(t8, 0)); break; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ sll(t8, t0, 2); + __ sll(t8, key, 1); __ addu(t8, a3, t8); __ sw(t3, MemOperand(t8, 0)); break;
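
The hunks in src/mips/code-stubs-mips.cc that touch LoadNumberAsInt32Double, LoadNumberAsInt32 and BinaryOpStub::GenerateInt32Stub all replace the same hand-rolled sequence (save FCSR, clear it, trunc_w_d, re-read FCSR, mask with kFCSRFlagMask) with a single MacroAssembler::EmitFPUTruncate call that leaves the accumulated exception bits in except_flag; a non-zero flag means the double could not be converted to a 32-bit integer exactly, and the stubs branch to not_int32 or to the type transition. A rough standalone C++ analogue of that check, for illustration only (the helper name and structure are not part of V8):

    #include <cmath>
    #include <cstdint>

    // Truncate toward zero and report whether the conversion was exact and
    // in range. Returning false corresponds to a non-zero except_flag after
    // EmitFPUTruncate(kRoundToZero, ..., kCheckForInexactConversion).
    static bool TruncateDoubleToInt32(double value, int32_t* result) {
      double truncated = std::trunc(value);              // round toward zero
      if (!(truncated >= INT32_MIN && truncated <= INT32_MAX))
        return false;                                    // NaN or out of range
      *result = static_cast<int32_t>(truncated);
      return truncated == value;                         // false if inexact
    }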
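
The CallCFunction/PrepareCallCFunction call sites now pass two counts, integer arguments and double arguments, instead of a single pre-computed word count: 0, 2 for compare_doubles and power_double_double, 1, 1 for power_double_int, 0, 1 for the transcendental functions. Under the o32 ABI a double occupies two argument words and, with hard float, is passed in f12/f14 (see the "ABI (o32) for func(double a, double b)" comment in MathPowStub), so the old single-count form had to encode "two doubles count as 4 arguments" by hand. A small illustrative helper (not part of the V8 API) showing the word count the two new parameters imply:

    // Each integer argument needs one 32-bit word on o32, each double two.
    static int ArgumentWords(int num_reg_arguments, int num_double_arguments) {
      return num_reg_arguments + 2 * num_double_arguments;
    }
    // e.g. ArgumentWords(0, 2) == 4, matching the old PrepareCallCFunction(4, ...).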
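
MacroAssembler::PatchRelocatedValue supports the newly enabled inline instanceof checks: InstanceofStub locates the inlined map check from the offset stored in t0's safepoint slot, then rewrites the 16-bit immediate fields of the relocated lui/ori pair (and, kDeltaToLoadBoolResult bytes further on, the inlined boolean result) before calling FlushICache so the patched instructions are re-fetched. A minimal sketch of the word-level patching, assuming hand-assembled example encodings (the constants below are illustrative, not taken from the patch):

    #include <cstdint>
    #include <cstdio>

    // Replace the low 16 bits (the immediate field) of a MIPS I-type
    // instruction, as PatchRelocatedValue does via Ins(scratch, ..., 0, kImm16Bits).
    static uint32_t PatchImm16(uint32_t instr, uint32_t imm) {
      return (instr & ~0xFFFFu) | (imm & 0xFFFFu);
    }

    int main() {
      uint32_t lui_instr = 0x3C010000;   // lui at, 0 (placeholder immediate)
      uint32_t ori_instr = 0x34210000;   // ori at, at, 0
      uint32_t new_value = 0x12345678;   // e.g. the expected map

      lui_instr = PatchImm16(lui_instr, new_value >> 16);     // upper half
      ori_instr = PatchImm16(ori_instr, new_value & 0xFFFF);  // lower half

      std::printf("patched: lui=%08x ori=%08x\n", lui_instr, ori_instr);
      return 0;
    }

In the real code the two instruction words are read and written back with lw/sw at li_location and li_location + kInstrSize, and FlushICache(li_location, 2) keeps the instruction cache coherent with the rewritten pair.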
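
The addressing changes in KeyedStoreStubCompiler::GenerateStoreExternalArray follow from keeping the key smi-tagged (the SmiUntag into t0 is deleted): a smi holds its value shifted left by one, so the scale-by-element-size folds into a single shift of the tagged key. Byte elements shift right by one, 16-bit elements add the tagged key directly, 32-bit elements shift left by one, and doubles shift left by two; only the int-to-float path still untags into t0 for StoreIntAsFloat. A short sketch of the arithmetic, with illustrative helper names:

    #include <cstdint>

    // 32-bit V8 smi tagging: the value lives in the upper 31 bits
    // (overflow handling omitted in this sketch).
    static int32_t SmiTag(int32_t value) { return value << 1; }

    // Byte offset of element |tagged_key| in an external array whose elements
    // are |element_size| bytes, computed directly from the smi-tagged key.
    static int32_t ElementOffset(int32_t tagged_key, int element_size) {
      switch (element_size) {
        case 1:  return tagged_key >> 1;   // srl(t8, key, 1)
        case 2:  return tagged_key;        // addu(t8, a3, key)
        case 4:  return tagged_key << 1;   // sll(t8, key, 1)
        default: return tagged_key << 2;   // sll(t8, key, 2), 8-byte doubles
      }
    }

    // ElementOffset(SmiTag(5), 4) == 20: element 5 of an int32 array.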