MIPS: pre-crankshaft updates to code-stubs and stub-cache (3/3)
Highlights:
- code-stubs-mips.cc
  -- use EmitFPUTruncate in place of inline code in several places.
  -- use BranchF macro rather than lower-level FP cmp and branch for readability.
  -- Port of Sven's r8859 (Implement type recording for ToBoolean) and r8886
     (Simplify and optimize ToBoolean handling.)
  -- Fix bug in TranscendentalCacheStub::Generate where some regs were not
     saved across CFunction call.
  -- use updated xxxCFunction macros.
  -- update InstanceOfStub to support crankshaft DoDeferredLInstanceOfKnownGlobal.
  -- Provide code-patching and I-cache flushing support for generated code,
     used for InstanceOfStub under crankshaft (not submitted here). This
     requires adding a new ExternalReference to src/assembler.cc,h.
- stub-cache-mips.cc
  -- port Danno's r8901 (Create a common base class for Fixed-, FixedDouble-
     and ExternalArrays) to the mips crankshaft branch.

BUG=
TEST=

Review URL: http://codereview.chromium.org/7890001
Patch from Paul Lind <plind44@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9308 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
commit c74aae242a
parent c579bfe6e2
src/assembler.cc
@@ -736,6 +736,10 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref)
     : address_(table_ref.address()) {}
 
 
+ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
+}
+
 ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(Runtime::PerformGC)));
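The new flush_icache_function reference exposes CPU::FlushICache to generated code. On MIPS the instruction cache does not automatically observe stores made to code memory, so patched instructions must be flushed before they execute. A standalone sketch of what such a flush helper does, using the GCC/Clang builtin instead of V8 internals (illustrative, not the V8 implementation):

```cpp
// Standalone model (not V8 code) of a CPU::FlushICache-style helper.
// After writing new instructions, the affected range must be synchronized
// from the D-cache to the I-cache before it is executed.
#include <cstddef>

void FlushInstructionCache(void* start, size_t size) {
  char* begin = static_cast<char*>(start);
  // GCC/Clang builtin; on MIPS Linux this typically lowers to the
  // cacheflush syscall that a real FlushICache wraps.
  __builtin___clear_cache(begin, begin + size);
}
```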
src/assembler.h
@@ -561,6 +561,7 @@ class ExternalReference BASE_EMBEDDED {
   // pattern. This means that they have to be added to the
   // ExternalReferenceTable in serialize.cc manually.
 
+  static ExternalReference flush_icache_function(Isolate* isolate);
   static ExternalReference perform_gc_function(Isolate* isolate);
   static ExternalReference fill_heap_number_with_random_function(
       Isolate* isolate);
src/mips/code-stubs-mips.cc
@@ -615,7 +615,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
-                                                  FPURegister double_dst,
+                                                  DoubleRegister double_dst,
                                                   Register dst1,
                                                   Register dst2,
                                                   Register heap_number_map,
@@ -651,25 +651,16 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
     // Load the double value.
     __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(single_scratch, double_dst);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_dst,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
 
     if (destination == kCoreRegisters) {
       __ Move(dst1, dst2, double_dst);
@@ -706,7 +697,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
-                                            FPURegister double_scratch,
+                                            DoubleRegister double_scratch,
                                             Label* not_int32) {
   ASSERT(!dst.is(object));
   ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -735,27 +726,19 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
     // Load the double value.
     __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(double_scratch, double_scratch);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    FPURegister single_scratch = double_scratch.low();
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
     // Get the result in the destination register.
-    __ mfc1(dst, double_scratch);
+    __ mfc1(dst, single_scratch);
 
   } else {
     // Load the double value in the destination registers.
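Both LoadNumberAsInt32Double and LoadNumberAsInt32 now delegate the truncate-and-check dance (save FCSR, clear flags, trunc.w.d, re-read flags) to the EmitFPUTruncate macro. The check it performs can be modeled in portable C++ with the standard floating-point environment; this is an illustrative sketch, not V8 code:

```cpp
// Standalone model of the check EmitFPUTruncate implements: truncate a
// double toward zero and report whether the value was exactly a 32-bit
// integer, which is what the removed manual FCSR sequence tested.
#include <cfenv>
#include <cmath>
#include <cstdint>

// Assumes the compiler honors runtime fenv access on this target.
bool TruncateDoubleToInt32(double input, int32_t* result) {
  int old_mode = std::fegetround();
  std::fesetround(FE_TOWARDZERO);        // kRoundToZero
  std::feclearexcept(FE_ALL_EXCEPT);
  long converted = std::lrint(input);    // Raises FE_INEXACT / FE_INVALID.
  bool ok = std::fetestexcept(FE_INEXACT | FE_INVALID) == 0 &&
            converted >= INT32_MIN && converted <= INT32_MAX;
  std::fesetround(old_mode);
  if (ok) *result = static_cast<int32_t>(converted);
  return ok;
}
```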
@@ -884,7 +867,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
   {
     AllowExternalCallThatCantCauseGC scope(masm);
     __ CallCFunction(
-        ExternalReference::double_fp_operation(op, masm->isolate()), 4);
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
   }
   // Store answer in the overwritable heap number.
   if (!IsMipsSoftFloatABI) {
@@ -1260,7 +1243,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
 
   if (!CpuFeatures::IsSupported(FPU)) {
     __ push(ra);
-    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
+    __ PrepareCallCFunction(0, 2, t4);
    if (!IsMipsSoftFloatABI) {
      // We are not using MIPS FPU instructions, and parameters for the runtime
      // function call are prepaired in a0-a3 registers, but function we are
@@ -1270,19 +1253,15 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
       __ Move(f12, a0, a1);
       __ Move(f14, a2, a3);
     }
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+                     0, 2);
     __ pop(ra);  // Because this function returns int, result is in v0.
     __ Ret();
   } else {
     CpuFeatures::Scope scope(FPU);
     Label equal, less_than;
-    __ c(EQ, D, f12, f14);
-    __ bc1t(&equal);
-    __ nop();
-
-    __ c(OLT, D, f12, f14);
-    __ bc1t(&less_than);
-    __ nop();
+    __ BranchF(&equal, NULL, eq, f12, f14);
+    __ BranchF(&less_than, NULL, lt, f12, f14);
 
     // Not equal, not less, not NaN, must be greater.
     __ li(v0, Operand(GREATER));
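BranchF folds each c.cond.fmt / bc1t / delay-slot-nop triple into a single macro call, and gives NaN operands their own target label. The three-way comparison these stubs implement looks like this in plain C++ (an illustrative model of the control flow, not V8 code):

```cpp
// Standalone model of the comparison the BranchF sequences implement:
// equal / less / greater, with NaN routed to a separate "unordered" path
// so no result is ever derived from an unordered FP compare.
#include <cmath>

enum CompareResult { kLess = -1, kEqual = 0, kGreater = 1, kUnordered = 2 };

CompareResult CompareDoubles(double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) return kUnordered;  // NaN case.
  if (lhs == rhs) return kEqual;
  return lhs < rhs ? kLess : kGreater;
}
```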
@@ -1475,9 +1454,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
     __ JumpIfSmi(probe, not_found);
     __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
     __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
-    __ c(EQ, D, f12, f14);
-    __ bc1t(&load_result_from_cache);
-    __ nop();  // bc1t() requires explicit fill of branch delay slot.
+    __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
     __ Branch(not_found);
   } else {
     // Note that there is no cache check for non-FPU case, even though
@@ -1593,9 +1570,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
     __ li(t2, Operand(EQUAL));
 
     // Check if either rhs or lhs is NaN.
-    __ c(UN, D, f12, f14);
-    __ bc1t(&nan);
-    __ nop();
+    __ BranchF(NULL, &nan, eq, f12, f14);
 
     // Check if LESS condition is satisfied. If true, move conditionally
     // result to v0.
@@ -1713,89 +1688,116 @@ void CompareStub::Generate(MacroAssembler* masm) {
 }
 
 
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument in the tos_ register and returns its result in
+// it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses FPU instructions.
   CpuFeatures::Scope scope(FPU);
 
-  Label false_result;
-  Label not_heap_number;
-  Register scratch0 = t5.is(tos_) ? t3 : t5;
-
-  // undefined -> false
-  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-
-  // Boolean -> its value
-  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
-  // "tos_" is a register and contains a non-zero value. Hence we implicitly
-  // return true if the equal condition is satisfied.
-  __ Ret(eq, tos_, Operand(scratch0));
-
-  // Smis: 0 -> false, all other -> true
-  __ And(scratch0, tos_, tos_);
-  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
-  __ And(scratch0, tos_, Operand(kSmiTagMask));
-  // "tos_" is a register and contains a non-zero value. Hence we implicitly
-  // return true if the not equal condition is satisfied.
-  __ Ret(eq, scratch0, Operand(zero_reg));
-
-  // 'null' -> false
-  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-
-  // HeapNumber => false if +0, -0, or NaN.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
-  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
-  __ fcmp(f12, 0.0, UEQ);
-  // "tos_" is a register, and contains a non zero value by default.
-  // Hence we only need to overwrite "tos_" with zero to return false for
-  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
-  __ movt(tos_, zero_reg);
-  __ Ret();
-
-  __ bind(&not_heap_number);
-
-  // It can be an undetectable object.
-  // Undetectable => false.
-  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
-  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
-  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
-
-  // JavaScript object => true.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-  // Check for string.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
-
-  // String value => false iff empty, i.e., length is zero.
-  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
-  // If length is zero, "tos_" contains zero ==> false.
-  // If length is not zero, "tos_" contains a non-zero value ==> true.
-  __ Ret();
-
-  // Return 0 in "tos_" for false.
-  __ bind(&false_result);
-  __ mov(tos_, zero_reg);
-  __ Ret();
+  Label patch;
+  const Register map = t5.is(tos_) ? t3 : t5;
+
+  // undefined -> false.
+  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
+
+  // Boolean -> its value.
+  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
+
+  // 'null' -> false.
+  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
+
+  if (types_.Contains(SMI)) {
+    // Smis: 0 -> false, all other -> true
+    __ And(at, tos_, kSmiTagMask);
+    // tos_ contains the correct return value already
+    __ Ret(eq, at, Operand(zero_reg));
+  } else if (types_.NeedsMap()) {
+    // If we need a map later and have a Smi -> patch.
+    __ JumpIfSmi(tos_, &patch);
+  }
+
+  if (types_.NeedsMap()) {
+    __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+
+    if (types_.CanBeUndetectable()) {
+      __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+      __ And(at, at, Operand(1 << Map::kIsUndetectable));
+      // Undetectable -> false.
+      __ movn(tos_, zero_reg, at);
+      __ Ret(ne, at, Operand(zero_reg));
+    }
+  }
+
+  if (types_.Contains(SPEC_OBJECT)) {
+    // Spec object -> true.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    // tos_ contains the correct non-zero return value already.
+    __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+  }
+
+  if (types_.Contains(STRING)) {
+    // String value -> false iff empty.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    Label skip;
+    __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+    __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+    __ Ret();  // the string length is OK as the return value
+    __ bind(&skip);
+  }
+
+  if (types_.Contains(HEAP_NUMBER)) {
+    // Heap number -> false iff +0, -0, or NaN.
+    Label not_heap_number;
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    __ Branch(&not_heap_number, ne, map, Operand(at));
+    Label zero_or_nan, number;
+    __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+    __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
+    // "tos_" is a register, and contains a non zero value by default.
+    // Hence we only need to overwrite "tos_" with zero to return false for
+    // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+    __ bind(&zero_or_nan);
+    __ mov(tos_, zero_reg);
+    __ bind(&number);
+    __ Ret();
+    __ bind(&not_heap_number);
+  }
+
+  __ bind(&patch);
+  GenerateTypeTransition(masm);
+}
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+                                 Type type,
+                                 Heap::RootListIndex value,
+                                 bool result) {
+  if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value tos_.
+    __ LoadRoot(at, value);
+    __ Subu(at, at, tos_);  // This is a check for equality for the movz below.
+    // The value of a root is never NULL, so we can avoid loading a non-null
+    // value into tos_ when we want to return 'true'.
+    if (!result) {
+      __ movz(tos_, zero_reg, at);
+    }
+    __ Ret(eq, at, Operand(zero_reg));
+  }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ Move(a3, tos_);
+  __ li(a2, Operand(Smi::FromInt(tos_.code())));
+  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
+  __ Push(a3, a2, a1);
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+      3,
+      1);
 }
 
 
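The ported ToBooleanStub is type-recording: it emits checks only for the input types it has already observed, and anything unexpected reaches the patch label, where GenerateTypeTransition records the new type and installs a specialized stub. A minimal standalone model of the recorded-type bitset behind the Contains/NeedsMap queries above (a sketch mirroring the names in the diff; the exact V8 definitions may differ):

```cpp
// Standalone model (not the V8 class) of the type-recording bitset.
#include <cstdint>

enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING,
            HEAP_NUMBER, NUMBER_OF_TYPES };

class Types {
 public:
  bool Contains(Type t) const { return (bits_ & (1 << t)) != 0; }
  void Add(Type t) { bits_ |= (1 << t); }   // Called when a new type is seen.
  bool NeedsMap() const {
    // Any check that must inspect the object's map.
    return Contains(SPEC_OBJECT) || Contains(STRING) || Contains(HEAP_NUMBER);
  }
  uint8_t ToByte() const { return bits_; }  // Passed to the patch IC as a smi.
 private:
  uint8_t bits_ = 0;
};
```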
@@ -2721,26 +2723,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       // Otherwise return a heap number if allowed, or jump to type
       // transition.
 
-      // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-      // On MIPS a lot of things cannot be implemented the same way so right
-      // now it makes a lot more sense to just do things manually.
-
-      // Save FCSR.
-      __ cfc1(scratch1, FCSR);
-      // Disable FPU exceptions.
-      __ ctc1(zero_reg, FCSR);
-      __ trunc_w_d(single_scratch, f10);
-      // Retrieve FCSR.
-      __ cfc1(scratch2, FCSR);
-      // Restore FCSR.
-      __ ctc1(scratch1, FCSR);
-
-      // Check for inexact conversion or exception.
-      __ And(scratch2, scratch2, kFCSRFlagMask);
+      Register except_flag = scratch2;
+      __ EmitFPUTruncate(kRoundToZero,
+                         single_scratch,
+                         f10,
+                         scratch1,
+                         except_flag);
 
       if (result_type_ <= BinaryOpIC::INT32) {
-        // If scratch2 != 0, result does not fit in a 32-bit integer.
-        __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+        // If except_flag != 0, result does not fit in a 32-bit integer.
+        __ Branch(&transition, ne, except_flag, Operand(zero_reg));
       }
 
       // Check if the result fits in a smi.
@@ -3229,7 +3221,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
     __ lw(t0, MemOperand(cache_entry, 0));
     __ lw(t1, MemOperand(cache_entry, 4));
     __ lw(t2, MemOperand(cache_entry, 8));
-    __ Addu(cache_entry, cache_entry, 12);
     __ Branch(&calculate, ne, a2, Operand(t0));
     __ Branch(&calculate, ne, a3, Operand(t1));
     // Cache hit. Load result, cleanup and return.
@@ -3263,13 +3254,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
     // Register a0 holds precalculated cache entry address; preserve
     // it on the stack and pop it into register cache_entry after the
     // call.
-    __ push(cache_entry);
+    __ Push(cache_entry, a2, a3);
     GenerateCallCFunction(masm, scratch0);
     __ GetCFunctionDoubleResult(f4);
 
     // Try to update the cache. If we cannot allocate a
     // heap number, we return the result without updating.
-    __ pop(cache_entry);
+    __ Pop(cache_entry, a2, a3);
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
     __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3323,22 +3314,25 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
   __ push(ra);
   __ PrepareCallCFunction(2, scratch);
   if (IsMipsSoftFloatABI) {
-    __ Move(v0, v1, f4);
+    __ Move(a0, a1, f4);
   } else {
     __ mov_d(f12, f4);
   }
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(
-          ExternalReference::math_sin_double_function(masm->isolate()), 2);
+          ExternalReference::math_sin_double_function(masm->isolate()),
+          0, 1);
       break;
     case TranscendentalCache::COS:
       __ CallCFunction(
-          ExternalReference::math_cos_double_function(masm->isolate()), 2);
+          ExternalReference::math_cos_double_function(masm->isolate()),
+          0, 1);
       break;
     case TranscendentalCache::LOG:
       __ CallCFunction(
-          ExternalReference::math_log_double_function(masm->isolate()), 2);
+          ExternalReference::math_log_double_function(masm->isolate()),
+          0, 1);
       break;
     default:
      UNIMPLEMENTED();
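The updated PrepareCallCFunction/CallCFunction macros take separate counts of integer and double arguments instead of counting every double as two word arguments, which lets the helper honor the o32 hard-float convention (first two doubles in f12/f14). A sketch of the stack-word bookkeeping this implies, with illustrative constants (the real helper lives in macro-assembler-mips.cc and may differ in detail):

```cpp
// Standalone model of o32 argument-slot accounting for a C call.
int CalculateStackPassedWords(int num_reg_arguments,
                              int num_double_arguments) {
  const int kRegisterPassedArguments = 4;  // a0..a3 hold the first words.
  const int kCArgSlotCount = 4;            // o32 always reserves 4 arg slots.
  // Each double still occupies an aligned pair of argument words.
  int num_words = num_reg_arguments + 2 * num_double_arguments;
  int stack_passed_words =
      num_words > kRegisterPassedArguments
          ? num_words - kRegisterPassedArguments
          : 0;
  return stack_passed_words + kCArgSlotCount;
}
```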
@@ -3421,12 +3415,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
                                 heapnumbermap,
                                 &call_runtime);
     __ push(ra);
-    __ PrepareCallCFunction(3, scratch);
+    __ PrepareCallCFunction(1, 1, scratch);
     __ SetCallCDoubleArguments(double_base, exponent);
     {
       AllowExternalCallThatCantCauseGC scope(masm);
       __ CallCFunction(
-          ExternalReference::power_double_int_function(masm->isolate()), 3);
+          ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
       __ pop(ra);
       __ GetCFunctionDoubleResult(double_result);
     }
@@ -3452,7 +3446,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
                                 heapnumbermap,
                                 &call_runtime);
     __ push(ra);
-    __ PrepareCallCFunction(4, scratch);
+    __ PrepareCallCFunction(0, 2, scratch);
     // ABI (o32) for func(double a, double b): a in f12, b in f14.
     ASSERT(double_base.is(f12));
     ASSERT(double_exponent.is(f14));
@@ -3460,7 +3454,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     {
       AllowExternalCallThatCantCauseGC scope(masm);
       __ CallCFunction(
-          ExternalReference::power_double_double_function(masm->isolate()), 4);
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0,
+          2);
       __ pop(ra);
       __ GetCFunctionDoubleResult(double_result);
     }
@@ -3505,9 +3501,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(1, a1);
+    __ PrepareCallCFunction(1, 0, a1);
     __ CallCFunction(
-        ExternalReference::perform_gc_function(masm->isolate()), 1);
+        ExternalReference::perform_gc_function(masm->isolate()),
+        1, 0);
   }
 
   ExternalReference scope_depth =
@@ -3712,8 +3709,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
     CpuFeatures::Scope scope(FPU);
     // Save callee-saved FPU registers.
     __ MultiPushFPU(kCalleeSavedFPU);
+    // Set up the reserved register for 0.0.
+    __ Move(kDoubleRegZero, 0.0);
   }
 
   // Load argv in s0 register.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   if (CpuFeatures::IsSupported(FPU)) {
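JSEntryStub now loads 0.0 into the reserved kDoubleRegZero register once per entry, so later stubs (for example the heap-number path of ToBooleanStub above) can compare against zero without materializing the constant each time. The check that register enables, modeled in plain C++ (a sketch):

```cpp
// Standalone model of the ToBooleanStub heap-number check: a heap number
// converts to false iff it is +0, -0, or NaN. The single compare against a
// pre-loaded zero register replaces a load-immediate pair per comparison.
#include <cmath>

const double kDoubleRegZero = 0.0;  // Modeled as set up once at JS entry.

bool HeapNumberToBoolean(double value) {
  // Note 0.0 == -0.0 is true, so both signed zeros convert to false.
  return !(std::isnan(value) || value == kDoubleRegZero);
}
```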
@@ -3870,11 +3870,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 //   * object: a0 or at sp + 1 * kPointerSize.
 //   * function: a1 or at sp.
 //
-// Inlined call site patching is a crankshaft-specific feature that is not
-// implemented on MIPS.
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register t0.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  // This is a crankshaft-specific feature that has not been implemented yet.
-  ASSERT(!HasCallSiteInlineCheck());
   // Call site inlining and patching implies arguments in registers.
   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
   // ReturnTrueFalse is only implemented for inlined call sites.
@@ -3888,6 +3887,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   const Register inline_site = t5;
   const Register scratch = a2;
 
+  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
+
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
   if (!HasArgsInRegisters()) {
@@ -3903,10 +3904,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
-    __ Branch(&miss, ne, function, Operand(t1));
-    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
-    __ Branch(&miss, ne, map, Operand(t1));
+    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(at));
+    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(at));
     __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3926,7 +3927,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    ASSERT(HasArgsInRegisters());
+    // Patch the (relocated) inlined map check.
+
+    // The offset was stored in t0 safepoint slot.
+    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+    __ LoadFromSafepointRegisterSlot(scratch, t0);
+    __ Subu(inline_site, ra, scratch);
+    // Patch the relocated value to map.
+    __ PatchRelocatedValue(inline_site, scratch, map);
   }
 
   // Register mapping: a3 is object map and t0 is function prototype.
@@ -3952,7 +3961,16 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return true.
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      ASSERT_EQ(Smi::FromInt(0), 0);
+      __ mov(v0, zero_reg);
+    }
   }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3961,8 +3979,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ li(v0, Operand(Smi::FromInt(1)));
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return false.
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      __ li(v0, Operand(Smi::FromInt(1)));
+    }
   }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   Label object_not_null, object_not_null_or_smi;
@@ -6477,39 +6504,25 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
     __ Subu(a2, a0, Operand(kHeapObjectTag));
     __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
 
-    Label fpu_eq, fpu_lt, fpu_gt;
-    // Compare operands (test if unordered).
-    __ c(UN, D, f0, f2);
-    // Don't base result on status bits when a NaN is involved.
-    __ bc1t(&unordered);
-    __ nop();
-
-    // Test if equal.
-    __ c(EQ, D, f0, f2);
-    __ bc1t(&fpu_eq);
-    __ nop();
-
-    // Test if unordered or less (unordered case is already handled).
-    __ c(ULT, D, f0, f2);
-    __ bc1t(&fpu_lt);
-    __ nop();
-
-    // Otherwise it's greater.
-    __ bc1f(&fpu_gt);
-    __ nop();
-
-    // Return a result of -1, 0, or 1.
+    // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+    Label fpu_eq, fpu_lt;
+    // Test if equal, and also handle the unordered/NaN case.
+    __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
+
+    // Test if less (unordered case is already handled).
+    __ BranchF(&fpu_lt, NULL, lt, f0, f2);
+
+    // Otherwise it's greater, so just fall thru, and return.
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(GREATER));  // In delay slot.
+
     __ bind(&fpu_eq);
-    __ li(v0, Operand(EQUAL));
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(EQUAL));  // In delay slot.
 
     __ bind(&fpu_lt);
-    __ li(v0, Operand(LESS));
-    __ Ret();
-
-    __ bind(&fpu_gt);
-    __ li(v0, Operand(GREATER));
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(LESS));  // In delay slot.
 
     __ bind(&unordered);
   }
src/mips/macro-assembler-mips.cc
@@ -818,6 +818,20 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
 }
 
 
+void MacroAssembler::FlushICache(Register address, unsigned instructions) {
+  RegList saved_regs = kJSCallerSaved | ra.bit();
+  MultiPush(saved_regs);
+
+  // Save to a0 in case address == t0.
+  Move(a0, address);
+  PrepareCallCFunction(2, t0);
+
+  li(a1, instructions * kInstrSize);
+  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
+  MultiPop(saved_regs);
+}
+
+
 void MacroAssembler::Ext(Register rt,
                          Register rs,
                          uint16_t pos,
@@ -4605,6 +4619,37 @@ void MacroAssembler::CallCFunctionHelper(Register function,
 #undef BRANCH_ARGS_CHECK
 
 
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+                                         Register scratch,
+                                         Register new_value) {
+  lw(scratch, MemOperand(li_location));
+  // At this point scratch is a lui(at, ...) instruction.
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be a lui.",
+        scratch, Operand(LUI));
+    lw(scratch, MemOperand(li_location));
+  }
+  srl(t9, new_value, kImm16Bits);
+  Ins(scratch, t9, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location));
+
+  lw(scratch, MemOperand(li_location, kInstrSize));
+  // scratch is now ori(at, ...).
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be an ori.",
+        scratch, Operand(ORI));
+    lw(scratch, MemOperand(li_location, kInstrSize));
+  }
+  Ins(scratch, new_value, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location, kInstrSize));
+
+  // Update the I-cache so the new lui and ori can be executed.
+  FlushICache(li_location, 2);
+}
+
+
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   lw(descriptors,
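PatchRelocatedValue rewrites the two 16-bit immediate fields of the lui/ori pair that li emits for a full-width constant, then flushes the I-cache over both instructions. The same bit surgery in standalone C++ (a model, not V8 code):

```cpp
// Standalone model of patching a 32-bit constant loaded by
//   lui at, upper16
//   ori at, at, lower16
// MIPS encodes the 16-bit immediate in the low bits of each instruction.
#include <cstdint>

void PatchLuiOriPair(uint32_t* lui_instr, uint32_t* ori_instr,
                     uint32_t new_value) {
  const uint32_t kImm16Mask = 0xFFFF;
  *lui_instr = (*lui_instr & ~kImm16Mask) | (new_value >> 16);
  *ori_instr = (*ori_instr & ~kImm16Mask) | (new_value & kImm16Mask);
  // The caller must then flush the I-cache over both instructions, which
  // is exactly what FlushICache(li_location, 2) does above.
}
```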
src/mips/macro-assembler-mips.h
@@ -577,6 +577,10 @@ class MacroAssembler: public Assembler {
   // into register dst.
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
+  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
+  // Does not handle errors.
+  void FlushICache(Register address, unsigned instructions);
+
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -1210,6 +1214,11 @@ class MacroAssembler: public Assembler {
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
 
+  // Patch the relocated value (lui/ori pair).
+  void PatchRelocatedValue(Register li_location,
+                           Register scratch,
+                           Register new_value);
+
  private:
   void CallCFunctionHelper(Register function,
                            ExternalReference function_reference,
src/mips/stub-cache-mips.cc
@@ -2560,7 +2560,12 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   } else {
     __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
                   JUMP_FUNCTION, call_kind);
@@ -3837,7 +3842,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
   // Check that the index is in range.
-  __ SmiUntag(t0, key);
   __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
@@ -3845,7 +3849,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
   // a3: external array.
-  // t0: key (integer).
 
   if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // Double to pixel conversion is only implemented in the runtime for now.
@@ -3857,7 +3860,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
   // a3: base pointer of external storage.
-  // t0: key (integer).
   // t1: value (integer).
 
   switch (elements_kind) {
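These stores now keep the key smi-tagged (a smi is the integer value shifted left by one), which is why the scaling shifts below all dropped by one: srl by 1 untags for byte elements, the raw smi already equals the byte offset for two-byte elements, and sll by 1 or 2 scales for four- and eight-byte elements. The offset arithmetic, modeled standalone (a sketch):

```cpp
// Standalone model of the smi-key element-offset arithmetic in the
// rewritten switch cases. smi_key == index << 1, so for an element of
// size 2^log2_element_size bytes the offset is smi_key shifted by
// (log2_element_size - 1).
#include <cstdint>

int32_t ElementOffsetFromSmiKey(int32_t smi_key, int log2_element_size) {
  //   1-byte elements: offset = smi_key >> 1   (srl t8, key, 1)
  //   2-byte elements: offset = smi_key        (addu t8, a3, key)
  //   4-byte elements: offset = smi_key << 1   (sll t8, key, 1)
  //   8-byte elements: offset = smi_key << 2   (sll t8, key, 2)
  int shift = log2_element_size - 1;
  return shift >= 0 ? smi_key << shift : smi_key >> 1;
}
```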
@@ -3874,33 +3876,36 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
         __ mov(v0, t1);  // Value is in range 0..255.
         __ bind(&done);
         __ mov(t1, v0);
-        __ addu(t8, a3, t0);
+        __ srl(t8, key, 1);
+        __ addu(t8, a3, t8);
         __ sb(t1, MemOperand(t8, 0));
       }
       break;
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ addu(t8, a3, t0);
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
       __ sb(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_SHORT_ELEMENTS:
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ sll(t8, t0, 1);
-      __ addu(t8, a3, t8);
+      __ addu(t8, a3, key);
       __ sh(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ sll(t8, t0, 2);
+      __ sll(t8, key, 1);
       __ addu(t8, a3, t8);
       __ sw(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_FLOAT_ELEMENTS:
       // Perform int-to-float conversion and store to memory.
+      __ SmiUntag(t0, key);
       StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
       break;
     case EXTERNAL_DOUBLE_ELEMENTS:
-      __ sll(t8, t0, 3);
+      __ sll(t8, key, 2);
       __ addu(a3, a3, t8);
       // a3: effective address of the double element
       FloatingPointHelper::Destination destination;
@@ -3930,12 +3935,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   }
 
   // Entry registers are intact, a0 holds the value which is the return value.
-  __ mov(v0, value);
+  __ mov(v0, a0);
   __ Ret();
 
   if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     // a3: external array.
-    // t0: index (integer).
     __ bind(&check_heap_number);
     __ GetObjectType(value, t1, t2);
     __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
@@ -3943,7 +3947,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
     // a3: base pointer of external storage.
-    // t0: key (integer).
 
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
@@ -3956,11 +3959,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 
       if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
         __ cvt_s_d(f0, f0);
-        __ sll(t8, t0, 2);
+        __ sll(t8, key, 1);
         __ addu(t8, a3, t8);
         __ swc1(f0, MemOperand(t8, 0));
       } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-        __ sll(t8, t0, 3);
+        __ sll(t8, key, 2);
         __ addu(t8, a3, t8);
         __ sdc1(f0, MemOperand(t8, 0));
       } else {
@@ -3969,18 +3972,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
             __ sh(t3, MemOperand(t8, 0));
             break;
          case EXTERNAL_INT_ELEMENTS:
          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
@@ -3998,7 +4001,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 
       // Entry registers are intact, a0 holds the value
       // which is the return value.
-      __ mov(v0, value);
+      __ mov(v0, a0);
       __ Ret();
     } else {
       // FPU is not available, do manual conversions.
@@ -4053,13 +4056,13 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
         __ or_(t3, t7, t6);
 
         __ bind(&done);
-        __ sll(t9, a1, 2);
+        __ sll(t9, key, 1);
         __ addu(t9, a2, t9);
         __ sw(t3, MemOperand(t9, 0));
 
         // Entry registers are intact, a0 holds the value which is the return
         // value.
-        __ mov(v0, value);
+        __ mov(v0, a0);
         __ Ret();
 
         __ bind(&nan_or_infinity_or_zero);
@@ -4077,6 +4080,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
         // t8: effective address of destination element.
         __ sw(t4, MemOperand(t8, 0));
         __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+        __ mov(v0, a0);
         __ Ret();
       } else {
         bool is_signed_type = IsElementTypeSigned(elements_kind);
@@ -4139,18 +4143,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;