[stubs] Convert DoubleToIStub and MathPowStub to builtins

This is mostly a simple copy & paste of the stub implementation from
code-stubs-arch.cc to builtins-arch.cc. The conversion allows removal of a
special case for the DoubleToIStub within the compiler & wasm pipelines, and
also makes the following builtins isolate-independent (in conjunction with
https://crrev.com/c/1006581):

 TFC BitwiseAnd
 TFC BitwiseOr
 TFC BitwiseXor
 TFC Exponentiate
 TFC ShiftLeft
 TFC ShiftRight
 TFC ShiftRightLogical
 TFJ AtomicsAdd
 TFJ AtomicsAnd
 TFJ AtomicsCompareExchange
 TFJ AtomicsExchange
 TFJ AtomicsLoad
 TFJ AtomicsOr
 TFJ AtomicsStore
 TFJ AtomicsSub
 TFJ AtomicsXor
 TFJ MathClz32
 TFJ MathImul
 TFJ MathPow
 TFJ NumberParseInt
 TFJ StringFromCharCode
 TFJ TypedArrayFrom
 TFJ TypedArrayOf
 TFJ TypedArrayPrototypeMap

Drive-by: dead code removal & TODOs in code-stubs.h.

Bug: v8:6666
Change-Id: I763cba2242bcadc2d130b0aaa16a9787212b466a
Reviewed-on: https://chromium-review.googlesource.com/1012024
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52591}
Parent: 7c36eff8d5
Commit: a3b6067525
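As context for the DoubleToI conversion below: the TurboAssembler comments in this diff describe the operation as the truncating conversion used by the JS bitwise operations (ECMA-262 9.5: ToInt32). A minimal C++ reference model of that contract, written for this note rather than taken from V8, could look like this:

// Hypothetical reference model (not V8 code): ECMA-262 ToInt32 truncation.
#include <cmath>
#include <cstdint>

int32_t ToInt32Reference(double x) {
  if (!std::isfinite(x)) return 0;                      // NaN, +/-Infinity -> 0.
  double truncated = std::trunc(x);                     // Round toward zero.
  double modulo = std::fmod(truncated, 4294967296.0);   // Reduce modulo 2^32.
  // Reinterpret the low 32 bits as a signed two's-complement integer.
  return static_cast<int32_t>(
      static_cast<uint32_t>(static_cast<int64_t>(modulo) & 0xFFFFFFFF));
}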
@@ -38,180 +38,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kNewArray);
}

void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label negate, done;

  UseScratchRegisterScope temps(masm);
  Register result_reg = r7;
  Register double_low = GetRegisterThatIsNotOneOf(result_reg);
  Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
  LowDwVfpRegister double_scratch = temps.AcquireLowD();

  // Save the old values from these temporary registers on the stack.
  __ Push(result_reg, double_high, double_low);

  // Account for saved regs.
  const int kArgumentOffset = 3 * kPointerSize;

  MemOperand input_operand(sp, kArgumentOffset);
  MemOperand result_operand = input_operand;

  // Load double input.
  __ vldr(double_scratch, input_operand);
  __ vmov(double_low, double_high, double_scratch);
  // Try to convert with a FPU convert instruction. This handles all
  // non-saturating cases.
  __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);

  Register scratch = temps.Acquire();
  __ Ubfx(scratch, double_high, HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
  // If exponent is greater than or equal to 84, the 32 less significant
  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
  // the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83). If the exponent is
  // greater than this, the conversion is out of range, so return zero.
  __ cmp(scratch, Operand(83));
  __ mov(result_reg, Operand::Zero(), LeaveCC, ge);
  __ b(ge, &done);

  // If we reach this code, 30 <= exponent <= 83.
  // `TryInlineTruncateDoubleToI` above will have truncated any double with an
  // exponent lower than 30.
  if (masm->emit_debug_code()) {
    // Scratch is exponent - 1.
    __ cmp(scratch, Operand(30 - 1));
    __ Check(ge, AbortReason::kUnexpectedValue);
  }

  // We don't have to handle cases where 0 <= exponent <= 20 for which we would
  // need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ rsb(scratch, scratch, Operand(51), SetCC);

  // 52 <= exponent <= 83, shift only double_low.
  // On entry, scratch contains: 52 - exponent.
  __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls);
  __ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls);
  __ b(ls, &negate);

  // 21 <= exponent <= 51, shift double_low and double_high
  // to generate the result.
  __ mov(double_low, Operand(double_low, LSR, scratch));
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ rsb(scratch, scratch, Operand(32));
  __ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord);
  // Set the implicit 1 before the mantissa part in double_high.
  __ orr(result_reg, result_reg,
         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  __ orr(result_reg, double_low, Operand(result_reg, LSL, scratch));

  __ bind(&negate);
  // If input was positive, double_high ASR 31 equals 0 and
  // double_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // Input_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1.
  // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
  __ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
  __ add(result_reg, result_reg, Operand(double_high, LSR, 31));

  __ bind(&done);
  __ str(result_reg, result_operand);

  // Restore registers corrupted in this routine and return.
  __ Pop(result_reg, double_high, double_low);
  __ Ret();
}
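When the inline VFP conversion fails, the code above rebuilds the 32-bit result from the double's exponent and mantissa bits. A standalone C++ sketch of that slow path, assuming IEEE-754 doubles and written for illustration rather than taken from this CL, is:

// Illustrative only: reconstruct ToInt32 from the raw IEEE-754 bits,
// mirroring the exponent/mantissa handling in the stub above.
#include <cstdint>
#include <cstring>

int32_t TruncateSlowPath(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;    // |input| < 1 (or subnormal/zero) truncates to 0.
  if (exponent >= 84) return 0;  // All 32 low bits are shifted out (also Inf/NaN).
  // 52 stored mantissa bits plus the implicit leading 1.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  // The magnitude is mantissa * 2^(exponent - 52); keep only the low 32 bits.
  int shift = exponent - 52;
  uint32_t magnitude = shift >= 0 ? static_cast<uint32_t>(mantissa << shift)
                                  : static_cast<uint32_t>(mantissa >> -shift);
  // Two's-complement negate when the sign bit is set, as the eor/add pair does.
  uint32_t sign = static_cast<uint32_t>(bits >> 63);  // 0 or 1
  uint32_t result = (magnitude ^ (0u - sign)) + sign;
  return static_cast<int32_t>(result);
}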

void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent == r2);
  const LowDwVfpRegister double_base = d0;
  const LowDwVfpRegister double_exponent = d1;
  const LowDwVfpRegister double_result = d2;
  const LowDwVfpRegister double_scratch = d3;
  const SwVfpRegister single_scratch = s6;
  const Register scratch = r9;
  const Register scratch2 = r4;

  Label call_runtime, done, int_exponent;

  // Detect integer exponents stored as double.
  __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
  __ b(eq, &int_exponent);

  __ push(lr);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(lr);
  __ MovFromFloatResult(double_result);
  __ b(&done);

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  // Exponent has previously been stored into scratch as untagged integer.
  __ mov(exponent, scratch);

  __ vmov(double_scratch, double_base);  // Back up base.
  __ vmov(double_result, Double(1.0), scratch2);

  // Get absolute value of exponent.
  __ cmp(scratch, Operand::Zero());
  __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);

  Label while_true;
  __ bind(&while_true);
  __ mov(scratch, Operand(scratch, LSR, 1), SetCC);
  __ vmul(double_result, double_result, double_scratch, cs);
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
  __ b(ne, &while_true);

  __ cmp(exponent, Operand::Zero());
  __ b(ge, &done);
  __ vmov(double_scratch, Double(1.0), scratch);
  __ vdiv(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ VFPCompareAndSetFlags(double_result, 0.0);
  __ b(ne, &done);
  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with exponent value before bailing out.
  __ vmov(single_scratch, exponent);
  __ vcvt_f64_s32(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(lr);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(lr);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}
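The integer-exponent loop above is binary exponentiation (square-and-multiply) on the absolute exponent. A rough standalone C++ equivalent, illustrative only and using std::pow where the generated code instead calls power_double_double, is:

// Illustrative sketch of the int_exponent path above (not generated code).
#include <cmath>

double PowIntegerSketch(double base, int exponent) {
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  double result = 1.0;
  double running = base;
  for (; bits != 0; bits >>= 1) {
    if (bits & 1) result *= running;  // Multiply in the current exponent bit.
    running *= running;               // Square for the next bit.
  }
  if (exponent < 0) {
    result = 1.0 / result;
    // x^-y == (1/x)^y does not hold for subnormal results (see comment above),
    // so bail out when the division underflowed to zero.
    if (result == 0.0) return std::pow(base, static_cast<double>(exponent));
  }
  return result;
}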

Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }

void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
@@ -1647,8 +1647,9 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
  b(lt, done);
}

void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
                                              DwVfpRegister double_input) {
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
                                       Register result,
                                       DwVfpRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -1658,7 +1659,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
  sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  vstr(double_input, MemOperand(sp, 0));

  CallStubDelayed(new (zone) DoubleToIStub(nullptr));
  Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
  ldr(result, MemOperand(sp, 0));

  add(sp, sp, Operand(kDoubleSize));

@@ -520,8 +520,8 @@ class TurboAssembler : public Assembler {
  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToIDelayed(Zone* zone, Register result,
                                DwVfpRegister double_input);
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DwVfpRegister double_input);

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {

@@ -36,189 +36,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kNewArray);
}

void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label done;
  Register result = x7;

  DCHECK(result.Is64Bits());

  UseScratchRegisterScope temps(masm);
  Register scratch1 = temps.AcquireX();
  Register scratch2 = temps.AcquireX();
  DoubleRegister double_scratch = temps.AcquireD();

  // Account for saved regs.
  const int kArgumentOffset = 2 * kPointerSize;

  __ Push(result, scratch1);  // scratch1 is also pushed to preserve alignment.
  __ Peek(double_scratch, kArgumentOffset);

  // Try to convert with a FPU convert instruction. This handles all
  // non-saturating cases.
  __ TryConvertDoubleToInt64(result, double_scratch, &done);
  __ Fmov(result, double_scratch);

  // If we reach here we need to manually convert the input to an int32.

  // Extract the exponent.
  Register exponent = scratch1;
  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
          HeapNumber::kExponentBits);

  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
  // the mantissa gets shifted completely out of the int32_t result.
  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
  __ CzeroX(result, ge);
  __ B(ge, &done);

  // The Fcvtzs sequence handles all cases except where the conversion causes
  // signed overflow in the int64_t target. Since we've already handled
  // exponents >= 84, we can guarantee that 63 <= exponent < 84.

  if (masm->emit_debug_code()) {
    __ Cmp(exponent, HeapNumber::kExponentBias + 63);
    // Exponents less than this should have been handled by the Fcvt case.
    __ Check(ge, AbortReason::kUnexpectedValue);
  }

  // Isolate the mantissa bits, and set the implicit '1'.
  Register mantissa = scratch2;
  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
  __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);

  // Negate the mantissa if necessary.
  __ Tst(result, kXSignMask);
  __ Cneg(mantissa, mantissa, ne);

  // Shift the mantissa bits in the correct place. We know that we have to shift
  // it left here, because exponent >= 63 >= kMantissaBits.
  __ Sub(exponent, exponent,
         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
  __ Lsl(result, mantissa, exponent);

  __ Bind(&done);
  __ Poke(result, kArgumentOffset);
  __ Pop(scratch1, result);
  __ Ret();
}

void MathPowStub::Generate(MacroAssembler* masm) {
  // Stack on entry:
  // sp[0]: Exponent (as a tagged value).
  // sp[1]: Base (as a tagged value).
  //
  // The (tagged) result will be returned in x0, as a heap number.

  Register exponent_tagged = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent_tagged.is(x11));
  Register exponent_integer = MathPowIntegerDescriptor::exponent();
  DCHECK(exponent_integer.is(x12));
  Register saved_lr = x19;
  VRegister result_double = d0;
  VRegister base_double = d0;
  VRegister exponent_double = d1;
  VRegister base_double_copy = d2;
  VRegister scratch1_double = d6;
  VRegister scratch0_double = d7;

  // A fast-path for integer exponents.
  Label exponent_is_smi, exponent_is_integer;
  // Allocate a heap number for the result, and return it.
  Label done;

  // Unpack the inputs.

  // Handle double (heap number) exponents.
  // Detect integer exponents stored as doubles and handle those in the
  // integer fast-path.
  __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
                               scratch0_double, &exponent_is_integer);

  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ Mov(saved_lr, lr);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
    __ Mov(lr, saved_lr);
    __ B(&done);
  }

  // Handle SMI exponents.
  __ Bind(&exponent_is_smi);
  //  x10   base_tagged       The tagged base (input).
  //  x11   exponent_tagged   The tagged exponent (input).
  //  d1    base_double       The base as a double.
  __ SmiUntag(exponent_integer, exponent_tagged);

  __ Bind(&exponent_is_integer);
  //  x10   base_tagged       The tagged base (input).
  //  x11   exponent_tagged   The tagged exponent (input).
  //  x12   exponent_integer  The exponent as an integer.
  //  d1    base_double       The base as a double.

  // Find abs(exponent). For negative exponents, we can find the inverse later.
  Register exponent_abs = x13;
  __ Cmp(exponent_integer, 0);
  __ Cneg(exponent_abs, exponent_integer, mi);
  //  x13   exponent_abs      The value of abs(exponent_integer).

  // Repeatedly multiply to calculate the power.
  //  result = 1.0;
  //  For each bit n (exponent_integer{n}) {
  //    if (exponent_integer{n}) {
  //      result *= base;
  //    }
  //    base *= base;
  //    if (remaining bits in exponent_integer are all zero) {
  //      break;
  //    }
  //  }
  Label power_loop, power_loop_entry, power_loop_exit;
  __ Fmov(scratch1_double, base_double);
  __ Fmov(base_double_copy, base_double);
  __ Fmov(result_double, 1.0);
  __ B(&power_loop_entry);

  __ Bind(&power_loop);
  __ Fmul(scratch1_double, scratch1_double, scratch1_double);
  __ Lsr(exponent_abs, exponent_abs, 1);
  __ Cbz(exponent_abs, &power_loop_exit);

  __ Bind(&power_loop_entry);
  __ Tbz(exponent_abs, 0, &power_loop);
  __ Fmul(result_double, result_double, scratch1_double);
  __ B(&power_loop);

  __ Bind(&power_loop_exit);

  // If the exponent was positive, result_double holds the result.
  __ Tbz(exponent_integer, kXSignBit, &done);

  // The exponent was negative, so find the inverse.
  __ Fmov(scratch0_double, 1.0);
  __ Fdiv(result_double, scratch0_double, result_double);
  // ECMA-262 only requires Math.pow to return an 'implementation-dependent
  // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
  // to calculate the subnormal value 2^-1074. This method of calculating
  // negative powers doesn't work because 2^1074 overflows to infinity. To
  // catch this corner-case, we bail out if the result was 0. (This can only
  // occur if the divisor is infinity or the base is zero.)
  __ Fcmp(result_double, 0.0);
  __ B(&done, ne);

  AllowExternalCallThatCantCauseGC scope(masm);
  __ Mov(saved_lr, lr);
  __ Fmov(base_double, base_double_copy);
  __ Scvtf(exponent_double, exponent_integer);
  __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                   0, 2);
  __ Mov(lr, saved_lr);
  __ Bind(&done);
  __ Ret();
}

void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  // It is important that the following stubs are generated in this order
  // because pregenerated stubs can only call other pregenerated stubs.
@@ -2308,8 +2308,9 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
  B(vc, done);
}

void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
                                              DoubleRegister double_input) {
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
                                       Register result,
                                       DoubleRegister double_input) {
  Label done;

  // Try to convert the double to an int64. If successful, the bottom 32 bits
@@ -2319,9 +2320,8 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
  // If we fell through then inline version didn't succeed - call stub instead.
  Push(lr, double_input);

  auto stub = new (zone) DoubleToIStub(nullptr);
  // DoubleToIStub preserves any registers it needs to clobber.
  CallStubDelayed(stub);
  // DoubleToI preserves any registers it needs to clobber.
  Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
  Ldr(result, MemOperand(sp, 0));

  DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());

@@ -902,8 +902,8 @@ class TurboAssembler : public Assembler {
  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToIDelayed(Zone* zone, Register result,
                                DoubleRegister double_input);
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input);

  inline void Mul(const Register& rd, const Register& rn, const Register& rm);

@@ -2598,6 +2598,179 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||
// Finally, jump to the entrypoint.
|
||||
__ Jump(r8);
|
||||
}
|
||||
|
||||
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
|
||||
Label negate, done;
|
||||
|
||||
UseScratchRegisterScope temps(masm);
|
||||
Register result_reg = r7;
|
||||
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
|
||||
Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
|
||||
LowDwVfpRegister double_scratch = temps.AcquireLowD();
|
||||
|
||||
// Save the old values from these temporary registers on the stack.
|
||||
__ Push(result_reg, double_high, double_low);
|
||||
|
||||
// Account for saved regs.
|
||||
const int kArgumentOffset = 3 * kPointerSize;
|
||||
|
||||
MemOperand input_operand(sp, kArgumentOffset);
|
||||
MemOperand result_operand = input_operand;
|
||||
|
||||
// Load double input.
|
||||
__ vldr(double_scratch, input_operand);
|
||||
__ vmov(double_low, double_high, double_scratch);
|
||||
// Try to convert with a FPU convert instruction. This handles all
|
||||
// non-saturating cases.
|
||||
__ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);
|
||||
|
||||
Register scratch = temps.Acquire();
|
||||
__ Ubfx(scratch, double_high, HeapNumber::kExponentShift,
|
||||
HeapNumber::kExponentBits);
|
||||
// Load scratch with exponent - 1. This is faster than loading
|
||||
// with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
|
||||
STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
|
||||
__ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
|
||||
// If exponent is greater than or equal to 84, the 32 less significant
|
||||
// bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
|
||||
// the result is 0.
|
||||
// Compare exponent with 84 (compare exponent - 1 with 83). If the exponent is
|
||||
// greater than this, the conversion is out of range, so return zero.
|
||||
__ cmp(scratch, Operand(83));
|
||||
__ mov(result_reg, Operand::Zero(), LeaveCC, ge);
|
||||
__ b(ge, &done);
|
||||
|
||||
// If we reach this code, 30 <= exponent <= 83.
|
||||
// `TryInlineTruncateDoubleToI` above will have truncated any double with an
|
||||
// exponent lower than 30.
|
||||
if (masm->emit_debug_code()) {
|
||||
// Scratch is exponent - 1.
|
||||
__ cmp(scratch, Operand(30 - 1));
|
||||
__ Check(ge, AbortReason::kUnexpectedValue);
|
||||
}
|
||||
|
||||
// We don't have to handle cases where 0 <= exponent <= 20 for which we would
|
||||
// need to shift right the high part of the mantissa.
|
||||
// Scratch contains exponent - 1.
|
||||
// Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
|
||||
__ rsb(scratch, scratch, Operand(51), SetCC);
|
||||
|
||||
// 52 <= exponent <= 83, shift only double_low.
|
||||
// On entry, scratch contains: 52 - exponent.
|
||||
__ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls);
|
||||
__ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls);
|
||||
__ b(ls, &negate);
|
||||
|
||||
// 21 <= exponent <= 51, shift double_low and double_high
|
||||
// to generate the result.
|
||||
__ mov(double_low, Operand(double_low, LSR, scratch));
|
||||
// Scratch contains: 52 - exponent.
|
||||
// We need: exponent - 20.
|
||||
// So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
|
||||
__ rsb(scratch, scratch, Operand(32));
|
||||
__ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord);
|
||||
// Set the implicit 1 before the mantissa part in double_high.
|
||||
__ orr(result_reg, result_reg,
|
||||
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
|
||||
__ orr(result_reg, double_low, Operand(result_reg, LSL, scratch));
|
||||
|
||||
__ bind(&negate);
|
||||
// If input was positive, double_high ASR 31 equals 0 and
|
||||
// double_high LSR 31 equals zero.
|
||||
// New result = (result eor 0) + 0 = result.
|
||||
// If the input was negative, we have to negate the result.
|
||||
// Input_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1.
|
||||
// New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
|
||||
__ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
|
||||
__ add(result_reg, result_reg, Operand(double_high, LSR, 31));
|
||||
|
||||
__ bind(&done);
|
||||
__ str(result_reg, result_operand);
|
||||
|
||||
// Restore registers corrupted in this routine and return.
|
||||
__ Pop(result_reg, double_high, double_low);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
|
||||
const Register exponent = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent == r2);
|
||||
const LowDwVfpRegister double_base = d0;
|
||||
const LowDwVfpRegister double_exponent = d1;
|
||||
const LowDwVfpRegister double_result = d2;
|
||||
const LowDwVfpRegister double_scratch = d3;
|
||||
const SwVfpRegister single_scratch = s6;
|
||||
const Register scratch = r9;
|
||||
const Register scratch2 = r4;
|
||||
|
||||
Label call_runtime, done, int_exponent;
|
||||
|
||||
// Detect integer exponents stored as double.
|
||||
__ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
|
||||
__ b(eq, &int_exponent);
|
||||
|
||||
__ push(lr);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(0, 2);
|
||||
__ MovToFloatParameters(double_base, double_exponent);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 0, 2);
|
||||
}
|
||||
__ pop(lr);
|
||||
__ MovFromFloatResult(double_result);
|
||||
__ b(&done);
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
|
||||
// Get two copies of exponent in the registers scratch and exponent.
|
||||
// Exponent has previously been stored into scratch as untagged integer.
|
||||
__ mov(exponent, scratch);
|
||||
|
||||
__ vmov(double_scratch, double_base); // Back up base.
|
||||
__ vmov(double_result, Double(1.0), scratch2);
|
||||
|
||||
// Get absolute value of exponent.
|
||||
__ cmp(scratch, Operand::Zero());
|
||||
__ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);
|
||||
|
||||
Label while_true;
|
||||
__ bind(&while_true);
|
||||
__ mov(scratch, Operand(scratch, LSR, 1), SetCC);
|
||||
__ vmul(double_result, double_result, double_scratch, cs);
|
||||
__ vmul(double_scratch, double_scratch, double_scratch, ne);
|
||||
__ b(ne, &while_true);
|
||||
|
||||
__ cmp(exponent, Operand::Zero());
|
||||
__ b(ge, &done);
|
||||
__ vmov(double_scratch, Double(1.0), scratch);
|
||||
__ vdiv(double_result, double_scratch, double_result);
|
||||
// Test whether result is zero. Bail out to check for subnormal result.
|
||||
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
|
||||
__ VFPCompareAndSetFlags(double_result, 0.0);
|
||||
__ b(ne, &done);
|
||||
// double_exponent may not contain the exponent value if the input was a
|
||||
// smi. We set it with exponent value before bailing out.
|
||||
__ vmov(single_scratch, exponent);
|
||||
__ vcvt_f64_s32(double_exponent, single_scratch);
|
||||
|
||||
// Returning or bailing out.
|
||||
__ push(lr);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(0, 2);
|
||||
__ MovToFloatParameters(double_base, double_exponent);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 0, 2);
|
||||
}
|
||||
__ pop(lr);
|
||||
__ MovFromFloatResult(double_result);
|
||||
|
||||
__ bind(&done);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
} // namespace internal
|
||||
|
@@ -3064,6 +3064,187 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||
__ Jump(x8);
|
||||
}
|
||||
|
||||
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
|
||||
Label done;
|
||||
Register result = x7;
|
||||
|
||||
DCHECK(result.Is64Bits());
|
||||
|
||||
UseScratchRegisterScope temps(masm);
|
||||
Register scratch1 = temps.AcquireX();
|
||||
Register scratch2 = temps.AcquireX();
|
||||
DoubleRegister double_scratch = temps.AcquireD();
|
||||
|
||||
// Account for saved regs.
|
||||
const int kArgumentOffset = 2 * kPointerSize;
|
||||
|
||||
__ Push(result, scratch1); // scratch1 is also pushed to preserve alignment.
|
||||
__ Peek(double_scratch, kArgumentOffset);
|
||||
|
||||
// Try to convert with a FPU convert instruction. This handles all
|
||||
// non-saturating cases.
|
||||
__ TryConvertDoubleToInt64(result, double_scratch, &done);
|
||||
__ Fmov(result, double_scratch);
|
||||
|
||||
// If we reach here we need to manually convert the input to an int32.
|
||||
|
||||
// Extract the exponent.
|
||||
Register exponent = scratch1;
|
||||
__ Ubfx(exponent, result, HeapNumber::kMantissaBits,
|
||||
HeapNumber::kExponentBits);
|
||||
|
||||
// If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
|
||||
// the mantissa gets shifted completely out of the int32_t result.
|
||||
__ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
|
||||
__ CzeroX(result, ge);
|
||||
__ B(ge, &done);
|
||||
|
||||
// The Fcvtzs sequence handles all cases except where the conversion causes
|
||||
// signed overflow in the int64_t target. Since we've already handled
|
||||
// exponents >= 84, we can guarantee that 63 <= exponent < 84.
|
||||
|
||||
if (masm->emit_debug_code()) {
|
||||
__ Cmp(exponent, HeapNumber::kExponentBias + 63);
|
||||
// Exponents less than this should have been handled by the Fcvt case.
|
||||
__ Check(ge, AbortReason::kUnexpectedValue);
|
||||
}
|
||||
|
||||
// Isolate the mantissa bits, and set the implicit '1'.
|
||||
Register mantissa = scratch2;
|
||||
__ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
|
||||
__ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
|
||||
|
||||
// Negate the mantissa if necessary.
|
||||
__ Tst(result, kXSignMask);
|
||||
__ Cneg(mantissa, mantissa, ne);
|
||||
|
||||
// Shift the mantissa bits in the correct place. We know that we have to shift
|
||||
// it left here, because exponent >= 63 >= kMantissaBits.
|
||||
__ Sub(exponent, exponent,
|
||||
HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
|
||||
__ Lsl(result, mantissa, exponent);
|
||||
|
||||
__ Bind(&done);
|
||||
__ Poke(result, kArgumentOffset);
|
||||
__ Pop(scratch1, result);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
|
||||
// Stack on entry:
|
||||
// sp[0]: Exponent (as a tagged value).
|
||||
// sp[1]: Base (as a tagged value).
|
||||
//
|
||||
// The (tagged) result will be returned in x0, as a heap number.
|
||||
|
||||
Register exponent_tagged = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent_tagged.is(x11));
|
||||
Register exponent_integer = MathPowIntegerDescriptor::exponent();
|
||||
DCHECK(exponent_integer.is(x12));
|
||||
Register saved_lr = x19;
|
||||
VRegister result_double = d0;
|
||||
VRegister base_double = d0;
|
||||
VRegister exponent_double = d1;
|
||||
VRegister base_double_copy = d2;
|
||||
VRegister scratch1_double = d6;
|
||||
VRegister scratch0_double = d7;
|
||||
|
||||
// A fast-path for integer exponents.
|
||||
Label exponent_is_smi, exponent_is_integer;
|
||||
// Allocate a heap number for the result, and return it.
|
||||
Label done;
|
||||
|
||||
// Unpack the inputs.
|
||||
|
||||
// Handle double (heap number) exponents.
|
||||
// Detect integer exponents stored as doubles and handle those in the
|
||||
// integer fast-path.
|
||||
__ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
|
||||
scratch0_double, &exponent_is_integer);
|
||||
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ Mov(saved_lr, lr);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 0, 2);
|
||||
__ Mov(lr, saved_lr);
|
||||
__ B(&done);
|
||||
}
|
||||
|
||||
// Handle SMI exponents.
|
||||
__ Bind(&exponent_is_smi);
|
||||
// x10 base_tagged The tagged base (input).
|
||||
// x11 exponent_tagged The tagged exponent (input).
|
||||
// d1 base_double The base as a double.
|
||||
__ SmiUntag(exponent_integer, exponent_tagged);
|
||||
|
||||
__ Bind(&exponent_is_integer);
|
||||
// x10 base_tagged The tagged base (input).
|
||||
// x11 exponent_tagged The tagged exponent (input).
|
||||
// x12 exponent_integer The exponent as an integer.
|
||||
// d1 base_double The base as a double.
|
||||
|
||||
// Find abs(exponent). For negative exponents, we can find the inverse later.
|
||||
Register exponent_abs = x13;
|
||||
__ Cmp(exponent_integer, 0);
|
||||
__ Cneg(exponent_abs, exponent_integer, mi);
|
||||
// x13 exponent_abs The value of abs(exponent_integer).
|
||||
|
||||
// Repeatedly multiply to calculate the power.
|
||||
// result = 1.0;
|
||||
// For each bit n (exponent_integer{n}) {
|
||||
// if (exponent_integer{n}) {
|
||||
// result *= base;
|
||||
// }
|
||||
// base *= base;
|
||||
// if (remaining bits in exponent_integer are all zero) {
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
Label power_loop, power_loop_entry, power_loop_exit;
|
||||
__ Fmov(scratch1_double, base_double);
|
||||
__ Fmov(base_double_copy, base_double);
|
||||
__ Fmov(result_double, 1.0);
|
||||
__ B(&power_loop_entry);
|
||||
|
||||
__ Bind(&power_loop);
|
||||
__ Fmul(scratch1_double, scratch1_double, scratch1_double);
|
||||
__ Lsr(exponent_abs, exponent_abs, 1);
|
||||
__ Cbz(exponent_abs, &power_loop_exit);
|
||||
|
||||
__ Bind(&power_loop_entry);
|
||||
__ Tbz(exponent_abs, 0, &power_loop);
|
||||
__ Fmul(result_double, result_double, scratch1_double);
|
||||
__ B(&power_loop);
|
||||
|
||||
__ Bind(&power_loop_exit);
|
||||
|
||||
// If the exponent was positive, result_double holds the result.
|
||||
__ Tbz(exponent_integer, kXSignBit, &done);
|
||||
|
||||
// The exponent was negative, so find the inverse.
|
||||
__ Fmov(scratch0_double, 1.0);
|
||||
__ Fdiv(result_double, scratch0_double, result_double);
|
||||
// ECMA-262 only requires Math.pow to return an 'implementation-dependent
|
||||
// approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
|
||||
// to calculate the subnormal value 2^-1074. This method of calculating
|
||||
// negative powers doesn't work because 2^1074 overflows to infinity. To
|
||||
// catch this corner-case, we bail out if the result was 0. (This can only
|
||||
// occur if the divisor is infinity or the base is zero.)
|
||||
__ Fcmp(result_double, 0.0);
|
||||
__ B(&done, ne);
|
||||
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ Mov(saved_lr, lr);
|
||||
__ Fmov(base_double, base_double_copy);
|
||||
__ Scvtf(exponent_double, exponent_integer);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 0, 2);
|
||||
__ Mov(lr, saved_lr);
|
||||
__ Bind(&done);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
} // namespace internal
|
||||
|
@@ -1247,7 +1247,12 @@ namespace internal {
  /* #sec-%asyncfromsynciteratorprototype%.return */  \
  TFJ(AsyncFromSyncIteratorPrototypeReturn, 1, kValue) \
  /* #sec-async-iterator-value-unwrap-functions */     \
  TFJ(AsyncIteratorValueUnwrap, 1, kValue)
  TFJ(AsyncIteratorValueUnwrap, 1, kValue)             \
                                                       \
  /* Miscellaneous */                                  \
                                                       \
  ASM(DoubleToI)                                       \
  ASM(MathPowInternal)

#ifdef V8_INTL_SUPPORT
#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM) \

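For orientation: each ASM(Name) entry added here is backed by a hand-written, per-architecture generator, which is what the new Generate_DoubleToI and Generate_MathPowInternal functions in the diffs below provide. Their shared shape (signatures as in the added code; the list-macro plumbing is elided here) is:

// Shapes of the generators added below for the two new ASM builtins.
void Builtins::Generate_DoubleToI(MacroAssembler* masm);        // ASM(DoubleToI)
void Builtins::Generate_MathPowInternal(MacroAssembler* masm);  // ASM(MathPowInternal)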
@@ -2800,7 +2800,234 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||
__ jmp(edi);
|
||||
}
|
||||
|
||||
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
|
||||
Label check_negative, process_64_bits, done;
|
||||
|
||||
// Account for return address and saved regs.
|
||||
const int kArgumentOffset = 4 * kPointerSize;
|
||||
|
||||
MemOperand mantissa_operand(MemOperand(esp, kArgumentOffset));
|
||||
MemOperand exponent_operand(
|
||||
MemOperand(esp, kArgumentOffset + kDoubleSize / 2));
|
||||
|
||||
// The result is returned on the stack.
|
||||
MemOperand return_operand = mantissa_operand;
|
||||
|
||||
Register scratch1 = ebx;
|
||||
|
||||
// Since we must use ecx for shifts below, use some other register (eax)
|
||||
// to calculate the result.
|
||||
Register result_reg = eax;
|
||||
// Save ecx if it isn't the return register and therefore volatile, or if it
|
||||
// is the return register, then save the temp register we use in its stead for
|
||||
// the result.
|
||||
Register save_reg = eax;
|
||||
__ push(ecx);
|
||||
__ push(scratch1);
|
||||
__ push(save_reg);
|
||||
|
||||
__ mov(scratch1, mantissa_operand);
|
||||
if (CpuFeatures::IsSupported(SSE3)) {
|
||||
CpuFeatureScope scope(masm, SSE3);
|
||||
// Load x87 register with heap number.
|
||||
__ fld_d(mantissa_operand);
|
||||
}
|
||||
__ mov(ecx, exponent_operand);
|
||||
|
||||
__ and_(ecx, HeapNumber::kExponentMask);
|
||||
__ shr(ecx, HeapNumber::kExponentShift);
|
||||
__ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
|
||||
__ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
|
||||
__ j(below, &process_64_bits);
|
||||
|
||||
// Result is entirely in lower 32-bits of mantissa
|
||||
int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
|
||||
if (CpuFeatures::IsSupported(SSE3)) {
|
||||
__ fstp(0);
|
||||
}
|
||||
__ sub(ecx, Immediate(delta));
|
||||
__ xor_(result_reg, result_reg);
|
||||
__ cmp(ecx, Immediate(31));
|
||||
__ j(above, &done);
|
||||
__ shl_cl(scratch1);
|
||||
__ jmp(&check_negative);
|
||||
|
||||
__ bind(&process_64_bits);
|
||||
if (CpuFeatures::IsSupported(SSE3)) {
|
||||
CpuFeatureScope scope(masm, SSE3);
|
||||
// Reserve space for 64 bit answer.
|
||||
__ sub(esp, Immediate(kDoubleSize)); // Nolint.
|
||||
// Do conversion, which cannot fail because we checked the exponent.
|
||||
__ fisttp_d(Operand(esp, 0));
|
||||
__ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
|
||||
__ add(esp, Immediate(kDoubleSize));
|
||||
__ jmp(&done);
|
||||
} else {
|
||||
// Result must be extracted from shifted 32-bit mantissa
|
||||
__ sub(ecx, Immediate(delta));
|
||||
__ neg(ecx);
|
||||
__ mov(result_reg, exponent_operand);
|
||||
__ and_(result_reg,
|
||||
Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
|
||||
__ add(result_reg,
|
||||
Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
|
||||
__ shrd_cl(scratch1, result_reg);
|
||||
__ shr_cl(result_reg);
|
||||
__ test(ecx, Immediate(32));
|
||||
__ cmov(not_equal, scratch1, result_reg);
|
||||
}
|
||||
|
||||
// If the double was negative, negate the integer result.
|
||||
__ bind(&check_negative);
|
||||
__ mov(result_reg, scratch1);
|
||||
__ neg(result_reg);
|
||||
__ cmp(exponent_operand, Immediate(0));
|
||||
__ cmov(greater, result_reg, scratch1);
|
||||
|
||||
// Restore registers
|
||||
__ bind(&done);
|
||||
__ mov(return_operand, result_reg);
|
||||
__ pop(save_reg);
|
||||
__ pop(scratch1);
|
||||
__ pop(ecx);
|
||||
__ ret(0);
|
||||
}
|
||||
|
||||
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
|
||||
const Register exponent = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent == eax);
|
||||
const Register scratch = ecx;
|
||||
const XMMRegister double_result = xmm3;
|
||||
const XMMRegister double_base = xmm2;
|
||||
const XMMRegister double_exponent = xmm1;
|
||||
const XMMRegister double_scratch = xmm4;
|
||||
|
||||
Label call_runtime, done, exponent_not_smi, int_exponent;
|
||||
|
||||
// Save 1 in double_result - we need this several times later on.
|
||||
__ mov(scratch, Immediate(1));
|
||||
__ Cvtsi2sd(double_result, scratch);
|
||||
|
||||
Label fast_power, try_arithmetic_simplification;
|
||||
__ DoubleToI(exponent, double_exponent, double_scratch,
|
||||
&try_arithmetic_simplification, &try_arithmetic_simplification);
|
||||
__ jmp(&int_exponent);
|
||||
|
||||
__ bind(&try_arithmetic_simplification);
|
||||
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
|
||||
__ cvttsd2si(exponent, Operand(double_exponent));
|
||||
__ cmp(exponent, Immediate(0x1));
|
||||
__ j(overflow, &call_runtime);
|
||||
|
||||
// Using FPU instructions to calculate power.
|
||||
Label fast_power_failed;
|
||||
__ bind(&fast_power);
|
||||
__ fnclex(); // Clear flags to catch exceptions later.
|
||||
// Transfer (B)ase and (E)xponent onto the FPU register stack.
|
||||
__ sub(esp, Immediate(kDoubleSize));
|
||||
__ movsd(Operand(esp, 0), double_exponent);
|
||||
__ fld_d(Operand(esp, 0)); // E
|
||||
__ movsd(Operand(esp, 0), double_base);
|
||||
__ fld_d(Operand(esp, 0)); // B, E
|
||||
|
||||
// Exponent is in st(1) and base is in st(0)
|
||||
// B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
|
||||
// FYL2X calculates st(1) * log2(st(0))
|
||||
__ fyl2x(); // X
|
||||
__ fld(0); // X, X
|
||||
__ frndint(); // rnd(X), X
|
||||
__ fsub(1); // rnd(X), X-rnd(X)
|
||||
__ fxch(1); // X - rnd(X), rnd(X)
|
||||
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
|
||||
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
|
||||
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
|
||||
__ faddp(1); // 2^(X-rnd(X)), rnd(X)
|
||||
// FSCALE calculates st(0) * 2^st(1)
|
||||
__ fscale(); // 2^X, rnd(X)
|
||||
__ fstp(1); // 2^X
|
||||
// Bail out to runtime in case of exceptions in the status word.
|
||||
__ fnstsw_ax();
|
||||
__ test_b(eax, Immediate(0x5F)); // We check for all but precision exception.
|
||||
__ j(not_zero, &fast_power_failed, Label::kNear);
|
||||
__ fstp_d(Operand(esp, 0));
|
||||
__ movsd(double_result, Operand(esp, 0));
|
||||
__ add(esp, Immediate(kDoubleSize));
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&fast_power_failed);
|
||||
__ fninit();
|
||||
__ add(esp, Immediate(kDoubleSize));
|
||||
__ jmp(&call_runtime);
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
const XMMRegister double_scratch2 = double_exponent;
|
||||
__ mov(scratch, exponent); // Back up exponent.
|
||||
__ movsd(double_scratch, double_base); // Back up base.
|
||||
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
|
||||
|
||||
// Get absolute value of exponent.
|
||||
Label no_neg, while_true, while_false;
|
||||
__ test(scratch, scratch);
|
||||
__ j(positive, &no_neg, Label::kNear);
|
||||
__ neg(scratch);
|
||||
__ bind(&no_neg);
|
||||
|
||||
__ j(zero, &while_false, Label::kNear);
|
||||
__ shr(scratch, 1);
|
||||
// Above condition means CF==0 && ZF==0. This means that the
|
||||
// bit that has been shifted out is 0 and the result is not 0.
|
||||
__ j(above, &while_true, Label::kNear);
|
||||
__ movsd(double_result, double_scratch);
|
||||
__ j(zero, &while_false, Label::kNear);
|
||||
|
||||
__ bind(&while_true);
|
||||
__ shr(scratch, 1);
|
||||
__ mulsd(double_scratch, double_scratch);
|
||||
__ j(above, &while_true, Label::kNear);
|
||||
__ mulsd(double_result, double_scratch);
|
||||
__ j(not_zero, &while_true);
|
||||
|
||||
__ bind(&while_false);
|
||||
// scratch has the original value of the exponent - if the exponent is
|
||||
// negative, return 1/result.
|
||||
__ test(exponent, exponent);
|
||||
__ j(positive, &done);
|
||||
__ divsd(double_scratch2, double_result);
|
||||
__ movsd(double_result, double_scratch2);
|
||||
// Test whether result is zero. Bail out to check for subnormal result.
|
||||
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
|
||||
__ xorps(double_scratch2, double_scratch2);
|
||||
__ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
|
||||
// double_exponent aliased as double_scratch2 has already been overwritten
|
||||
// and may not have contained the exponent value in the first place when the
|
||||
// exponent is a smi. We reset it with exponent value before bailing out.
|
||||
__ j(not_equal, &done);
|
||||
__ Cvtsi2sd(double_exponent, exponent);
|
||||
|
||||
// Returning or bailing out.
|
||||
__ bind(&call_runtime);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(4, scratch);
|
||||
__ movsd(Operand(esp, 0 * kDoubleSize), double_base);
|
||||
__ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 4);
|
||||
}
|
||||
// Return value is in st(0) on ia32.
|
||||
// Store it into the (fixed) result register.
|
||||
__ sub(esp, Immediate(kDoubleSize));
|
||||
__ fstp_d(Operand(esp, 0));
|
||||
__ movsd(double_result, Operand(esp, 0));
|
||||
__ add(esp, Immediate(kDoubleSize));
|
||||
|
||||
__ bind(&done);
|
||||
__ ret(0);
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
|
@@ -2656,6 +2656,237 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||
__ Jump(at, v0, 0);
|
||||
}
|
||||
|
||||
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
|
||||
Label out_of_range, only_low, negate, done;
|
||||
Register result_reg = t0;
|
||||
|
||||
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
|
||||
Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
|
||||
Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
|
||||
DoubleRegister double_scratch = kLithiumScratchDouble;
|
||||
|
||||
// Account for saved regs.
|
||||
const int kArgumentOffset = 4 * kPointerSize;
|
||||
|
||||
__ Push(result_reg);
|
||||
__ Push(scratch, scratch2, scratch3);
|
||||
|
||||
// Load double input.
|
||||
__ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
|
||||
|
||||
// Clear cumulative exception flags and save the FCSR.
|
||||
__ cfc1(scratch2, FCSR);
|
||||
__ ctc1(zero_reg, FCSR);
|
||||
|
||||
// Try a conversion to a signed integer.
|
||||
__ Trunc_w_d(double_scratch, double_scratch);
|
||||
// Move the converted value into the result register.
|
||||
__ mfc1(scratch3, double_scratch);
|
||||
|
||||
// Retrieve and restore the FCSR.
|
||||
__ cfc1(scratch, FCSR);
|
||||
__ ctc1(scratch2, FCSR);
|
||||
|
||||
// Check for overflow and NaNs.
|
||||
__ And(
|
||||
scratch, scratch,
|
||||
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
|
||||
// If we had no exceptions then set result_reg and we are done.
|
||||
Label error;
|
||||
__ Branch(&error, ne, scratch, Operand(zero_reg));
|
||||
__ Move(result_reg, scratch3);
|
||||
__ Branch(&done);
|
||||
__ bind(&error);
|
||||
|
||||
// Load the double value and perform a manual truncation.
|
||||
Register input_high = scratch2;
|
||||
Register input_low = scratch3;
|
||||
|
||||
__ lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
|
||||
__ lw(input_high,
|
||||
MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
|
||||
|
||||
Label normal_exponent, restore_sign;
|
||||
// Extract the biased exponent in result.
|
||||
__ Ext(result_reg, input_high, HeapNumber::kExponentShift,
|
||||
HeapNumber::kExponentBits);
|
||||
|
||||
// Check for Infinity and NaNs, which should return 0.
|
||||
__ Subu(scratch, result_reg, HeapNumber::kExponentMask);
|
||||
__ Movz(result_reg, zero_reg, scratch);
|
||||
__ Branch(&done, eq, scratch, Operand(zero_reg));
|
||||
|
||||
// Express exponent as delta to (number of mantissa bits + 31).
|
||||
__ Subu(result_reg, result_reg,
|
||||
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
|
||||
|
||||
// If the delta is strictly positive, all bits would be shifted away,
|
||||
// which means that we can return 0.
|
||||
__ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
|
||||
__ mov(result_reg, zero_reg);
|
||||
__ Branch(&done);
|
||||
|
||||
__ bind(&normal_exponent);
|
||||
const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
|
||||
// Calculate shift.
|
||||
__ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
|
||||
|
||||
// Save the sign.
|
||||
Register sign = result_reg;
|
||||
result_reg = no_reg;
|
||||
__ And(sign, input_high, Operand(HeapNumber::kSignMask));
|
||||
|
||||
// On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
|
||||
// to check for this specific case.
|
||||
Label high_shift_needed, high_shift_done;
|
||||
__ Branch(&high_shift_needed, lt, scratch, Operand(32));
|
||||
__ mov(input_high, zero_reg);
|
||||
__ Branch(&high_shift_done);
|
||||
__ bind(&high_shift_needed);
|
||||
|
||||
// Set the implicit 1 before the mantissa part in input_high.
|
||||
__ Or(input_high, input_high,
|
||||
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
|
||||
// Shift the mantissa bits to the correct position.
|
||||
// We don't need to clear non-mantissa bits as they will be shifted away.
|
||||
// If they weren't, it would mean that the answer is in the 32bit range.
|
||||
__ sllv(input_high, input_high, scratch);
|
||||
|
||||
__ bind(&high_shift_done);
|
||||
|
||||
// Replace the shifted bits with bits from the lower mantissa word.
|
||||
Label pos_shift, shift_done;
|
||||
__ li(at, 32);
|
||||
__ subu(scratch, at, scratch);
|
||||
__ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
|
||||
|
||||
// Negate scratch.
|
||||
__ Subu(scratch, zero_reg, scratch);
|
||||
__ sllv(input_low, input_low, scratch);
|
||||
__ Branch(&shift_done);
|
||||
|
||||
__ bind(&pos_shift);
|
||||
__ srlv(input_low, input_low, scratch);
|
||||
|
||||
__ bind(&shift_done);
|
||||
__ Or(input_high, input_high, Operand(input_low));
|
||||
// Restore sign if necessary.
|
||||
__ mov(scratch, sign);
|
||||
result_reg = sign;
|
||||
sign = no_reg;
|
||||
__ Subu(result_reg, zero_reg, input_high);
|
||||
__ Movz(result_reg, input_high, scratch);
|
||||
|
||||
__ bind(&done);
|
||||
__ sw(result_reg, MemOperand(sp, kArgumentOffset));
|
||||
__ Pop(scratch, scratch2, scratch3);
|
||||
__ Pop(result_reg);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
|
||||
const Register exponent = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent == a2);
|
||||
const DoubleRegister double_base = f2;
|
||||
const DoubleRegister double_exponent = f4;
|
||||
const DoubleRegister double_result = f0;
|
||||
const DoubleRegister double_scratch = f6;
|
||||
const FPURegister single_scratch = f8;
|
||||
const Register scratch = t5;
|
||||
const Register scratch2 = t3;
|
||||
|
||||
Label call_runtime, done, int_exponent;
|
||||
|
||||
Label int_exponent_convert;
|
||||
// Detect integer exponents stored as double.
|
||||
__ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
|
||||
double_scratch, scratch2, kCheckForInexactConversion);
|
||||
// scratch2 == 0 means there was no conversion error.
|
||||
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
|
||||
|
||||
__ push(ra);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(0, 2, scratch2);
|
||||
__ MovToFloatParameters(double_base, double_exponent);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 0, 2);
|
||||
}
|
||||
__ pop(ra);
|
||||
__ MovFromFloatResult(double_result);
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&int_exponent_convert);
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
|
||||
// Get two copies of exponent in the registers scratch and exponent.
|
||||
// Exponent has previously been stored into scratch as untagged integer.
|
||||
__ mov(exponent, scratch);
|
||||
|
||||
__ mov_d(double_scratch, double_base); // Back up base.
|
||||
__ Move(double_result, 1.0);
|
||||
|
||||
// Get absolute value of exponent.
|
||||
Label positive_exponent, bail_out;
|
||||
__ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
|
||||
__ Subu(scratch, zero_reg, scratch);
|
||||
// Check when Subu overflows and we get negative result
|
||||
// (happens only when input is MIN_INT).
|
||||
__ Branch(&bail_out, gt, zero_reg, Operand(scratch));
|
||||
__ bind(&positive_exponent);
|
||||
__ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
|
||||
Operand(zero_reg));
|
||||
|
||||
Label while_true, no_carry, loop_end;
|
||||
__ bind(&while_true);
|
||||
|
||||
__ And(scratch2, scratch, 1);
|
||||
|
||||
__ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
|
||||
__ mul_d(double_result, double_result, double_scratch);
|
||||
__ bind(&no_carry);
|
||||
|
||||
__ sra(scratch, scratch, 1);
|
||||
|
||||
__ Branch(&loop_end, eq, scratch, Operand(zero_reg));
|
||||
__ mul_d(double_scratch, double_scratch, double_scratch);
|
||||
|
||||
__ Branch(&while_true);
|
||||
|
||||
__ bind(&loop_end);
|
||||
|
||||
__ Branch(&done, ge, exponent, Operand(zero_reg));
|
||||
__ Move(double_scratch, 1.0);
|
||||
__ div_d(double_result, double_scratch, double_result);
|
||||
// Test whether result is zero. Bail out to check for subnormal result.
|
||||
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
|
||||
__ CompareF64(EQ, double_result, kDoubleRegZero);
|
||||
__ BranchFalseShortF(&done);
|
||||
|
||||
// double_exponent may not contain the exponent value if the input was a
|
||||
// smi. We set it with exponent value before bailing out.
|
||||
__ bind(&bail_out);
|
||||
__ mtc1(exponent, single_scratch);
|
||||
__ cvt_d_w(double_exponent, single_scratch);
|
||||
|
||||
// Returning or bailing out.
|
||||
__ push(ra);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(0, 2, scratch);
|
||||
__ MovToFloatParameters(double_base, double_exponent);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 0, 2);
|
||||
}
|
||||
__ pop(ra);
|
||||
__ MovFromFloatResult(double_result);
|
||||
|
||||
__ bind(&done);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
} // namespace internal
|
||||
|
@@ -2675,6 +2675,238 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||
__ Jump(v0);
|
||||
}
|
||||
|
||||
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
|
||||
Label out_of_range, only_low, negate, done;
|
||||
Register result_reg = t0;
|
||||
|
||||
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
|
||||
Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
|
||||
Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
|
||||
DoubleRegister double_scratch = kLithiumScratchDouble;
|
||||
|
||||
// Account for saved regs.
|
||||
const int kArgumentOffset = 4 * kPointerSize;
|
||||
|
||||
__ Push(result_reg);
|
||||
__ Push(scratch, scratch2, scratch3);
|
||||
|
||||
// Load double input.
|
||||
__ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
|
||||
|
||||
// Clear cumulative exception flags and save the FCSR.
|
||||
__ cfc1(scratch2, FCSR);
|
||||
__ ctc1(zero_reg, FCSR);
|
||||
|
||||
// Try a conversion to a signed integer.
|
||||
__ Trunc_w_d(double_scratch, double_scratch);
|
||||
// Move the converted value into the result register.
|
||||
__ mfc1(scratch3, double_scratch);
|
||||
|
||||
// Retrieve and restore the FCSR.
|
||||
__ cfc1(scratch, FCSR);
|
||||
__ ctc1(scratch2, FCSR);
|
||||
|
||||
// Check for overflow and NaNs.
|
||||
__ And(
|
||||
scratch, scratch,
|
||||
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
|
||||
// If we had no exceptions then set result_reg and we are done.
|
||||
Label error;
|
||||
__ Branch(&error, ne, scratch, Operand(zero_reg));
|
||||
__ Move(result_reg, scratch3);
|
||||
__ Branch(&done);
|
||||
__ bind(&error);
|
||||
|
||||
// Load the double value and perform a manual truncation.
|
||||
Register input_high = scratch2;
|
||||
Register input_low = scratch3;
|
||||
|
||||
__ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
|
||||
__ Lw(input_high,
|
||||
MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
|
||||
|
||||
Label normal_exponent, restore_sign;
|
||||
// Extract the biased exponent in result.
|
||||
__ Ext(result_reg, input_high, HeapNumber::kExponentShift,
|
||||
HeapNumber::kExponentBits);
|
||||
|
||||
// Check for Infinity and NaNs, which should return 0.
|
||||
__ Subu(scratch, result_reg, HeapNumber::kExponentMask);
|
||||
__ Movz(result_reg, zero_reg, scratch);
|
||||
__ Branch(&done, eq, scratch, Operand(zero_reg));
|
||||
|
||||
// Express exponent as delta to (number of mantissa bits + 31).
|
||||
__ Subu(result_reg, result_reg,
|
||||
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
|
||||
|
||||
// If the delta is strictly positive, all bits would be shifted away,
|
||||
// which means that we can return 0.
|
||||
__ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
|
||||
__ mov(result_reg, zero_reg);
|
||||
__ Branch(&done);
|
||||
|
||||
__ bind(&normal_exponent);
|
||||
const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
|
||||
// Calculate shift.
|
||||
__ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
|
||||
|
||||
// Save the sign.
|
||||
Register sign = result_reg;
|
||||
result_reg = no_reg;
|
||||
__ And(sign, input_high, Operand(HeapNumber::kSignMask));
|
||||
|
||||
// On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
|
||||
// to check for this specific case.
|
||||
Label high_shift_needed, high_shift_done;
|
||||
__ Branch(&high_shift_needed, lt, scratch, Operand(32));
|
||||
__ mov(input_high, zero_reg);
|
||||
__ Branch(&high_shift_done);
|
||||
__ bind(&high_shift_needed);
|
||||
|
||||
// Set the implicit 1 before the mantissa part in input_high.
|
||||
__ Or(input_high, input_high,
|
||||
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
|
||||
// Shift the mantissa bits to the correct position.
|
||||
// We don't need to clear non-mantissa bits as they will be shifted away.
|
||||
// If they weren't, it would mean that the answer is in the 32bit range.
|
||||
__ sllv(input_high, input_high, scratch);
|
||||
|
||||
__ bind(&high_shift_done);
|
||||
|
||||
// Replace the shifted bits with bits from the lower mantissa word.
|
||||
Label pos_shift, shift_done;
|
||||
__ li(at, 32);
|
||||
__ subu(scratch, at, scratch);
|
||||
__ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
|
||||
|
||||
// Negate scratch.
|
||||
__ Subu(scratch, zero_reg, scratch);
|
||||
__ sllv(input_low, input_low, scratch);
|
||||
__ Branch(&shift_done);
|
||||
|
||||
__ bind(&pos_shift);
|
||||
__ srlv(input_low, input_low, scratch);
|
||||
|
||||
__ bind(&shift_done);
|
||||
__ Or(input_high, input_high, Operand(input_low));
|
||||
// Restore sign if necessary.
|
||||
__ mov(scratch, sign);
|
||||
result_reg = sign;
|
||||
sign = no_reg;
|
||||
__ Subu(result_reg, zero_reg, input_high);
|
||||
__ Movz(result_reg, input_high, scratch);
|
||||
|
||||
__ bind(&done);
|
||||
|
||||
__ Sd(result_reg, MemOperand(sp, kArgumentOffset));
|
||||
__ Pop(scratch, scratch2, scratch3);
|
||||
__ Pop(result_reg);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
|
||||
const Register exponent = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent == a2);
|
||||
const DoubleRegister double_base = f2;
|
||||
const DoubleRegister double_exponent = f4;
|
||||
const DoubleRegister double_result = f0;
|
||||
const DoubleRegister double_scratch = f6;
|
||||
const FPURegister single_scratch = f8;
|
||||
const Register scratch = t1;
|
||||
const Register scratch2 = a7;
|
||||
|
||||
Label call_runtime, done, int_exponent;
|
||||
|
||||
Label int_exponent_convert;
|
||||
// Detect integer exponents stored as double.
|
||||
__ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
|
||||
double_scratch, scratch2, kCheckForInexactConversion);
|
||||
// scratch2 == 0 means there was no conversion error.
|
||||
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
|
||||
|
||||
__ push(ra);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(0, 2, scratch2);
|
||||
__ MovToFloatParameters(double_base, double_exponent);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 0, 2);
|
||||
}
|
||||
__ pop(ra);
|
||||
__ MovFromFloatResult(double_result);
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&int_exponent_convert);
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
|
||||
// Get two copies of exponent in the registers scratch and exponent.
|
||||
// Exponent has previously been stored into scratch as untagged integer.
|
||||
__ mov(exponent, scratch);
|
||||
|
||||
__ mov_d(double_scratch, double_base); // Back up base.
|
||||
__ Move(double_result, 1.0);
|
||||
|
||||
// Get absolute value of exponent.
|
||||
Label positive_exponent, bail_out;
|
||||
__ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
|
||||
__ Dsubu(scratch, zero_reg, scratch);
|
||||
// Check when Dsubu overflows and we get negative result
|
||||
// (happens only when input is MIN_INT).
|
||||
__ Branch(&bail_out, gt, zero_reg, Operand(scratch));
|
||||
__ bind(&positive_exponent);
|
||||
__ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
|
||||
Operand(zero_reg));
|
||||
|
||||
Label while_true, no_carry, loop_end;
|
||||
__ bind(&while_true);
|
||||
|
||||
__ And(scratch2, scratch, 1);
|
||||
|
||||
__ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
|
||||
__ mul_d(double_result, double_result, double_scratch);
|
||||
__ bind(&no_carry);
|
||||
|
||||
__ dsra(scratch, scratch, 1);
|
||||
|
||||
__ Branch(&loop_end, eq, scratch, Operand(zero_reg));
|
||||
__ mul_d(double_scratch, double_scratch, double_scratch);
|
||||
|
||||
__ Branch(&while_true);
|
||||
|
||||
__ bind(&loop_end);
|
||||
|
||||
__ Branch(&done, ge, exponent, Operand(zero_reg));
|
||||
__ Move(double_scratch, 1.0);
|
||||
__ div_d(double_result, double_scratch, double_result);
|
||||
// Test whether result is zero. Bail out to check for subnormal result.
|
||||
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
|
||||
__ CompareF64(EQ, double_result, kDoubleRegZero);
|
||||
__ BranchFalseShortF(&done);
|
||||
|
||||
// double_exponent may not contain the exponent value if the input was a
|
||||
// smi. We set it with exponent value before bailing out.
|
||||
__ bind(&bail_out);
|
||||
__ mtc1(exponent, single_scratch);
|
||||
__ cvt_d_w(double_exponent, single_scratch);
|
||||
|
||||
// Returning or bailing out.
|
||||
__ push(ra);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(0, 2, scratch);
|
||||
__ MovToFloatParameters(double_base, double_exponent);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 0, 2);
|
||||
}
|
||||
__ pop(ra);
|
||||
__ MovFromFloatResult(double_result);
|
||||
|
||||
__ bind(&done);
|
||||
__ Ret();
|
||||
}
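
The integer fast path above is binary exponentiation (square-and-multiply) over the absolute value of the exponent, followed by a reciprocal for negative exponents and a bailout to the C runtime when that reciprocal underflows. A standalone C++ sketch of the same scheme, with illustrative names that are not part of V8:

// --- illustrative sketch, not part of this commit ---
#include <cmath>
#include <cstdint>

double PowIntExponent(double base, int32_t exponent) {
  double result = 1.0;
  double scratch = base;  // holds base^(2^k) for the current bit k
  uint32_t e = exponent < 0 ? 0u - static_cast<uint32_t>(exponent)
                            : static_cast<uint32_t>(exponent);
  while (e != 0) {
    if (e & 1) result *= scratch;  // multiply in this bit's power
    scratch *= scratch;
    e >>= 1;
  }
  if (exponent < 0) {
    result = 1.0 / result;
    // x^-y == (1/x)^y does not hold once the result goes subnormal, so
    // recompute precisely, mirroring the builtin's call into the runtime.
    if (result == 0.0) return std::pow(base, exponent);
  }
  return result;
}
// --- end sketch ---

The builtin only reaches this loop after EmitFPUTruncate has verified that the double exponent is integer-valued; otherwise it calls power_double_double_function directly.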
|
||||
|
||||
#undef __
|
||||
|
||||
} // namespace internal
|
||||
|
@ -2755,6 +2755,202 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  __ jmp(r11);
}

void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  Label check_negative, process_64_bits, done;

  // Account for return address and saved regs.
  const int kArgumentOffset = 4 * kRegisterSize;

  MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
  MemOperand exponent_operand(
      MemOperand(rsp, kArgumentOffset + kDoubleSize / 2));

  // The result is returned on the stack.
  MemOperand return_operand = mantissa_operand;

  Register scratch1 = rbx;

  // Since we must use rcx for shifts below, use some other register (rax)
  // to calculate the result if ecx is the requested return register.
  Register result_reg = rax;
  // Save ecx if it isn't the return register and therefore volatile, or if it
  // is the return register, then save the temp register we use in its stead
  // for the result.
  Register save_reg = rax;
  __ pushq(rcx);
  __ pushq(scratch1);
  __ pushq(save_reg);

  __ movl(scratch1, mantissa_operand);
  __ Movsd(kScratchDoubleReg, mantissa_operand);
  __ movl(rcx, exponent_operand);

  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
  __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
  __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
  __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
  __ j(below, &process_64_bits);

  // Result is entirely in lower 32-bits of mantissa
  int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
  __ subl(rcx, Immediate(delta));
  __ xorl(result_reg, result_reg);
  __ cmpl(rcx, Immediate(31));
  __ j(above, &done);
  __ shll_cl(scratch1);
  __ jmp(&check_negative);

  __ bind(&process_64_bits);
  __ Cvttsd2siq(result_reg, kScratchDoubleReg);
  __ jmp(&done, Label::kNear);

  // If the double was negative, negate the integer result.
  __ bind(&check_negative);
  __ movl(result_reg, scratch1);
  __ negl(result_reg);
  __ cmpl(exponent_operand, Immediate(0));
  __ cmovl(greater, result_reg, scratch1);

  // Restore registers
  __ bind(&done);
  __ movl(return_operand, result_reg);
  __ popq(save_reg);
  __ popq(scratch1);
  __ popq(rcx);
  __ ret(0);
}
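
Generate_DoubleToI implements the truncation used by the JS bitwise operations (ToInt32): small exponents go through a single hardware conversion, large ones shift the mantissa into place and keep only the low 32 bits, with the sign reapplied at the end. A standalone C++ sketch of the same computation; it is not V8 code, and the constants and names are illustrative:

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <cstring>

// ECMA-262 ToInt32: trunc(x) modulo 2^32, with NaN and +/-Infinity mapping
// to 0. Assumes IEEE-754 binary64 and two's-complement narrowing.
int32_t ToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);
  const int kMantissaBits = 52;
  const int kExponentBias = 1023;
  int exponent =
      static_cast<int>((bits >> kMantissaBits) & 0x7FF) - kExponentBias;

  uint32_t magnitude;
  if (exponent < 0) {
    magnitude = 0;  // |input| < 1 truncates to 0 (also covers subnormals)
  } else if (exponent < kMantissaBits) {
    // The integer part fits in 53 bits, so a 64-bit truncation is exact.
    double positive = input < 0 ? -input : input;
    magnitude = static_cast<uint32_t>(static_cast<int64_t>(positive));
  } else {
    // Large values, NaN and Infinity (maximal exponent field): position the
    // mantissa with its implicit leading 1; shifting by 32 or more pushes
    // every bit out of the low word and yields 0.
    uint64_t mantissa = (bits & ((uint64_t{1} << kMantissaBits) - 1)) |
                        (uint64_t{1} << kMantissaBits);
    int shift = exponent - kMantissaBits;
    magnitude = shift < 64 ? static_cast<uint32_t>(mantissa << shift) : 0;
  }
  uint32_t result = (bits >> 63) ? 0u - magnitude : magnitude;  // sign mod 2^32
  return static_cast<int32_t>(result);
}
// --- end sketch ---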
|
||||
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
|
||||
const Register exponent = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent == rdx);
|
||||
const Register scratch = rcx;
|
||||
const XMMRegister double_result = xmm3;
|
||||
const XMMRegister double_base = xmm2;
|
||||
const XMMRegister double_exponent = xmm1;
|
||||
const XMMRegister double_scratch = xmm4;
|
||||
|
||||
Label call_runtime, done, exponent_not_smi, int_exponent;
|
||||
|
||||
// Save 1 in double_result - we need this several times later on.
|
||||
__ movp(scratch, Immediate(1));
|
||||
__ Cvtlsi2sd(double_result, scratch);
|
||||
|
||||
Label fast_power, try_arithmetic_simplification;
|
||||
// Detect integer exponents stored as double.
|
||||
__ DoubleToI(exponent, double_exponent, double_scratch,
|
||||
&try_arithmetic_simplification, &try_arithmetic_simplification);
|
||||
__ jmp(&int_exponent);
|
||||
|
||||
__ bind(&try_arithmetic_simplification);
|
||||
__ Cvttsd2si(exponent, double_exponent);
|
||||
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
|
||||
__ cmpl(exponent, Immediate(0x1));
|
||||
__ j(overflow, &call_runtime);
|
||||
|
||||
// Using FPU instructions to calculate power.
|
||||
Label fast_power_failed;
|
||||
__ bind(&fast_power);
|
||||
__ fnclex(); // Clear flags to catch exceptions later.
|
||||
// Transfer (B)ase and (E)xponent onto the FPU register stack.
|
||||
__ subp(rsp, Immediate(kDoubleSize));
|
||||
__ Movsd(Operand(rsp, 0), double_exponent);
|
||||
__ fld_d(Operand(rsp, 0)); // E
|
||||
__ Movsd(Operand(rsp, 0), double_base);
|
||||
__ fld_d(Operand(rsp, 0)); // B, E
|
||||
|
||||
// Exponent is in st(1) and base is in st(0)
|
||||
// B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
|
||||
// FYL2X calculates st(1) * log2(st(0))
|
||||
__ fyl2x(); // X
|
||||
__ fld(0); // X, X
|
||||
__ frndint(); // rnd(X), X
|
||||
__ fsub(1); // rnd(X), X-rnd(X)
|
||||
__ fxch(1); // X - rnd(X), rnd(X)
|
||||
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
|
||||
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
|
||||
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
|
||||
__ faddp(1); // 2^(X-rnd(X)), rnd(X)
|
||||
// FSCALE calculates st(0) * 2^st(1)
|
||||
__ fscale(); // 2^X, rnd(X)
|
||||
__ fstp(1);
|
||||
// Bail out to runtime in case of exceptions in the status word.
|
||||
__ fnstsw_ax();
|
||||
__ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
|
||||
__ j(not_zero, &fast_power_failed, Label::kNear);
|
||||
__ fstp_d(Operand(rsp, 0));
|
||||
__ Movsd(double_result, Operand(rsp, 0));
|
||||
__ addp(rsp, Immediate(kDoubleSize));
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&fast_power_failed);
|
||||
__ fninit();
|
||||
__ addp(rsp, Immediate(kDoubleSize));
|
||||
__ jmp(&call_runtime);
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
const XMMRegister double_scratch2 = double_exponent;
|
||||
// Back up exponent as we need to check if exponent is negative later.
|
||||
__ movp(scratch, exponent); // Back up exponent.
|
||||
__ Movsd(double_scratch, double_base); // Back up base.
|
||||
__ Movsd(double_scratch2, double_result); // Load double_exponent with 1.
|
||||
|
||||
// Get absolute value of exponent.
|
||||
Label no_neg, while_true, while_false;
|
||||
__ testl(scratch, scratch);
|
||||
__ j(positive, &no_neg, Label::kNear);
|
||||
__ negl(scratch);
|
||||
__ bind(&no_neg);
|
||||
|
||||
__ j(zero, &while_false, Label::kNear);
|
||||
__ shrl(scratch, Immediate(1));
|
||||
// Above condition means CF==0 && ZF==0. This means that the
|
||||
// bit that has been shifted out is 0 and the result is not 0.
|
||||
__ j(above, &while_true, Label::kNear);
|
||||
__ Movsd(double_result, double_scratch);
|
||||
__ j(zero, &while_false, Label::kNear);
|
||||
|
||||
__ bind(&while_true);
|
||||
__ shrl(scratch, Immediate(1));
|
||||
__ Mulsd(double_scratch, double_scratch);
|
||||
__ j(above, &while_true, Label::kNear);
|
||||
__ Mulsd(double_result, double_scratch);
|
||||
__ j(not_zero, &while_true);
|
||||
|
||||
__ bind(&while_false);
|
||||
// If the exponent is negative, return 1/result.
|
||||
__ testl(exponent, exponent);
|
||||
__ j(greater, &done);
|
||||
__ Divsd(double_scratch2, double_result);
|
||||
__ Movsd(double_result, double_scratch2);
|
||||
// Test whether result is zero. Bail out to check for subnormal result.
|
||||
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
|
||||
__ Xorpd(double_scratch2, double_scratch2);
|
||||
__ Ucomisd(double_scratch2, double_result);
|
||||
// double_exponent aliased as double_scratch2 has already been overwritten
|
||||
// and may not have contained the exponent value in the first place when the
|
||||
// input was a smi. We reset it with exponent value before bailing out.
|
||||
__ j(not_equal, &done);
|
||||
__ Cvtlsi2sd(double_exponent, exponent);
|
||||
|
||||
// Returning or bailing out.
|
||||
__ bind(&call_runtime);
|
||||
// Move base to the correct argument register. Exponent is already in xmm1.
|
||||
__ Movsd(xmm0, double_base);
|
||||
DCHECK(double_exponent == xmm1);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(2);
|
||||
__ CallCFunction(
|
||||
ExternalReference::power_double_double_function(masm->isolate()), 2);
|
||||
}
|
||||
// Return value is in xmm0.
|
||||
__ Movsd(double_result, xmm0);
|
||||
|
||||
__ bind(&done);
|
||||
__ ret(0);
|
||||
}
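
The x87 fast path computes pow through base-2 exponentials, B^E = 2^(E * log2(B)), splitting X = E * log2(B) into an integer part and a fraction so that F2XM1 stays inside its (-1, 1) domain. A rough C++ rendering of the identity (illustrative only; it ignores the cases, such as non-positive bases or status-word exceptions, for which the builtin falls back to the C runtime):

// --- illustrative sketch, not part of this commit ---
#include <cmath>

double PowViaExp2(double base, double exponent) {
  double x = exponent * std::log2(base);       // FYL2X: st(1) * log2(st(0))
  double n = std::nearbyint(x);                // FRNDINT
  double r = x - n;                            // FSUB: fractional part, |r| < 1
  double pow2_r = (std::exp2(r) - 1.0) + 1.0;  // F2XM1, then FLD1/FADDP
  return pow2_r * std::exp2(n);                // FSCALE: scale by 2^n
}
// --- end sketch ---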

#undef __

}  // namespace internal

@ -32,10 +32,8 @@ class Node;
|
||||
V(CallApiCallback) \
|
||||
V(CallApiGetter) \
|
||||
V(CEntry) \
|
||||
V(DoubleToI) \
|
||||
V(InternalArrayConstructor) \
|
||||
V(JSEntry) \
|
||||
V(MathPow) \
|
||||
V(ProfileEntryHook) \
|
||||
/* --- TurboFanCodeStubs --- */ \
|
||||
V(StoreSlowElement) \
|
||||
@ -434,6 +432,7 @@ class TurboFanCodeStub : public CodeStub {
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class StoreInterceptorStub : public TurboFanCodeStub {
|
||||
public:
|
||||
explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
|
||||
@ -473,6 +472,7 @@ class TransitionElementsKindStub : public TurboFanCodeStub {
|
||||
DEFINE_TURBOFAN_CODE_STUB(TransitionElementsKind, TurboFanCodeStub);
|
||||
};
|
||||
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class LoadIndexedInterceptorStub : public TurboFanCodeStub {
|
||||
public:
|
||||
explicit LoadIndexedInterceptorStub(Isolate* isolate)
|
||||
@ -483,6 +483,7 @@ class LoadIndexedInterceptorStub : public TurboFanCodeStub {
|
||||
};
|
||||
|
||||
// ES6 [[Get]] operation.
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class GetPropertyStub : public TurboFanCodeStub {
|
||||
public:
|
||||
explicit GetPropertyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
|
||||
@ -491,14 +492,13 @@ class GetPropertyStub : public TurboFanCodeStub {
|
||||
DEFINE_TURBOFAN_CODE_STUB(GetProperty, TurboFanCodeStub);
|
||||
};
|
||||
|
||||
|
||||
enum AllocationSiteOverrideMode {
|
||||
DONT_OVERRIDE,
|
||||
DISABLE_ALLOCATION_SITES,
|
||||
LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
|
||||
};
|
||||
|
||||
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class ArrayConstructorStub: public PlatformCodeStub {
|
||||
public:
|
||||
explicit ArrayConstructorStub(Isolate* isolate);
|
||||
@ -511,7 +511,7 @@ class ArrayConstructorStub: public PlatformCodeStub {
|
||||
DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
|
||||
};
|
||||
|
||||
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class InternalArrayConstructorStub: public PlatformCodeStub {
|
||||
public:
|
||||
explicit InternalArrayConstructorStub(Isolate* isolate);
|
||||
@ -523,20 +523,7 @@ class InternalArrayConstructorStub: public PlatformCodeStub {
|
||||
DEFINE_PLATFORM_CODE_STUB(InternalArrayConstructor, PlatformCodeStub);
|
||||
};
|
||||
|
||||
|
||||
class MathPowStub: public PlatformCodeStub {
|
||||
public:
|
||||
MathPowStub() : PlatformCodeStub(nullptr) {}
|
||||
|
||||
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
|
||||
// A CallInterfaceDescriptor doesn't specify double registers (yet).
|
||||
return ContextOnlyDescriptor(isolate());
|
||||
}
|
||||
|
||||
private:
|
||||
DEFINE_PLATFORM_CODE_STUB(MathPow, PlatformCodeStub);
|
||||
};
|
||||
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
|
||||
public:
|
||||
explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
|
||||
@ -584,7 +571,7 @@ class CallApiCallbackStub : public PlatformCodeStub {
|
||||
DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
|
||||
};
|
||||
|
||||
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class CallApiGetterStub : public PlatformCodeStub {
|
||||
public:
|
||||
explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
|
||||
@ -713,35 +700,6 @@ class JSEntryStub : public PlatformCodeStub {
|
||||
DEFINE_PLATFORM_CODE_STUB(JSEntry, PlatformCodeStub);
|
||||
};
|
||||
|
||||
|
||||
enum ReceiverCheckMode {
|
||||
// We don't know anything about the receiver.
|
||||
RECEIVER_IS_UNKNOWN,
|
||||
|
||||
// We know the receiver is a string.
|
||||
RECEIVER_IS_STRING
|
||||
};
|
||||
|
||||
|
||||
enum EmbedMode {
|
||||
// The code being generated is part of an IC handler, which may MISS
|
||||
// to an IC in failure cases.
|
||||
PART_OF_IC_HANDLER,
|
||||
|
||||
NOT_PART_OF_IC_HANDLER
|
||||
};
|
||||
|
||||
class DoubleToIStub : public PlatformCodeStub {
|
||||
public:
|
||||
explicit DoubleToIStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
|
||||
|
||||
bool SometimesSetsUpAFrame() override { return false; }
|
||||
|
||||
private:
|
||||
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
|
||||
DEFINE_PLATFORM_CODE_STUB(DoubleToI, PlatformCodeStub);
|
||||
};
|
||||
|
||||
class StoreFastElementStub : public TurboFanCodeStub {
|
||||
public:
|
||||
StoreFastElementStub(Isolate* isolate, bool is_js_array,
|
||||
@ -873,6 +831,7 @@ class InternalArraySingleArgumentConstructorStub
|
||||
CommonArrayConstructorStub);
|
||||
};
|
||||
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class ArrayNArgumentsConstructorStub : public PlatformCodeStub {
|
||||
public:
|
||||
explicit ArrayNArgumentsConstructorStub(Isolate* isolate)
|
||||
@ -938,7 +897,7 @@ class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
|
||||
DEFINE_TURBOFAN_CODE_STUB(ElementsTransitionAndStore, TurboFanCodeStub);
|
||||
};
|
||||
|
||||
|
||||
// TODO(jgruber): Convert this stub into a builtin.
|
||||
class ProfileEntryHookStub : public PlatformCodeStub {
|
||||
public:
|
||||
explicit ProfileEntryHookStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
|
||||
|
@ -920,8 +920,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
                                  i.InputDoubleRegister(0));
      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                           i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchStoreWithWriteBarrier: {
@ -1011,7 +1011,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      __ CallStubDelayed(new (zone()) MathPowStub());
      __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
      __ vmov(d0, d2);
      break;
    }
|
@ -855,8 +855,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
__ mov(i.OutputRegister(), root);
|
||||
break;
|
||||
case kArchTruncateDoubleToI:
|
||||
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
|
||||
i.InputDoubleRegister(0));
|
||||
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
|
||||
i.InputDoubleRegister(0));
|
||||
break;
|
||||
case kArchStoreWithWriteBarrier: {
|
||||
RecordWriteMode mode =
|
||||
@ -940,7 +940,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
ASSEMBLE_IEEE754_UNOP(log10);
|
||||
break;
|
||||
case kIeee754Float64Pow: {
|
||||
__ CallStubDelayed(new (zone()) MathPowStub());
|
||||
__ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
|
||||
break;
|
||||
}
|
||||
case kIeee754Float64Sin:
|
||||
|
@ -210,12 +210,13 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
      : OutOfLineCode(gen),
        result_(result),
        input_(input),
        isolate_(gen->isolate()),
        zone_(gen->zone()) {}

  void Generate() final {
    __ sub(esp, Immediate(kDoubleSize));
    __ movsd(MemOperand(esp, 0), input_);
    __ SlowTruncateToIDelayed(zone_);
    __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
    __ mov(result_, MemOperand(esp, 0));
    __ add(esp, Immediate(kDoubleSize));
  }
@ -223,6 +224,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 private:
  Register const result_;
  XMMRegister const input_;
  Isolate* isolate_;
  Zone* zone_;
};

@ -914,7 +916,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        __ movaps(xmm1, xmm2);
        __ movaps(xmm2, xmm0);
      }
      __ CallStubDelayed(new (zone()) MathPowStub());
      __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
      __ movaps(i.OutputDoubleRegister(), xmm3);
      break;
    }
|
@ -806,8 +806,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
__ mov(i.OutputRegister(), kRootRegister);
|
||||
break;
|
||||
case kArchTruncateDoubleToI:
|
||||
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
|
||||
i.InputDoubleRegister(0));
|
||||
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
|
||||
i.InputDoubleRegister(0));
|
||||
break;
|
||||
case kArchStoreWithWriteBarrier: {
|
||||
RecordWriteMode mode =
|
||||
@ -914,7 +914,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
ASSEMBLE_IEEE754_UNOP(log2);
|
||||
break;
|
||||
case kIeee754Float64Pow: {
|
||||
__ CallStubDelayed(new (zone()) MathPowStub());
|
||||
__ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
|
||||
break;
|
||||
}
|
||||
case kIeee754Float64Sin:
|
||||
|
@ -826,8 +826,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
__ mov(i.OutputRegister(), kRootRegister);
|
||||
break;
|
||||
case kArchTruncateDoubleToI:
|
||||
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
|
||||
i.InputDoubleRegister(0));
|
||||
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
|
||||
i.InputDoubleRegister(0));
|
||||
break;
|
||||
case kArchStoreWithWriteBarrier: {
|
||||
RecordWriteMode mode =
|
||||
@ -934,7 +934,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
ASSEMBLE_IEEE754_UNOP(log10);
|
||||
break;
|
||||
case kIeee754Float64Pow: {
|
||||
__ CallStubDelayed(new (zone()) MathPowStub());
|
||||
__ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
|
||||
break;
|
||||
}
|
||||
case kIeee754Float64Sin:
|
||||
|
@ -1023,14 +1023,7 @@ void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
|
||||
bool is_wasm = target->IsCode() &&
|
||||
(Code::cast(target)->kind() == Code::WASM_FUNCTION ||
|
||||
Code::cast(target)->kind() == Code::WASM_TO_JS_FUNCTION);
|
||||
bool is_allowed_stub = false;
|
||||
if (target->IsCode()) {
|
||||
Code* code = Code::cast(target);
|
||||
is_allowed_stub =
|
||||
code->kind() == Code::STUB &&
|
||||
CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI;
|
||||
}
|
||||
CHECK(is_immovable || is_wasm || is_allowed_stub);
|
||||
CHECK(is_immovable || is_wasm);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@ -4740,16 +4740,7 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
DCHECK_NOT_NULL(target);
|
||||
bool is_immovable =
|
||||
target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
|
||||
bool is_allowed_stub = false;
|
||||
if (target->IsCode()) {
|
||||
Code* code = Code::cast(target);
|
||||
is_allowed_stub =
|
||||
code->kind() == Code::STUB &&
|
||||
CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI;
|
||||
}
|
||||
DCHECK(is_immovable || is_allowed_stub);
|
||||
DCHECK(target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target)));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@ -193,6 +193,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
|
||||
result_(result),
|
||||
input_(input),
|
||||
unwinding_info_writer_(unwinding_info_writer),
|
||||
isolate_(gen->isolate()),
|
||||
zone_(gen->zone()) {}
|
||||
|
||||
void Generate() final {
|
||||
@ -200,7 +201,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
|
||||
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
|
||||
kDoubleSize);
|
||||
__ Movsd(MemOperand(rsp, 0), input_);
|
||||
__ SlowTruncateToIDelayed(zone_);
|
||||
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
|
||||
__ movl(result_, MemOperand(rsp, 0));
|
||||
__ addp(rsp, Immediate(kDoubleSize));
|
||||
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
|
||||
@ -211,6 +212,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
|
||||
Register const result_;
|
||||
XMMRegister const input_;
|
||||
UnwindingInfoWriter* const unwinding_info_writer_;
|
||||
Isolate* isolate_;
|
||||
Zone* zone_;
|
||||
};
|
||||
|
||||
@ -1027,7 +1029,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
case kIeee754Float64Pow: {
|
||||
// TODO(bmeurer): Improve integration of the stub.
|
||||
__ Movsd(xmm2, xmm0);
|
||||
__ CallStubDelayed(new (zone()) MathPowStub());
|
||||
__ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
|
||||
__ Movsd(xmm0, xmm3);
|
||||
break;
|
||||
}
|
||||
|
@ -34,234 +34,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
|
||||
__ TailCallRuntime(Runtime::kNewArray);
|
||||
}
|
||||
|
||||
|
||||
void DoubleToIStub::Generate(MacroAssembler* masm) {
|
||||
Label check_negative, process_64_bits, done;
|
||||
|
||||
// Account for return address and saved regs.
|
||||
const int kArgumentOffset = 4 * kPointerSize;
|
||||
|
||||
MemOperand mantissa_operand(MemOperand(esp, kArgumentOffset));
|
||||
MemOperand exponent_operand(
|
||||
MemOperand(esp, kArgumentOffset + kDoubleSize / 2));
|
||||
|
||||
// The result is returned on the stack.
|
||||
MemOperand return_operand = mantissa_operand;
|
||||
|
||||
Register scratch1 = ebx;
|
||||
|
||||
// Since we must use ecx for shifts below, use some other register (eax)
|
||||
// to calculate the result.
|
||||
Register result_reg = eax;
|
||||
// Save ecx if it isn't the return register and therefore volatile, or if it
|
||||
// is the return register, then save the temp register we use in its stead for
|
||||
// the result.
|
||||
Register save_reg = eax;
|
||||
__ push(ecx);
|
||||
__ push(scratch1);
|
||||
__ push(save_reg);
|
||||
|
||||
__ mov(scratch1, mantissa_operand);
|
||||
if (CpuFeatures::IsSupported(SSE3)) {
|
||||
CpuFeatureScope scope(masm, SSE3);
|
||||
// Load x87 register with heap number.
|
||||
__ fld_d(mantissa_operand);
|
||||
}
|
||||
__ mov(ecx, exponent_operand);
|
||||
|
||||
__ and_(ecx, HeapNumber::kExponentMask);
|
||||
__ shr(ecx, HeapNumber::kExponentShift);
|
||||
__ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
|
||||
__ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
|
||||
__ j(below, &process_64_bits);
|
||||
|
||||
// Result is entirely in lower 32-bits of mantissa
|
||||
int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
|
||||
if (CpuFeatures::IsSupported(SSE3)) {
|
||||
__ fstp(0);
|
||||
}
|
||||
__ sub(ecx, Immediate(delta));
|
||||
__ xor_(result_reg, result_reg);
|
||||
__ cmp(ecx, Immediate(31));
|
||||
__ j(above, &done);
|
||||
__ shl_cl(scratch1);
|
||||
__ jmp(&check_negative);
|
||||
|
||||
__ bind(&process_64_bits);
|
||||
if (CpuFeatures::IsSupported(SSE3)) {
|
||||
CpuFeatureScope scope(masm, SSE3);
|
||||
// Reserve space for 64 bit answer.
|
||||
__ sub(esp, Immediate(kDoubleSize)); // Nolint.
|
||||
// Do conversion, which cannot fail because we checked the exponent.
|
||||
__ fisttp_d(Operand(esp, 0));
|
||||
__ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
|
||||
__ add(esp, Immediate(kDoubleSize));
|
||||
__ jmp(&done);
|
||||
} else {
|
||||
// Result must be extracted from shifted 32-bit mantissa
|
||||
__ sub(ecx, Immediate(delta));
|
||||
__ neg(ecx);
|
||||
__ mov(result_reg, exponent_operand);
|
||||
__ and_(result_reg,
|
||||
Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
|
||||
__ add(result_reg,
|
||||
Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
|
||||
__ shrd_cl(scratch1, result_reg);
|
||||
__ shr_cl(result_reg);
|
||||
__ test(ecx, Immediate(32));
|
||||
__ cmov(not_equal, scratch1, result_reg);
|
||||
}
|
||||
|
||||
// If the double was negative, negate the integer result.
|
||||
__ bind(&check_negative);
|
||||
__ mov(result_reg, scratch1);
|
||||
__ neg(result_reg);
|
||||
__ cmp(exponent_operand, Immediate(0));
|
||||
__ cmov(greater, result_reg, scratch1);
|
||||
|
||||
// Restore registers
|
||||
__ bind(&done);
|
||||
__ mov(return_operand, result_reg);
|
||||
__ pop(save_reg);
|
||||
__ pop(scratch1);
|
||||
__ pop(ecx);
|
||||
__ ret(0);
|
||||
}
|
||||
|
||||
|
||||
void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
const Register exponent = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent == eax);
|
||||
const Register scratch = ecx;
|
||||
const XMMRegister double_result = xmm3;
|
||||
const XMMRegister double_base = xmm2;
|
||||
const XMMRegister double_exponent = xmm1;
|
||||
const XMMRegister double_scratch = xmm4;
|
||||
|
||||
Label call_runtime, done, exponent_not_smi, int_exponent;
|
||||
|
||||
// Save 1 in double_result - we need this several times later on.
|
||||
__ mov(scratch, Immediate(1));
|
||||
__ Cvtsi2sd(double_result, scratch);
|
||||
|
||||
Label fast_power, try_arithmetic_simplification;
|
||||
__ DoubleToI(exponent, double_exponent, double_scratch,
|
||||
&try_arithmetic_simplification, &try_arithmetic_simplification);
|
||||
__ jmp(&int_exponent);
|
||||
|
||||
__ bind(&try_arithmetic_simplification);
|
||||
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
|
||||
__ cvttsd2si(exponent, Operand(double_exponent));
|
||||
__ cmp(exponent, Immediate(0x1));
|
||||
__ j(overflow, &call_runtime);
|
||||
|
||||
// Using FPU instructions to calculate power.
|
||||
Label fast_power_failed;
|
||||
__ bind(&fast_power);
|
||||
__ fnclex(); // Clear flags to catch exceptions later.
|
||||
// Transfer (B)ase and (E)xponent onto the FPU register stack.
|
||||
__ sub(esp, Immediate(kDoubleSize));
|
||||
__ movsd(Operand(esp, 0), double_exponent);
|
||||
__ fld_d(Operand(esp, 0)); // E
|
||||
__ movsd(Operand(esp, 0), double_base);
|
||||
__ fld_d(Operand(esp, 0)); // B, E
|
||||
|
||||
// Exponent is in st(1) and base is in st(0)
|
||||
// B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
|
||||
// FYL2X calculates st(1) * log2(st(0))
|
||||
__ fyl2x(); // X
|
||||
__ fld(0); // X, X
|
||||
__ frndint(); // rnd(X), X
|
||||
__ fsub(1); // rnd(X), X-rnd(X)
|
||||
__ fxch(1); // X - rnd(X), rnd(X)
|
||||
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
|
||||
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
|
||||
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
|
||||
__ faddp(1); // 2^(X-rnd(X)), rnd(X)
|
||||
// FSCALE calculates st(0) * 2^st(1)
|
||||
__ fscale(); // 2^X, rnd(X)
|
||||
__ fstp(1); // 2^X
|
||||
// Bail out to runtime in case of exceptions in the status word.
|
||||
__ fnstsw_ax();
|
||||
__ test_b(eax, Immediate(0x5F)); // We check for all but precision exception.
|
||||
__ j(not_zero, &fast_power_failed, Label::kNear);
|
||||
__ fstp_d(Operand(esp, 0));
|
||||
__ movsd(double_result, Operand(esp, 0));
|
||||
__ add(esp, Immediate(kDoubleSize));
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&fast_power_failed);
|
||||
__ fninit();
|
||||
__ add(esp, Immediate(kDoubleSize));
|
||||
__ jmp(&call_runtime);
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
const XMMRegister double_scratch2 = double_exponent;
|
||||
__ mov(scratch, exponent); // Back up exponent.
|
||||
__ movsd(double_scratch, double_base); // Back up base.
|
||||
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
|
||||
|
||||
// Get absolute value of exponent.
|
||||
Label no_neg, while_true, while_false;
|
||||
__ test(scratch, scratch);
|
||||
__ j(positive, &no_neg, Label::kNear);
|
||||
__ neg(scratch);
|
||||
__ bind(&no_neg);
|
||||
|
||||
__ j(zero, &while_false, Label::kNear);
|
||||
__ shr(scratch, 1);
|
||||
// Above condition means CF==0 && ZF==0. This means that the
|
||||
// bit that has been shifted out is 0 and the result is not 0.
|
||||
__ j(above, &while_true, Label::kNear);
|
||||
__ movsd(double_result, double_scratch);
|
||||
__ j(zero, &while_false, Label::kNear);
|
||||
|
||||
__ bind(&while_true);
|
||||
__ shr(scratch, 1);
|
||||
__ mulsd(double_scratch, double_scratch);
|
||||
__ j(above, &while_true, Label::kNear);
|
||||
__ mulsd(double_result, double_scratch);
|
||||
__ j(not_zero, &while_true);
|
||||
|
||||
__ bind(&while_false);
|
||||
// scratch has the original value of the exponent - if the exponent is
|
||||
// negative, return 1/result.
|
||||
__ test(exponent, exponent);
|
||||
__ j(positive, &done);
|
||||
__ divsd(double_scratch2, double_result);
|
||||
__ movsd(double_result, double_scratch2);
|
||||
// Test whether result is zero. Bail out to check for subnormal result.
|
||||
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
|
||||
__ xorps(double_scratch2, double_scratch2);
|
||||
__ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
|
||||
// double_exponent aliased as double_scratch2 has already been overwritten
|
||||
// and may not have contained the exponent value in the first place when the
|
||||
// exponent is a smi. We reset it with exponent value before bailing out.
|
||||
__ j(not_equal, &done);
|
||||
__ Cvtsi2sd(double_exponent, exponent);
|
||||
|
||||
// Returning or bailing out.
|
||||
__ bind(&call_runtime);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(4, scratch);
|
||||
__ movsd(Operand(esp, 0 * kDoubleSize), double_base);
|
||||
__ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
|
||||
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
|
||||
4);
|
||||
}
|
||||
// Return value is in st(0) on ia32.
|
||||
// Store it into the (fixed) result register.
|
||||
__ sub(esp, Immediate(kDoubleSize));
|
||||
__ fstp_d(Operand(esp, 0));
|
||||
__ movsd(double_result, Operand(esp, 0));
|
||||
__ add(esp, Immediate(kDoubleSize));
|
||||
|
||||
__ bind(&done);
|
||||
__ ret(0);
|
||||
}
|
||||
|
||||
Movability CEntryStub::NeedsImmovableCode() { return kMovable; }
|
||||
|
||||
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
|
||||
|
@ -177,10 +177,6 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
  return bytes;
}

void TurboAssembler::SlowTruncateToIDelayed(Zone* zone) {
  CallStubDelayed(new (zone) DoubleToIStub(nullptr));
}

void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               XMMRegister scratch, Label* lost_precision,
                               Label* is_nan, Label::Distance dst) {

@ -299,8 +299,6 @@ class TurboAssembler : public Assembler {

  void Cvtui2ss(XMMRegister dst, Register src, Register tmp);

  void SlowTruncateToIDelayed(Zone* zone);

  void Push(Register src) { push(src); }
  void Push(Operand src) { push(src); }
  void Push(Immediate value) { push(value); }
|
@ -35,243 +35,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
|
||||
__ TailCallRuntime(Runtime::kNewArray);
|
||||
}
|
||||
|
||||
|
||||
void DoubleToIStub::Generate(MacroAssembler* masm) {
|
||||
Label out_of_range, only_low, negate, done;
|
||||
Register result_reg = t0;
|
||||
|
||||
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
|
||||
Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
|
||||
Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
|
||||
DoubleRegister double_scratch = kLithiumScratchDouble;
|
||||
|
||||
// Account for saved regs.
|
||||
const int kArgumentOffset = 4 * kPointerSize;
|
||||
|
||||
__ Push(result_reg);
|
||||
__ Push(scratch, scratch2, scratch3);
|
||||
|
||||
// Load double input.
|
||||
__ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
|
||||
|
||||
// Clear cumulative exception flags and save the FCSR.
|
||||
__ cfc1(scratch2, FCSR);
|
||||
__ ctc1(zero_reg, FCSR);
|
||||
|
||||
// Try a conversion to a signed integer.
|
||||
__ Trunc_w_d(double_scratch, double_scratch);
|
||||
// Move the converted value into the result register.
|
||||
__ mfc1(scratch3, double_scratch);
|
||||
|
||||
// Retrieve and restore the FCSR.
|
||||
__ cfc1(scratch, FCSR);
|
||||
__ ctc1(scratch2, FCSR);
|
||||
|
||||
// Check for overflow and NaNs.
|
||||
__ And(
|
||||
scratch, scratch,
|
||||
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
|
||||
// If we had no exceptions then set result_reg and we are done.
|
||||
Label error;
|
||||
__ Branch(&error, ne, scratch, Operand(zero_reg));
|
||||
__ Move(result_reg, scratch3);
|
||||
__ Branch(&done);
|
||||
__ bind(&error);
|
||||
|
||||
// Load the double value and perform a manual truncation.
|
||||
Register input_high = scratch2;
|
||||
Register input_low = scratch3;
|
||||
|
||||
__ lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
|
||||
__ lw(input_high,
|
||||
MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
|
||||
|
||||
Label normal_exponent, restore_sign;
|
||||
// Extract the biased exponent in result.
|
||||
__ Ext(result_reg,
|
||||
input_high,
|
||||
HeapNumber::kExponentShift,
|
||||
HeapNumber::kExponentBits);
|
||||
|
||||
// Check for Infinity and NaNs, which should return 0.
|
||||
__ Subu(scratch, result_reg, HeapNumber::kExponentMask);
|
||||
__ Movz(result_reg, zero_reg, scratch);
|
||||
__ Branch(&done, eq, scratch, Operand(zero_reg));
|
||||
|
||||
// Express exponent as delta to (number of mantissa bits + 31).
|
||||
__ Subu(result_reg,
|
||||
result_reg,
|
||||
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
|
||||
|
||||
// If the delta is strictly positive, all bits would be shifted away,
|
||||
// which means that we can return 0.
|
||||
__ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
|
||||
__ mov(result_reg, zero_reg);
|
||||
__ Branch(&done);
|
||||
|
||||
__ bind(&normal_exponent);
|
||||
const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
|
||||
// Calculate shift.
|
||||
__ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
|
||||
|
||||
// Save the sign.
|
||||
Register sign = result_reg;
|
||||
result_reg = no_reg;
|
||||
__ And(sign, input_high, Operand(HeapNumber::kSignMask));
|
||||
|
||||
// On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
|
||||
// to check for this specific case.
|
||||
Label high_shift_needed, high_shift_done;
|
||||
__ Branch(&high_shift_needed, lt, scratch, Operand(32));
|
||||
__ mov(input_high, zero_reg);
|
||||
__ Branch(&high_shift_done);
|
||||
__ bind(&high_shift_needed);
|
||||
|
||||
// Set the implicit 1 before the mantissa part in input_high.
|
||||
__ Or(input_high,
|
||||
input_high,
|
||||
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
|
||||
// Shift the mantissa bits to the correct position.
|
||||
// We don't need to clear non-mantissa bits as they will be shifted away.
|
||||
// If they weren't, it would mean that the answer is in the 32bit range.
|
||||
__ sllv(input_high, input_high, scratch);
|
||||
|
||||
__ bind(&high_shift_done);
|
||||
|
||||
// Replace the shifted bits with bits from the lower mantissa word.
|
||||
Label pos_shift, shift_done;
|
||||
__ li(at, 32);
|
||||
__ subu(scratch, at, scratch);
|
||||
__ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
|
||||
|
||||
// Negate scratch.
|
||||
__ Subu(scratch, zero_reg, scratch);
|
||||
__ sllv(input_low, input_low, scratch);
|
||||
__ Branch(&shift_done);
|
||||
|
||||
__ bind(&pos_shift);
|
||||
__ srlv(input_low, input_low, scratch);
|
||||
|
||||
__ bind(&shift_done);
|
||||
__ Or(input_high, input_high, Operand(input_low));
|
||||
// Restore sign if necessary.
|
||||
__ mov(scratch, sign);
|
||||
result_reg = sign;
|
||||
sign = no_reg;
|
||||
__ Subu(result_reg, zero_reg, input_high);
|
||||
__ Movz(result_reg, input_high, scratch);
|
||||
|
||||
__ bind(&done);
|
||||
__ sw(result_reg, MemOperand(sp, kArgumentOffset));
|
||||
__ Pop(scratch, scratch2, scratch3);
|
||||
__ Pop(result_reg);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
|
||||
void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
const Register exponent = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent == a2);
|
||||
const DoubleRegister double_base = f2;
|
||||
const DoubleRegister double_exponent = f4;
|
||||
const DoubleRegister double_result = f0;
|
||||
const DoubleRegister double_scratch = f6;
|
||||
const FPURegister single_scratch = f8;
|
||||
const Register scratch = t5;
|
||||
const Register scratch2 = t3;
|
||||
|
||||
Label call_runtime, done, int_exponent;
|
||||
|
||||
Label int_exponent_convert;
|
||||
// Detect integer exponents stored as double.
|
||||
__ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
|
||||
double_scratch, scratch2, kCheckForInexactConversion);
|
||||
// scratch2 == 0 means there was no conversion error.
|
||||
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
|
||||
|
||||
__ push(ra);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(0, 2, scratch2);
|
||||
__ MovToFloatParameters(double_base, double_exponent);
|
||||
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
|
||||
0, 2);
|
||||
}
|
||||
__ pop(ra);
|
||||
__ MovFromFloatResult(double_result);
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&int_exponent_convert);
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
|
||||
// Get two copies of exponent in the registers scratch and exponent.
|
||||
// Exponent has previously been stored into scratch as untagged integer.
|
||||
__ mov(exponent, scratch);
|
||||
|
||||
__ mov_d(double_scratch, double_base); // Back up base.
|
||||
__ Move(double_result, 1.0);
|
||||
|
||||
// Get absolute value of exponent.
|
||||
Label positive_exponent, bail_out;
|
||||
__ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
|
||||
__ Subu(scratch, zero_reg, scratch);
|
||||
// Check when Subu overflows and we get negative result
|
||||
// (happens only when input is MIN_INT).
|
||||
__ Branch(&bail_out, gt, zero_reg, Operand(scratch));
|
||||
__ bind(&positive_exponent);
|
||||
__ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
|
||||
Operand(zero_reg));
|
||||
|
||||
Label while_true, no_carry, loop_end;
|
||||
__ bind(&while_true);
|
||||
|
||||
__ And(scratch2, scratch, 1);
|
||||
|
||||
__ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
|
||||
__ mul_d(double_result, double_result, double_scratch);
|
||||
__ bind(&no_carry);
|
||||
|
||||
__ sra(scratch, scratch, 1);
|
||||
|
||||
__ Branch(&loop_end, eq, scratch, Operand(zero_reg));
|
||||
__ mul_d(double_scratch, double_scratch, double_scratch);
|
||||
|
||||
__ Branch(&while_true);
|
||||
|
||||
__ bind(&loop_end);
|
||||
|
||||
__ Branch(&done, ge, exponent, Operand(zero_reg));
|
||||
__ Move(double_scratch, 1.0);
|
||||
__ div_d(double_result, double_scratch, double_result);
|
||||
// Test whether result is zero. Bail out to check for subnormal result.
|
||||
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
|
||||
__ CompareF64(EQ, double_result, kDoubleRegZero);
|
||||
__ BranchFalseShortF(&done);
|
||||
|
||||
// double_exponent may not contain the exponent value if the input was a
|
||||
// smi. We set it with exponent value before bailing out.
|
||||
__ bind(&bail_out);
|
||||
__ mtc1(exponent, single_scratch);
|
||||
__ cvt_d_w(double_exponent, single_scratch);
|
||||
|
||||
// Returning or bailing out.
|
||||
__ push(ra);
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm);
|
||||
__ PrepareCallCFunction(0, 2, scratch);
|
||||
__ MovToFloatParameters(double_base, double_exponent);
|
||||
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
|
||||
0, 2);
|
||||
}
|
||||
__ pop(ra);
|
||||
__ MovFromFloatResult(double_result);
|
||||
|
||||
__ bind(&done);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }
|
||||
|
||||
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
|
||||
|
@ -2651,8 +2651,9 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
  Branch(done, eq, scratch, Operand(zero_reg));
}

void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
                                              DoubleRegister double_input) {
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
                                       Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);
@ -2662,7 +2663,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
  Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  Sdc1(double_input, MemOperand(sp, 0));

  CallStubDelayed(new (zone) DoubleToIStub(nullptr));
  Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
  lw(result, MemOperand(sp, 0));

  Addu(sp, sp, Operand(kDoubleSize));

@ -558,8 +558,8 @@ class TurboAssembler : public Assembler {
  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToIDelayed(Zone* zone, Register result,
                                DoubleRegister double_input);
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input);

  // Conditional move.
  void Movz(Register rd, Register rs, Register rt);
|
@ -34,244 +34,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
|
||||
__ TailCallRuntime(Runtime::kNewArray);
|
||||
}
|
||||
|
||||
|
||||
void DoubleToIStub::Generate(MacroAssembler* masm) {
|
||||
Label out_of_range, only_low, negate, done;
|
||||
Register result_reg = t0;
|
||||
|
||||
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
|
||||
Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
|
||||
Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
|
||||
DoubleRegister double_scratch = kLithiumScratchDouble;
|
||||
|
||||
// Account for saved regs.
|
||||
const int kArgumentOffset = 4 * kPointerSize;
|
||||
|
||||
__ Push(result_reg);
|
||||
__ Push(scratch, scratch2, scratch3);
|
||||
|
||||
// Load double input.
|
||||
__ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
|
||||
|
||||
// Clear cumulative exception flags and save the FCSR.
|
||||
__ cfc1(scratch2, FCSR);
|
||||
__ ctc1(zero_reg, FCSR);
|
||||
|
||||
// Try a conversion to a signed integer.
|
||||
__ Trunc_w_d(double_scratch, double_scratch);
|
||||
// Move the converted value into the result register.
|
||||
__ mfc1(scratch3, double_scratch);
|
||||
|
||||
// Retrieve and restore the FCSR.
|
||||
__ cfc1(scratch, FCSR);
|
||||
__ ctc1(scratch2, FCSR);
|
||||
|
||||
// Check for overflow and NaNs.
|
||||
__ And(
|
||||
scratch, scratch,
|
||||
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
|
||||
// If we had no exceptions then set result_reg and we are done.
|
||||
Label error;
|
||||
__ Branch(&error, ne, scratch, Operand(zero_reg));
|
||||
__ Move(result_reg, scratch3);
|
||||
__ Branch(&done);
|
||||
__ bind(&error);
|
||||
|
||||
// Load the double value and perform a manual truncation.
|
||||
Register input_high = scratch2;
|
||||
Register input_low = scratch3;
|
||||
|
||||
__ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
|
||||
__ Lw(input_high,
|
||||
MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
|
||||
|
||||
Label normal_exponent, restore_sign;
|
||||
// Extract the biased exponent in result.
|
||||
__ Ext(result_reg,
|
||||
input_high,
|
||||
HeapNumber::kExponentShift,
|
||||
HeapNumber::kExponentBits);
|
||||
|
||||
// Check for Infinity and NaNs, which should return 0.
|
||||
__ Subu(scratch, result_reg, HeapNumber::kExponentMask);
|
||||
__ Movz(result_reg, zero_reg, scratch);
|
||||
__ Branch(&done, eq, scratch, Operand(zero_reg));
|
||||
|
||||
// Express exponent as delta to (number of mantissa bits + 31).
|
||||
__ Subu(result_reg,
|
||||
result_reg,
|
||||
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
|
||||
|
||||
// If the delta is strictly positive, all bits would be shifted away,
|
||||
// which means that we can return 0.
|
||||
__ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
|
||||
__ mov(result_reg, zero_reg);
|
||||
__ Branch(&done);
|
||||
|
||||
__ bind(&normal_exponent);
|
||||
const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
|
||||
// Calculate shift.
|
||||
__ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
|
||||
|
||||
// Save the sign.
|
||||
Register sign = result_reg;
|
||||
result_reg = no_reg;
|
||||
__ And(sign, input_high, Operand(HeapNumber::kSignMask));
|
||||
|
||||
// On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
|
||||
// to check for this specific case.
|
||||
Label high_shift_needed, high_shift_done;
|
||||
__ Branch(&high_shift_needed, lt, scratch, Operand(32));
|
||||
__ mov(input_high, zero_reg);
|
||||
__ Branch(&high_shift_done);
|
||||
__ bind(&high_shift_needed);
|
||||
|
||||
// Set the implicit 1 before the mantissa part in input_high.
|
||||
__ Or(input_high,
|
||||
input_high,
|
||||
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
|
||||
// Shift the mantissa bits to the correct position.
|
||||
// We don't need to clear non-mantissa bits as they will be shifted away.
|
||||
// If they weren't, it would mean that the answer is in the 32bit range.
|
||||
__ sllv(input_high, input_high, scratch);
|
||||
|
||||
__ bind(&high_shift_done);
|
||||
|
||||
// Replace the shifted bits with bits from the lower mantissa word.
|
||||
Label pos_shift, shift_done;
|
||||
__ li(at, 32);
|
||||
__ subu(scratch, at, scratch);
|
||||
__ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
|
||||
|
||||
// Negate scratch.
|
||||
__ Subu(scratch, zero_reg, scratch);
|
||||
__ sllv(input_low, input_low, scratch);
|
||||
__ Branch(&shift_done);
|
||||
|
||||
__ bind(&pos_shift);
|
||||
__ srlv(input_low, input_low, scratch);
|
||||
|
||||
__ bind(&shift_done);
|
||||
__ Or(input_high, input_high, Operand(input_low));
|
||||
// Restore sign if necessary.
|
||||
__ mov(scratch, sign);
|
||||
result_reg = sign;
|
||||
sign = no_reg;
|
||||
__ Subu(result_reg, zero_reg, input_high);
|
||||
__ Movz(result_reg, input_high, scratch);
|
||||
|
||||
__ bind(&done);
|
||||
|
||||
__ Sd(result_reg, MemOperand(sp, kArgumentOffset));
|
||||
__ Pop(scratch, scratch2, scratch3);
|
||||
__ Pop(result_reg);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
|
||||
void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
const Register exponent = MathPowTaggedDescriptor::exponent();
|
||||
DCHECK(exponent == a2);
|
||||
const DoubleRegister double_base = f2;
|
||||
const DoubleRegister double_exponent = f4;
|
||||
const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t1;
  const Register scratch2 = a7;

  Label call_runtime, done, int_exponent;

  Label int_exponent_convert;
  // Detect integer exponents stored as double.
  __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
                     double_scratch, scratch2, kCheckForInexactConversion);
  // scratch2 == 0 means there was no conversion error.
  __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

  __ push(ra);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch2);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(ra);
  __ MovFromFloatResult(double_result);
  __ jmp(&done);

  __ bind(&int_exponent_convert);

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  // Exponent has previously been stored into scratch as untagged integer.
  __ mov(exponent, scratch);

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent, bail_out;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Dsubu(scratch, zero_reg, scratch);
  // Check when Dsubu overflows and we get negative result
  // (happens only when input is MIN_INT).
  __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
  __ bind(&positive_exponent);
  __ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
            Operand(zero_reg));

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ dsra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ CompareF64(EQ, double_result, kDoubleRegZero);
  __ BranchFalseShortF(&done);

  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with exponent value before bailing out.
  __ bind(&bail_out);
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(ra);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(ra);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}
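For reference, the integer-exponent path deleted above is ordinary square-and-multiply exponentiation. Below is a minimal standalone C++ sketch of the same loop; PowIntExponent is a hypothetical helper name, not part of this change, and unlike the stub it handles MIN_INT directly instead of bailing out.

#include <cmath>

// Hypothetical helper: the square-and-multiply loop the integer-exponent
// path above encodes in assembly (double_result / double_scratch).
double PowIntExponent(double base, int exponent) {
  double result = 1.0;
  double scratch = base;  // running base^(2^i)
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  while (bits != 0) {
    if (bits & 1) result *= scratch;    // multiply in the current bit
    bits >>= 1;
    if (bits != 0) scratch *= scratch;  // square for the next bit
  }
  if (exponent < 0) {
    result = 1.0 / result;
    // The stub additionally bails out to the C runtime when this division
    // underflows to zero: x^-y == (1/x)^y does not hold for subnormals.
    if (result == 0.0) return std::pow(base, exponent);
  }
  return result;
}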

Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }

void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {

@ -3167,8 +3167,9 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
  Branch(done, eq, scratch, Operand(zero_reg));
}

void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
                                              DoubleRegister double_input) {
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
                                       Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

@ -3178,7 +3179,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
  Dsubu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  Sdc1(double_input, MemOperand(sp, 0));

  CallStubDelayed(new (zone) DoubleToIStub(nullptr));
  Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
  Ld(result, MemOperand(sp, 0));

  Daddu(sp, sp, Operand(kDoubleSize));

@ -589,8 +589,8 @@ class TurboAssembler : public Assembler {
  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToIDelayed(Zone* zone, Register result,
                                DoubleRegister double_input);
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input);

  // Conditional move.
  void Movz(Register rd, Register rs, Register rt);
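The hunks above replace the delayed stub call with a direct call to the DoubleToI builtin; the semantics are unchanged, namely the ECMA-262 ToInt32 truncation mentioned in the header comment. A minimal C++ reference model is sketched below; ToInt32Reference is a hypothetical name for illustration only, not code from this change.

#include <cmath>
#include <cstdint>

// Hypothetical reference model of what the inline fast path plus the
// DoubleToI builtin compute together: ECMA-262 ToInt32 of a double.
int32_t ToInt32Reference(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;
  double truncated = std::trunc(value);          // drop the fractional part
  double modulo = std::fmod(truncated, 4294967296.0);  // reduce mod 2^32
  if (modulo < 0) modulo += 4294967296.0;
  uint64_t bits = static_cast<uint64_t>(modulo);
  // Reinterpret the low 32 bits as a signed value (two's complement wrap).
  return static_cast<int32_t>(static_cast<uint32_t>(bits));
}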
@ -744,24 +744,14 @@ Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
#endif

Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
  if (!Heap::IsImmovable(*code)) {
    DCHECK(code->kind() == Code::STUB &&
           CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI);
    uint32_t key = code->stub_key();
    auto copy = stubs_.find(key);
    if (copy == stubs_.end()) {
      WasmCode* ret = AddAnonymousCode(code, WasmCode::kCopiedStub);
      copy = stubs_.emplace(std::make_pair(key, ret)).first;
    }
    return copy->second->instructions().start();
  } else {
    Address index = code->raw_instruction_start();
    auto trampoline_iter = trampolines_.find(index);
    if (trampoline_iter == trampolines_.end()) {
      return CreateTrampolineTo(code);
    } else {
      return trampoline_iter->second;
    }
  }
  DCHECK(Heap::IsImmovable(*code));

  Address index = code->raw_instruction_start();
  auto trampoline_iter = trampolines_.find(index);
  if (trampoline_iter == trampolines_.end()) {
    return CreateTrampolineTo(code);
  } else {
    return trampoline_iter->second;
  }
}
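With the movable copied-stub branch gone, the function reduces to a get-or-create lookup in the trampoline cache. The following is only a stripped-down sketch of that pattern; TrampolineCache and CreateTrampoline are stand-in names, not the real wasm code-manager types.

#include <cstdint>
#include <unordered_map>

using Address = uintptr_t;

// Sketch of the get-or-create pattern the simplified GetLocalAddressFor uses.
class TrampolineCache {
 public:
  Address GetLocalAddressFor(Address code_start) {
    auto it = trampolines_.find(code_start);
    if (it == trampolines_.end()) {
      Address trampoline = CreateTrampoline(code_start);
      trampolines_.emplace(code_start, trampoline);
      return trampoline;
    }
    return it->second;
  }

 private:
  Address CreateTrampoline(Address target) {
    // Placeholder: the real implementation emits a jump thunk into the
    // module's code space and returns its start address.
    return target;
  }

  std::unordered_map<Address, Address> trampolines_;
};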


@ -36,203 +36,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kNewArray);
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label check_negative, process_64_bits, done;

  // Account for return address and saved regs.
  const int kArgumentOffset = 4 * kRegisterSize;

  MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
  MemOperand exponent_operand(
      MemOperand(rsp, kArgumentOffset + kDoubleSize / 2));

  // The result is returned on the stack.
  MemOperand return_operand = mantissa_operand;

  Register scratch1 = rbx;

  // Since we must use rcx for shifts below, use some other register (rax)
  // to calculate the result if ecx is the requested return register.
  Register result_reg = rax;
  // Save ecx if it isn't the return register and therefore volatile, or if it
  // is the return register, then save the temp register we use in its stead
  // for the result.
  Register save_reg = rax;
  __ pushq(rcx);
  __ pushq(scratch1);
  __ pushq(save_reg);

  __ movl(scratch1, mantissa_operand);
  __ Movsd(kScratchDoubleReg, mantissa_operand);
  __ movl(rcx, exponent_operand);

  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
  __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
  __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
  __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
  __ j(below, &process_64_bits);

  // Result is entirely in lower 32-bits of mantissa
  int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
  __ subl(rcx, Immediate(delta));
  __ xorl(result_reg, result_reg);
  __ cmpl(rcx, Immediate(31));
  __ j(above, &done);
  __ shll_cl(scratch1);
  __ jmp(&check_negative);

  __ bind(&process_64_bits);
  __ Cvttsd2siq(result_reg, kScratchDoubleReg);
  __ jmp(&done, Label::kNear);

  // If the double was negative, negate the integer result.
  __ bind(&check_negative);
  __ movl(result_reg, scratch1);
  __ negl(result_reg);
  __ cmpl(exponent_operand, Immediate(0));
  __ cmovl(greater, result_reg, scratch1);

  // Restore registers
  __ bind(&done);
  __ movl(return_operand, result_reg);
  __ popq(save_reg);
  __ popq(scratch1);
  __ popq(rcx);
  __ ret(0);
}
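The x64 stub above splits on the unbiased exponent: values whose exponent is below the mantissa width are truncated with cvttsd2siq, while larger magnitudes (including NaN and infinity, which end up as 0) take their low 32 result bits from a shift of the low mantissa word. A hedged bit-level C++ sketch follows; DoubleToInt32Bits is an illustrative name, not part of the change.

#include <cstdint>
#include <cstring>

// Sketch of the two branches above, assuming IEEE-754 doubles.
int32_t DoubleToInt32Bits(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  uint32_t low = static_cast<uint32_t>(bits);         // mantissa_operand
  uint32_t high = static_cast<uint32_t>(bits >> 32);  // exponent_operand
  int exponent = static_cast<int>((high >> 20) & 0x7FF) - 1023;

  if (exponent < 52) {
    // process_64_bits: the 64-bit truncation is exact here; keep the low word.
    return static_cast<int32_t>(
        static_cast<uint32_t>(static_cast<int64_t>(input)));
  }
  int shift = exponent - 52;          // rcx after subl(Immediate(delta))
  if (shift > 31) return 0;           // all low result bits shifted out
  uint32_t result = low << shift;
  // check_negative: negate if the sign bit of the high word is set.
  if (static_cast<int32_t>(high) < 0) result = 0u - result;
  return static_cast<int32_t>(result);
}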

void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent == rdx);
  const Register scratch = rcx;
  const XMMRegister double_result = xmm3;
  const XMMRegister double_base = xmm2;
  const XMMRegister double_exponent = xmm1;
  const XMMRegister double_scratch = xmm4;

  Label call_runtime, done, exponent_not_smi, int_exponent;

  // Save 1 in double_result - we need this several times later on.
  __ movp(scratch, Immediate(1));
  __ Cvtlsi2sd(double_result, scratch);

  Label fast_power, try_arithmetic_simplification;
  // Detect integer exponents stored as double.
  __ DoubleToI(exponent, double_exponent, double_scratch,
               &try_arithmetic_simplification, &try_arithmetic_simplification);
  __ jmp(&int_exponent);

  __ bind(&try_arithmetic_simplification);
  __ Cvttsd2si(exponent, double_exponent);
  // Skip to runtime if possibly NaN (indicated by the indefinite integer).
  __ cmpl(exponent, Immediate(0x1));
  __ j(overflow, &call_runtime);

  // Using FPU instructions to calculate power.
  Label fast_power_failed;
  __ bind(&fast_power);
  __ fnclex();  // Clear flags to catch exceptions later.
  // Transfer (B)ase and (E)xponent onto the FPU register stack.
  __ subp(rsp, Immediate(kDoubleSize));
  __ Movsd(Operand(rsp, 0), double_exponent);
  __ fld_d(Operand(rsp, 0));  // E
  __ Movsd(Operand(rsp, 0), double_base);
  __ fld_d(Operand(rsp, 0));  // B, E

  // Exponent is in st(1) and base is in st(0)
  // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
  // FYL2X calculates st(1) * log2(st(0))
  __ fyl2x();    // X
  __ fld(0);     // X, X
  __ frndint();  // rnd(X), X
  __ fsub(1);    // rnd(X), X-rnd(X)
  __ fxch(1);    // X - rnd(X), rnd(X)
  // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
  __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
  __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
  __ faddp(1);   // 2^(X-rnd(X)), rnd(X)
  // FSCALE calculates st(0) * 2^st(1)
  __ fscale();   // 2^X, rnd(X)
  __ fstp(1);
  // Bail out to runtime in case of exceptions in the status word.
  __ fnstsw_ax();
  __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
  __ j(not_zero, &fast_power_failed, Label::kNear);
  __ fstp_d(Operand(rsp, 0));
  __ Movsd(double_result, Operand(rsp, 0));
  __ addp(rsp, Immediate(kDoubleSize));
  __ jmp(&done);

  __ bind(&fast_power_failed);
  __ fninit();
  __ addp(rsp, Immediate(kDoubleSize));
  __ jmp(&call_runtime);

  // Calculate power with integer exponent.
  __ bind(&int_exponent);
  const XMMRegister double_scratch2 = double_exponent;
  // Back up exponent as we need to check if exponent is negative later.
  __ movp(scratch, exponent);  // Back up exponent.
  __ Movsd(double_scratch, double_base);     // Back up base.
  __ Movsd(double_scratch2, double_result);  // Load double_exponent with 1.

  // Get absolute value of exponent.
  Label no_neg, while_true, while_false;
  __ testl(scratch, scratch);
  __ j(positive, &no_neg, Label::kNear);
  __ negl(scratch);
  __ bind(&no_neg);

  __ j(zero, &while_false, Label::kNear);
  __ shrl(scratch, Immediate(1));
  // Above condition means CF==0 && ZF==0. This means that the
  // bit that has been shifted out is 0 and the result is not 0.
  __ j(above, &while_true, Label::kNear);
  __ Movsd(double_result, double_scratch);
  __ j(zero, &while_false, Label::kNear);

  __ bind(&while_true);
  __ shrl(scratch, Immediate(1));
  __ Mulsd(double_scratch, double_scratch);
  __ j(above, &while_true, Label::kNear);
  __ Mulsd(double_result, double_scratch);
  __ j(not_zero, &while_true);

  __ bind(&while_false);
  // If the exponent is negative, return 1/result.
  __ testl(exponent, exponent);
  __ j(greater, &done);
  __ Divsd(double_scratch2, double_result);
  __ Movsd(double_result, double_scratch2);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ Xorpd(double_scratch2, double_scratch2);
  __ Ucomisd(double_scratch2, double_result);
  // double_exponent aliased as double_scratch2 has already been overwritten
  // and may not have contained the exponent value in the first place when the
  // input was a smi. We reset it with exponent value before bailing out.
  __ j(not_equal, &done);
  __ Cvtlsi2sd(double_exponent, exponent);

  // Returning or bailing out.
  __ bind(&call_runtime);
  // Move base to the correct argument register. Exponent is already in xmm1.
  __ Movsd(xmm0, double_base);
  DCHECK(double_exponent == xmm1);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(2);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     2);
  }
  // Return value is in xmm0.
  __ Movsd(double_result, xmm0);

  __ bind(&done);
  __ ret(0);
}
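The x87 fast path above evaluates B^E through the identity B^E = 2^(E * log2(B)), splitting X = E * log2(B) into an integral part and a fraction so that F2XM1's limited input range (-1, 1) applies. Below is a hedged portable sketch of that decomposition; FastPowSketch is an illustrative name, and it assumes base > 0, a finite exponent, and rnd(X) fitting in an int, since the stub bails out to the C runtime in the remaining cases.

#include <cmath>

// Sketch of the fyl2x / frndint / f2xm1 / fscale sequence above.
double FastPowSketch(double base, double exp) {
  double x = exp * std::log2(base);  // fyl2x: X = E * log2(B)
  double r = std::nearbyint(x);      // frndint: rnd(X)
  double f = x - r;                  // fsub: X - rnd(X), in (-1, 1)
  double pow2_frac = std::exp2(f);   // f2xm1 + fld1 + faddp: 2^(X - rnd(X))
  return std::scalbn(pow2_frac, static_cast<int>(r));  // fscale: * 2^rnd(X)
}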

Movability CEntryStub::NeedsImmovableCode() { return kMovable; }

void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {

@ -1765,10 +1765,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}

void TurboAssembler::SlowTruncateToIDelayed(Zone* zone) {
  CallStubDelayed(new (zone) DoubleToIStub(nullptr));
}

void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               XMMRegister scratch, Label* lost_precision,
                               Label* is_nan, Label::Distance dst) {

@ -458,8 +458,6 @@ class TurboAssembler : public Assembler {
  // HeapObjectRequest that will be fulfilled after code assembly.
  void CallStubDelayed(CodeStub* stub);

  void SlowTruncateToIDelayed(Zone* zone);

  // Call a runtime routine.
  void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
                          SaveFPRegsMode save_doubles = kDontSaveFPRegs);

@ -52,9 +52,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      v8::internal::CodeObjectRequired::kYes);

  DoubleToIStub stub(isolate);

  byte* start = stub.GetCode()->raw_instruction_start();
  Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
  byte* start = code->InstructionStart();

  // Save callee save registers.
  __ Push(r7, r6, r5, r4);

@ -53,9 +53,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      v8::internal::CodeObjectRequired::kYes);

  DoubleToIStub stub(isolate);

  byte* start = stub.GetCode()->raw_instruction_start();
  Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
  byte* start = code->InstructionStart();

  __ PushCalleeSavedRegisters();

@ -53,8 +53,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      v8::internal::CodeObjectRequired::kYes);

  DoubleToIStub stub(isolate);
  byte* start = stub.GetCode()->raw_instruction_start();
  Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
  byte* start = code->InstructionStart();

  __ push(ebx);
  __ push(ecx);

@ -54,9 +54,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      v8::internal::CodeObjectRequired::kYes);

  DoubleToIStub stub(isolate);

  byte* start = stub.GetCode()->raw_instruction_start();
  Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
  byte* start = code->InstructionStart();

  // Save callee save registers.
  __ MultiPush(kCalleeSaved | ra.bit());

@ -54,9 +54,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      v8::internal::CodeObjectRequired::kYes);

  DoubleToIStub stub(isolate);

  byte* start = stub.GetCode()->raw_instruction_start();
  Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
  byte* start = code->InstructionStart();

  // Save callee save registers.
  __ MultiPush(kCalleeSaved | ra.bit());

@ -53,8 +53,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
  MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
                      v8::internal::CodeObjectRequired::kYes);

  DoubleToIStub stub(isolate);
  byte* start = stub.GetCode()->raw_instruction_start();
  Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
  byte* start = code->InstructionStart();

  __ pushq(rbx);
  __ pushq(rcx);
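Each of the test-code-stubs-* hunks above makes the same substitution; the rest of MakeConvertDToIFuncTrampoline is untouched. The pattern, compressed into one comment-only snippet for readability (no new API is introduced):

// Before: build a fresh DoubleToIStub and take its generated code.
//   DoubleToIStub stub(isolate);
//   byte* start = stub.GetCode()->raw_instruction_start();
//
// After: reuse the isolate's DoubleToI builtin, which is now equivalent.
//   Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
//   byte* start = code->InstructionStart();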