[arm] Turn off the default NaN mode on arm.

The default NaN mode was originally used to identify holes in double
arrays. Since https://codereview.chromium.org/863633002/, signalling NaNs
are used for that instead, so the default NaN mode is not needed anymore.
Without the default NaN mode it is easier to satisfy the WebAssembly
spec, which requires that quiet NaNs are preserved.
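
For background (not part of this change), a stand-alone C++ sketch of the binary64 NaN encodings involved; the constants below are the standard IEEE-754 encodings, with 0x7FF8000000000000 being the ARM default ("canonical") NaN. In default NaN mode every NaN result is replaced by that one pattern, so the payload bits of a quiet NaN operand are lost, which conflicts with the quiet-NaN-preservation requirement mentioned above.

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const uint64_t kQuietBit     = uint64_t{1} << 51;       // top fraction bit distinguishes qNaN from sNaN
  const uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;   // ARM default NaN (all payload bits zero)
  uint64_t quiet_nan      = kCanonicalNaN | 0x1234;       // quiet NaN carrying a payload
  uint64_t signalling_nan = 0x7FF0000000000001ULL;        // signalling NaN (quiet bit clear, payload nonzero)

  double d;
  std::memcpy(&d, &quiet_nan, sizeof d);
  std::printf("is NaN: %d\n", d != d);                                      // 1: still a NaN
  std::printf("qNaN quiet bit: %d\n", (quiet_nan & kQuietBit) != 0);        // 1
  std::printf("sNaN quiet bit: %d\n", (signalling_nan & kQuietBit) != 0);   // 0
  return 0;
}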

R=titzer@chromium.org, rodolph.perfetta@arm.com, bmeurer@chromium.org

Review URL: https://codereview.chromium.org/1884193003

Cr-Commit-Position: refs/heads/master@{#35526}
Authored by ahaas on 2016-04-15 05:44:59 -07:00; committed by Commit bot
parent 2742b303b8
commit 5df9406a07
6 changed files with 11 additions and 52 deletions

View File

@@ -1072,8 +1072,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
- __ VFPEnsureFPSCRState(r3);
// Check result for exception sentinel.
Label exception_returned;
__ CompareRoot(r0, Heap::kExceptionRootIndex);
@@ -1183,7 +1181,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
__ vmov(kDoubleRegZero, 0.0);
- __ VFPEnsureFPSCRState(r4);
// Get address of argv, see stm above.
// r0: code entry
@@ -3238,7 +3235,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// GC safe. The RegExp backend also relies on this.
__ str(lr, MemOperand(sp, 0));
__ blx(ip); // Call the C++ function.
- __ VFPEnsureFPSCRState(r2);
__ ldr(pc, MemOperand(sp, 0));
}

View File

@@ -950,30 +950,12 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
}
- void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
- // If needed, restore wanted bits of FPSCR.
- Label fpscr_done;
- vmrs(scratch);
- if (emit_debug_code()) {
- Label rounding_mode_correct;
- tst(scratch, Operand(kVFPRoundingModeMask));
- b(eq, &rounding_mode_correct);
- // Don't call Assert here, since Runtime_Abort could re-enter here.
- stop("Default rounding mode not set");
- bind(&rounding_mode_correct);
- }
- tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
- b(ne, &fpscr_done);
- orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
- vmsr(scratch);
- bind(&fpscr_done);
- }
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
// become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
// inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
vsub(dst, src, kDoubleRegZero, cond);
}
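
To illustrate the comment on VFPCanonicalizeNaN above, a minimal host-side C++ sketch (plain scalar arithmetic under the default round-to-nearest mode, not the ARM instruction sequence): addition loses the sign of negative zero, while subtraction keeps it, which is why vsub rather than vadd is used for canonicalization.

#include <cmath>
#include <cstdio>

int main() {
  volatile double neg_zero = -0.0;  // volatile keeps the compiler from folding the expressions
  std::printf("signbit(-0.0 + 0.0) = %d\n", std::signbit(neg_zero + 0.0));  // 0: sign lost
  std::printf("signbit(-0.0 - 0.0) = %d\n", std::signbit(neg_zero - 0.0));  // 1: sign preserved
  return 0;
}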
@@ -2414,12 +2396,6 @@ void MacroAssembler::StoreNumberToDoubleElements(
DONT_DO_SMI_CHECK);
vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
// Force a canonical NaN.
- if (emit_debug_code()) {
- vmrs(ip);
- tst(ip, Operand(kVFPDefaultNaNModeControlBit));
- Assert(ne, kDefaultNaNModeNotSet);
- }
VFPCanonicalizeNaN(double_scratch);
b(&store);

View File

@@ -489,15 +489,6 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
- // Ensure that FPSCR contains values needed by JavaScript.
- // We need the NaNModeControlBit to be sure that operations like
- // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
- // In VFP3 it will be always the Canonical NaN.
- // In VFP2 it will be either the Canonical NaN or the negative version
- // of the Canonical NaN. It doesn't matter if we have two values. The aim
- // is to be sure to never generate the hole NaN.
- void VFPEnsureFPSCRState(Register scratch);
// If the value is a NaN, canonicalize the value else, do nothing.
void VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
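
As a rough sketch of the "hole NaN" idea from the deleted comment above (kHoleNaN below is a made-up signalling-NaN bit pattern, not V8's actual sentinel): the hole is a single exact NaN bit pattern, so it is recognized by comparing raw bits, and ordinary arithmetic must never be allowed to produce that pattern.

#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr uint64_t kHoleNaN = 0x7FF4000000000000ULL;  // hypothetical sNaN pattern standing in for the hole sentinel

bool IsHole(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // NaN == NaN is always false, so holes are found by raw bit comparison
  return bits == kHoleNaN;
}

int main() {
  double hole;
  std::memcpy(&hole, &kHoleNaN, sizeof hole);
  std::printf("hole detected: %d\n", IsHole(hole));       // 1
  std::printf("ordinary NaN:  %d\n", IsHole(0.0 / 0.0));  // 0: a computed NaN must not match the sentinel
  return 0;
}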

View File

@@ -698,11 +698,12 @@ const Operator* CommonOperatorBuilder::RelocatableInt32Constant(
const Operator* CommonOperatorBuilder::RelocatableInt64Constant(
int64_t value, RelocInfo::Mode rmode) {
- return new (zone()) Operator1<RelocatablePtrConstantInfo>( // --
- IrOpcode::kRelocatableInt64Constant, Operator::kPure, // opcode
- "RelocatableInt64Constant", // name
- 0, 0, 0, 1, 0, 0, // counts
- RelocatablePtrConstantInfo(value, rmode)); // parameter
+ return new (zone()) Operator1<RelocatablePtrConstantInfo>( // --
+ IrOpcode::kRelocatableInt64Constant, Operator::kPure, // opcode
+ "RelocatableInt64Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ RelocatablePtrConstantInfo(static_cast<intptr_t>(value), // parameter
+ rmode));
}
const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,

View File

@@ -4030,11 +4030,6 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (instr->NeedsCanonicalization()) {
// Force a canonical NaN.
- if (masm()->emit_debug_code()) {
- __ vmrs(ip);
- __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
- __ Assert(ne, kDefaultNaNModeNotSet);
- }
__ VFPCanonicalizeNaN(double_scratch, value);
__ vstr(double_scratch, scratch, 0);
} else {

View File

@@ -2630,8 +2630,8 @@ TEST(Run_Wasm_F64Max) {
}
}
- // TODO(ahaas): Fix on arm and mips and reenable.
- #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ // TODO(ahaas): Fix on mips and reenable.
+ #if !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
TEST(Run_Wasm_F32Min_Snan) {
// Test that the instruction does not return a signalling NaN.