[turbofan] Remove the FloatXXSubPreserveNan operators.

This CL changes the semantics of FloatXXSub to match the semantics of
FloatXXSubPreserveNan. Therefore the FloatXXSubPreserveNan operators
are no longer needed.
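
Note (illustration only, not part of the CL): "preserve NaN" here means
that a NaN input's payload bits flow through the subtraction instead of
being replaced by a canonical NaN. A minimal standalone C++ sketch of
that behavior, assuming IEEE-754 doubles and typical hardware NaN
propagation:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    // A quiet NaN with a recognizable payload in the low mantissa bits.
    uint64_t bits = 0x7FF8000000001234ULL;
    double nan_with_payload;
    std::memcpy(&nan_with_payload, &bits, sizeof bits);

    double result = nan_with_payload - 1.0;  // NaN - x is NaN

    uint64_t result_bits;
    std::memcpy(&result_bits, &result, sizeof result_bits);
    const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFULL;
    std::printf("payload preserved: %s\n",
                (result_bits & kMantissaMask) == (bits & kMantissaMask)
                    ? "yes" : "no");
    return 0;
  }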

The optimizations in VisitFloatXXSub that are removed in this CL have
already been moved to machine-operator-reducer.cc in
https://codereview.chromium.org/2226663002
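
The two reductions in question, for reference, are folding -0.0 - x to
a negation and folding -0.0 - RoundDown(-0.0 - x) to a round-up, as the
deleted selector code below shows. A standalone sketch of the
underlying identities (illustrative, not the reducer code), valid for
finite values and signed zeros:

  #include <cmath>
  #include <cstdio>
  #include <initializer_list>

  int main() {
    for (double x : {1.25, -1.25, 0.0, -0.0}) {
      double neg = -0.0 - x;                    // equals -x
      double up = -0.0 - std::floor(-0.0 - x);  // equals std::ceil(x)
      std::printf("x=% .2f  -0.0-x=% .2f  ceil-via-sub=% .2f\n",
                  x, neg, up);
    }
    return 0;
  }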

R=bmeurer@chromium.org

Review-Url: https://codereview.chromium.org/2220973002
Cr-Commit-Position: refs/heads/master@{#38437}
Author: ahaas, 2016-08-08 05:08:57 -07:00 (committed by Commit bot)
Commit: f8938e5096, parent 2ed9e6e634
24 changed files with 14 additions and 512 deletions

src/compiler/arm/instruction-selector-arm.cc

@@ -1416,76 +1416,30 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
VisitRRR(this, kArmVaddF64, node);
}
namespace {
void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) {
ArmOperandGenerator g(selector);
Float32BinopMatcher m(node);
if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
Float32BinopMatcher mright(m.right().node());
selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
VisitRRR(selector, kArmVsubF32, node);
}
void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) {
ArmOperandGenerator g(selector);
Float64BinopMatcher m(node);
if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) {
Float64BinopMatcher mright(m.right().node());
selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
VisitRRR(selector, kArmVsubF64, node);
}
} // namespace
void InstructionSelector::VisitFloat32Sub(Node* node) {
ArmOperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
Emit(kArmVnegF32, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
Float32BinopMatcher mright(m.right().node());
Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
VisitFloat32SubHelper(this, node);
}
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
VisitFloat32SubHelper(this, node);
VisitRRR(this, kArmVsubF32, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
if (m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
Emit(kArmVrintpF64, g.DefineAsRegister(node),
g.UseRegister(mright0.right().node()));
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
Float64BinopMatcher mright(m.right().node());
Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
}
Emit(kArmVnegF64, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
}
VisitFloat64SubHelper(this, node);
}
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
VisitFloat64SubHelper(this, node);
VisitRRR(this, kArmVsubF64, node);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
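
A note on the pattern the ARM hunk keeps: Float32Sub(a, Float32Mul(b, c))
is fused into a single multiply-subtract (kArmVmlsF32), which accumulates
into its destination register; that is why the selector uses
DefineSameAsFirst(node). An illustrative scalar equivalent, not from the
CL:

  // One VMLS instruction when the multiply is only used by the subtract
  // (the CanCover check above): computes acc - b * c in place.
  float vmls_like(float acc, float b, float c) { return acc - b * c; }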

src/compiler/arm64/instruction-selector-arm64.cc

@@ -1740,34 +1740,7 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kArm64Float32Sub, node);
}
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
VisitRRR(this, kArm64Float32Sub, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
Arm64OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
if (m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
g.UseRegister(mright0.right().node()));
return;
}
}
}
Emit(kArm64Float64Neg, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
}
VisitRRR(this, kArm64Float64Sub, node);
}
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
VisitRRR(this, kArm64Float64Sub, node);
}

src/compiler/ia32/instruction-selector-ia32.cc

@@ -929,44 +929,10 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
IA32OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
kSSEFloat32Neg);
return;
}
VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
IA32OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
if (m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
return;
}
}
}
VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
kSSEFloat64Neg);
return;
}
VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}

src/compiler/instruction-selector.cc

@@ -1135,8 +1135,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
return MarkAsFloat32(node), VisitFloat32Sub(node);
case IrOpcode::kFloat32SubPreserveNan:
return MarkAsFloat32(node), VisitFloat32SubPreserveNan(node);
case IrOpcode::kFloat32Neg:
return MarkAsFloat32(node), VisitFloat32Neg(node);
case IrOpcode::kFloat32Mul:
@@ -1157,8 +1155,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
return MarkAsFloat64(node), VisitFloat64Sub(node);
case IrOpcode::kFloat64SubPreserveNan:
return MarkAsFloat64(node), VisitFloat64SubPreserveNan(node);
case IrOpcode::kFloat64Neg:
return MarkAsFloat64(node), VisitFloat64Neg(node);
case IrOpcode::kFloat64Mul:

src/compiler/machine-operator.cc

@@ -160,7 +160,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
V(Float32SubPreserveNan, Operator::kNoProperties, 2, 0, 1) \
V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
V(Float32Neg, Operator::kNoProperties, 1, 0, 1) \
@@ -187,7 +186,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Float64Neg, Operator::kNoProperties, 1, 0, 1) \
V(Float64Add, Operator::kCommutative, 2, 0, 1) \
V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Float64SubPreserveNan, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \

src/compiler/machine-operator.h

@@ -342,7 +342,6 @@ class MachineOperatorBuilder final : public ZoneObject {
// (single-precision).
const Operator* Float32Add();
const Operator* Float32Sub();
const Operator* Float32SubPreserveNan();
const Operator* Float32Mul();
const Operator* Float32Div();
const Operator* Float32Sqrt();
@@ -351,7 +350,6 @@ class MachineOperatorBuilder final : public ZoneObject {
// (double-precision).
const Operator* Float64Add();
const Operator* Float64Sub();
const Operator* Float64SubPreserveNan();
const Operator* Float64Mul();
const Operator* Float64Div();
const Operator* Float64Mod();
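
For readers unfamiliar with the V(...) rows in machine-operator.cc
above: the columns are operator name, Operator properties, value input
count, control input count, and output count, and the list is an
X macro that stamps out both opcodes and cached operator objects behind
the accessors declared here. A self-contained miniature of the
technique (not V8 code; the real list also carries the Operator::k*
properties column):

  #include <cstdio>

  #define PURE_OP_LIST(V)  \
    V(Float32Sub, 2, 0, 1) \
    V(Float64Sub, 2, 0, 1)

  enum Opcode {
  #define DECLARE(Name, ...) k##Name,
    PURE_OP_LIST(DECLARE)
  #undef DECLARE
  };

  struct OpInfo { const char* name; int value_inputs, control_inputs, outputs; };

  static const OpInfo kOps[] = {
  #define ENTRY(Name, vin, cin, out) {#Name, vin, cin, out},
      PURE_OP_LIST(ENTRY)
  #undef ENTRY
  };

  int main() {
    for (const OpInfo& op : kOps)
      std::printf("%s: %d value inputs, %d control inputs, %d output(s)\n",
                  op.name, op.value_inputs, op.control_inputs, op.outputs);
    return 0;
  }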

src/compiler/mips/code-generator-mips.cc

@@ -1055,11 +1055,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsSubPreserveNanS:
__ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsMulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1126,11 +1121,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsSubPreserveNanD:
__ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

src/compiler/mips/instruction-codes-mips.h

@@ -47,7 +47,6 @@ namespace compiler {
V(MipsCmpS) \
V(MipsAddS) \
V(MipsSubS) \
V(MipsSubPreserveNanS) \
V(MipsMulS) \
V(MipsDivS) \
V(MipsModS) \
@@ -58,7 +57,6 @@ namespace compiler {
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
V(MipsSubPreserveNanD) \
V(MipsMulD) \
V(MipsDivD) \
V(MipsModD) \

src/compiler/mips/instruction-selector-mips.cc

@@ -764,32 +764,10 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kMipsSubS, node);
}
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
VisitRRR(this, kMipsSubPreserveNanS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
g.UseRegister(mright0.right().node()));
return;
}
}
}
VisitRRR(this, kMipsSubD, node);
}
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
VisitRRR(this, kMipsSubPreserveNanD, node);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMipsMulS, node);
}

src/compiler/mips64/code-generator-mips64.cc

@@ -1264,11 +1264,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64SubPreserveNanS:
__ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64MulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1322,11 +1317,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64SubPreserveNanD:
__ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

src/compiler/mips64/instruction-codes-mips64.h

@@ -66,7 +66,6 @@ namespace compiler {
V(Mips64CmpS) \
V(Mips64AddS) \
V(Mips64SubS) \
V(Mips64SubPreserveNanS) \
V(Mips64MulS) \
V(Mips64DivS) \
V(Mips64ModS) \
@@ -78,7 +77,6 @@ namespace compiler {
V(Mips64CmpD) \
V(Mips64AddD) \
V(Mips64SubD) \
V(Mips64SubPreserveNanD) \
V(Mips64MulD) \
V(Mips64DivD) \
V(Mips64ModD) \

src/compiler/mips64/instruction-selector-mips64.cc

@@ -1169,32 +1169,10 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kMips64SubS, node);
}
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
VisitRRR(this, kMips64SubPreserveNanS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
Mips64OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
Emit(kMips64Float64RoundUp, g.DefineAsRegister(node),
g.UseRegister(mright0.right().node()));
return;
}
}
}
VisitRRR(this, kMips64SubD, node);
}
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
VisitRRR(this, kMips64SubPreserveNanD, node);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMips64MulS, node);
}

src/compiler/opcodes.h

@@ -427,7 +427,6 @@
V(BitcastInt64ToFloat64) \
V(Float32Add) \
V(Float32Sub) \
V(Float32SubPreserveNan) \
V(Float32Neg) \
V(Float32Mul) \
V(Float32Div) \
@@ -436,7 +435,6 @@
V(Float32RoundDown) \
V(Float64Add) \
V(Float64Sub) \
V(Float64SubPreserveNan) \
V(Float64Neg) \
V(Float64Mul) \
V(Float64Div) \

src/compiler/raw-machine-assembler.h

@@ -433,9 +433,6 @@ class RawMachineAssembler {
Node* Float32Sub(Node* a, Node* b) {
return AddNode(machine()->Float32Sub(), a, b);
}
Node* Float32SubPreserveNan(Node* a, Node* b) {
return AddNode(machine()->Float32SubPreserveNan(), a, b);
}
Node* Float32Mul(Node* a, Node* b) {
return AddNode(machine()->Float32Mul(), a, b);
}
@@ -468,9 +465,6 @@ class RawMachineAssembler {
Node* Float64Sub(Node* a, Node* b) {
return AddNode(machine()->Float64Sub(), a, b);
}
Node* Float64SubPreserveNan(Node* a, Node* b) {
return AddNode(machine()->Float64SubPreserveNan(), a, b);
}
Node* Float64Mul(Node* a, Node* b) {
return AddNode(machine()->Float64Mul(), a, b);
}

src/compiler/typer.cc

@@ -2034,10 +2034,6 @@ Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat32SubPreserveNan(Node* node) {
return Type::Number();
}
Type* Typer::Visitor::TypeFloat32Neg(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
@@ -2073,10 +2069,6 @@ Type* Typer::Visitor::TypeFloat64Add(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64SubPreserveNan(Node* node) {
return Type::Number();
}
Type* Typer::Visitor::TypeFloat64Neg(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }

src/compiler/verifier.cc

@@ -1150,7 +1150,6 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kUint64LessThanOrEqual:
case IrOpcode::kFloat32Add:
case IrOpcode::kFloat32Sub:
case IrOpcode::kFloat32SubPreserveNan:
case IrOpcode::kFloat32Neg:
case IrOpcode::kFloat32Mul:
case IrOpcode::kFloat32Div:
@@ -1161,7 +1160,6 @@
case IrOpcode::kFloat32LessThanOrEqual:
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
case IrOpcode::kFloat64SubPreserveNan:
case IrOpcode::kFloat64Neg:
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:

src/compiler/wasm-compiler.cc

@@ -544,7 +544,7 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
op = m->Float32Add();
break;
case wasm::kExprF32Sub:
op = m->Float32SubPreserveNan();
op = m->Float32Sub();
break;
case wasm::kExprF32Mul:
op = m->Float32Mul();
@@ -575,7 +575,7 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
op = m->Float64Add();
break;
case wasm::kExprF64Sub:
op = m->Float64SubPreserveNan();
op = m->Float64Sub();
break;
case wasm::kExprF64Mul:
op = m->Float64Mul();

src/compiler/x64/instruction-selector-x64.cc

@@ -1336,17 +1336,6 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
X64OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
kSSEFloat32Neg);
return;
}
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
@@ -1376,29 +1365,6 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
X64OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
if (m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
return;
}
}
}
VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
kSSEFloat64Neg);
return;
}
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
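
An aside on the kAVX*/kSSE* opcode pairs above (an illustrative reading,
not from the CL): the x64 selector picks the AVX opcode when AVX is
supported, and the two choices carry different register constraints,
since VEX-encoded AVX ops are three-operand and non-destructive while
legacy SSE ops overwrite their first input. A self-contained miniature
of that shape:

  #include <cstdio>
  #include <initializer_list>

  enum RegisterConstraint { kAnyRegister, kSameAsFirstInput };

  struct Selection { const char* opcode; RegisterConstraint output; };

  // Mirrors the shape of the selection above: non-destructive with AVX,
  // destructive two-operand form with SSE.
  Selection SelectFloat64Sub(bool has_avx) {
    return has_avx ? Selection{"kAVXFloat64Sub", kAnyRegister}
                   : Selection{"kSSEFloat64Sub", kSameAsFirstInput};
  }

  int main() {
    for (bool avx : {true, false}) {
      Selection s = SelectFloat64Sub(avx);
      std::printf("%s -> output: %s\n", s.opcode,
                  s.output == kAnyRegister ? "any register"
                                           : "same as first input");
    }
    return 0;
  }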

src/mips/macro-assembler-mips.cc

@@ -18,19 +18,6 @@
namespace v8 {
namespace internal {
// Floating point constants.
const uint32_t kDoubleSignMask = HeapNumber::kSignMask;
const uint32_t kDoubleExponentShift = HeapNumber::kExponentShift;
const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
const uint32_t kDoubleNaNMask =
HeapNumber::kExponentMask | (1 << kDoubleNaNShift);
const uint32_t kSingleSignMask = kBinary32SignMask;
const uint32_t kSingleExponentMask = kBinary32ExponentMask;
const uint32_t kSingleExponentShift = kBinary32ExponentShift;
const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
@@ -4757,75 +4744,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
sdc1(double_result, MemOperand(scratch1, 0));
}
void MacroAssembler::SubNanPreservePayloadAndSign_s(FloatRegister fd,
FloatRegister fs,
FloatRegister ft) {
FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
Label check_nan, save_payload, done;
Register scratch1 = t8;
Register scratch2 = t9;
sub_s(dest, fs, ft);
// Check if the result of subtraction is NaN.
BranchF32(nullptr, &check_nan, eq, fs, ft);
Branch(USE_DELAY_SLOT, &done);
dest.is(fd) ? nop() : mov_s(fd, dest);
bind(&check_nan);
// Check if first operand is a NaN.
mfc1(scratch1, fs);
BranchF32(nullptr, &save_payload, eq, fs, fs);
// Second operand must be a NaN.
mfc1(scratch1, ft);
bind(&save_payload);
// Preserve payload.
And(scratch1, scratch1,
Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
mfc1(scratch2, dest);
And(scratch2, scratch2, Operand(kSingleNaNMask));
Or(scratch2, scratch2, scratch1);
mtc1(scratch2, fd);
bind(&done);
}
void MacroAssembler::SubNanPreservePayloadAndSign_d(DoubleRegister fd,
DoubleRegister fs,
DoubleRegister ft) {
FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
Label check_nan, save_payload, done;
Register scratch1 = t8;
Register scratch2 = t9;
sub_d(dest, fs, ft);
// Check if the result of subtraction is NaN.
BranchF64(nullptr, &check_nan, eq, fs, ft);
Branch(USE_DELAY_SLOT, &done);
dest.is(fd) ? nop() : mov_d(fd, dest);
bind(&check_nan);
// Check if first operand is a NaN.
Mfhc1(scratch1, fs);
mov_s(dest, fs);
BranchF64(nullptr, &save_payload, eq, fs, fs);
// Second operand must be a NaN.
Mfhc1(scratch1, ft);
mov_s(dest, ft);
bind(&save_payload);
// Preserve payload.
And(scratch1, scratch1,
Operand(kDoubleSignMask | ((1 << kDoubleNaNShift) - 1)));
Mfhc1(scratch2, dest);
And(scratch2, scratch2, Operand(kDoubleNaNMask));
Or(scratch2, scratch2, scratch1);
Move_s(fd, dest);
Mthc1(scratch2, fd);
bind(&done);
}
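
What the two deleted helpers did, modeled in standalone C++ (a sketch,
not the original code): MIPS sub_s/sub_d canonicalize NaN results, so
the helpers re-inserted the sign bit and payload of the NaN operand into
the canonical NaN, using the mask constants deleted at the top of this
file. Single-precision model:

  #include <cmath>
  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  float SubPreserveNanModel(float fs, float ft) {
    float result = fs - ft;
    if (!std::isnan(result)) return result;
    // The assembly checked fs first, then fell through to ft.
    const float& nan_src = std::isnan(fs) ? fs : ft;
    uint32_t src_bits, res_bits;
    std::memcpy(&src_bits, &nan_src, sizeof src_bits);
    std::memcpy(&res_bits, &result, sizeof res_bits);
    const uint32_t kSignMask = 0x80000000u;              // kSingleSignMask
    const uint32_t kPayloadMask = (1u << 22) - 1;        // bits below the quiet bit
    const uint32_t kNaNMask = 0x7F800000u | (1u << 22);  // kSingleNaNMask
    res_bits = (res_bits & kNaNMask) | (src_bits & (kSignMask | kPayloadMask));
    std::memcpy(&result, &res_bits, sizeof res_bits);
    return result;
  }

  int main() {
    uint32_t bits = 0x7FC00042u;  // quiet NaN carrying payload 0x42
    float nan_with_payload;
    std::memcpy(&nan_with_payload, &bits, sizeof bits);
    float r = SubPreserveNanModel(nan_with_payload, 1.0f);
    uint32_t r_bits;
    std::memcpy(&r_bits, &r, sizeof r_bits);
    std::printf("result bits: 0x%08X\n", r_bits);  // payload 0x42 survives
    return 0;
  }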
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
Handle<Map> map,

src/mips/macro-assembler-mips.h

@@ -875,12 +875,6 @@ class MacroAssembler: public Assembler {
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
// Preserve value of a NaN operand
void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs,
FPURegister ft);
void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
FPURegister ft);
// FP32 mode: Move the general purpose register into
// the high part of the double-register pair.
// FP64 mode: Move the general-purpose register into

test/unittests/compiler/arm/instruction-selector-arm-unittest.cc

@@ -1899,36 +1899,6 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
}
TEST_F(InstructionSelectorTest, Float32SubWithMinusZero) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmVnegF32, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmVnegF64, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());

test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc

@@ -4219,21 +4219,6 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64());

test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc

@@ -751,71 +751,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
}
}
TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
// -----------------------------------------------------------------------------
// Miscellaneous.

test/unittests/compiler/x64/instruction-selector-x64-unittest.cc

@@ -1168,71 +1168,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
}
}
TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
// -----------------------------------------------------------------------------
// Miscellaneous.