From f8938e50964e7b83818a0fff7fc0f2b065f8bf49 Mon Sep 17 00:00:00 2001 From: ahaas Date: Mon, 8 Aug 2016 05:08:57 -0700 Subject: [PATCH] [turbofan] Remove the FloatXXSubPreserveNan operators. This CL changes the semantics of FloatXXSub to match the semantics of FloatXXSubPreserveNan. Therefore there is no need anymore for the FloatXXSubPreserveNan operators. The optimizations in VisitFloatXXSub which are removed in this CL have already been moved to machine-operator-reducer.cc in https://codereview.chromium.org/2226663002 R=bmeurer@chromium.org Review-Url: https://codereview.chromium.org/2220973002 Cr-Commit-Position: refs/heads/master@{#38437} --- src/compiler/arm/instruction-selector-arm.cc | 70 +++------------- .../arm64/instruction-selector-arm64.cc | 27 ------ .../ia32/instruction-selector-ia32.cc | 34 -------- src/compiler/instruction-selector.cc | 4 - src/compiler/machine-operator.cc | 2 - src/compiler/machine-operator.h | 2 - src/compiler/mips/code-generator-mips.cc | 10 --- src/compiler/mips/instruction-codes-mips.h | 2 - .../mips/instruction-selector-mips.cc | 22 ----- src/compiler/mips64/code-generator-mips64.cc | 10 --- .../mips64/instruction-codes-mips64.h | 2 - .../mips64/instruction-selector-mips64.cc | 22 ----- src/compiler/opcodes.h | 2 - src/compiler/raw-machine-assembler.h | 6 -- src/compiler/typer.cc | 8 -- src/compiler/verifier.cc | 2 - src/compiler/wasm-compiler.cc | 4 +- src/compiler/x64/instruction-selector-x64.cc | 34 -------- src/mips/macro-assembler-mips.cc | 82 ------------------- src/mips/macro-assembler-mips.h | 6 -- .../arm/instruction-selector-arm-unittest.cc | 30 ------- .../instruction-selector-arm64-unittest.cc | 15 ---- .../instruction-selector-ia32-unittest.cc | 65 --------------- .../x64/instruction-selector-x64-unittest.cc | 65 --------------- 24 files changed, 14 insertions(+), 512 deletions(-) diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc index
27e86b5f5b..fa3e1dbe68 100644 --- a/src/compiler/arm/instruction-selector-arm.cc +++ b/src/compiler/arm/instruction-selector-arm.cc @@ -1416,76 +1416,30 @@ void InstructionSelector::VisitFloat64Add(Node* node) { VisitRRR(this, kArmVaddF64, node); } -namespace { -void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) { - ArmOperandGenerator g(selector); - Float32BinopMatcher m(node); - if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) { - Float32BinopMatcher mright(m.right().node()); - selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node), - g.UseRegister(m.left().node()), - g.UseRegister(mright.left().node()), - g.UseRegister(mright.right().node())); - return; - } - VisitRRR(selector, kArmVsubF32, node); -} - -void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) { - ArmOperandGenerator g(selector); - Float64BinopMatcher m(node); - if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) { - Float64BinopMatcher mright(m.right().node()); - selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node), - g.UseRegister(m.left().node()), - g.UseRegister(mright.left().node()), - g.UseRegister(mright.right().node())); - return; - } - VisitRRR(selector, kArmVsubF64, node); -} -} // namespace - void InstructionSelector::VisitFloat32Sub(Node* node) { ArmOperandGenerator g(this); Float32BinopMatcher m(node); - if (m.left().IsMinusZero()) { - Emit(kArmVnegF32, g.DefineAsRegister(node), - g.UseRegister(m.right().node())); + if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { + Float32BinopMatcher mright(m.right().node()); + Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); return; } - VisitFloat32SubHelper(this, node); -} - -void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) { - VisitFloat32SubHelper(this, node); + VisitRRR(this, kArmVsubF32, node); } void 
InstructionSelector::VisitFloat64Sub(Node* node) { ArmOperandGenerator g(this); Float64BinopMatcher m(node); - if (m.left().IsMinusZero()) { - if (m.right().IsFloat64RoundDown() && - CanCover(m.node(), m.right().node())) { - if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && - CanCover(m.right().node(), m.right().InputAt(0))) { - Float64BinopMatcher mright0(m.right().InputAt(0)); - if (mright0.left().IsMinusZero()) { - Emit(kArmVrintpF64, g.DefineAsRegister(node), - g.UseRegister(mright0.right().node())); - return; - } - } - } - Emit(kArmVnegF64, g.DefineAsRegister(node), - g.UseRegister(m.right().node())); + if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { + Float64BinopMatcher mright(m.right().node()); + Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); return; } - VisitFloat64SubHelper(this, node); -} - -void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) { - VisitFloat64SubHelper(this, node); + VisitRRR(this, kArmVsubF64, node); } void InstructionSelector::VisitFloat32Mul(Node* node) { diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc index 78b24c5af6..28ac51fc22 100644 --- a/src/compiler/arm64/instruction-selector-arm64.cc +++ b/src/compiler/arm64/instruction-selector-arm64.cc @@ -1740,34 +1740,7 @@ void InstructionSelector::VisitFloat32Sub(Node* node) { VisitRRR(this, kArm64Float32Sub, node); } -void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) { - VisitRRR(this, kArm64Float32Sub, node); -} - void InstructionSelector::VisitFloat64Sub(Node* node) { - Arm64OperandGenerator g(this); - Float64BinopMatcher m(node); - if (m.left().IsMinusZero()) { - if (m.right().IsFloat64RoundDown() && - CanCover(m.node(), m.right().node())) { - if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && - CanCover(m.right().node(), 
m.right().InputAt(0))) { - Float64BinopMatcher mright0(m.right().InputAt(0)); - if (mright0.left().IsMinusZero()) { - Emit(kArm64Float64RoundUp, g.DefineAsRegister(node), - g.UseRegister(mright0.right().node())); - return; - } - } - } - Emit(kArm64Float64Neg, g.DefineAsRegister(node), - g.UseRegister(m.right().node())); - return; - } - VisitRRR(this, kArm64Float64Sub, node); -} - -void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) { VisitRRR(this, kArm64Float64Sub, node); } diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc index e24d764fe2..34f2f5b73e 100644 --- a/src/compiler/ia32/instruction-selector-ia32.cc +++ b/src/compiler/ia32/instruction-selector-ia32.cc @@ -929,44 +929,10 @@ void InstructionSelector::VisitFloat64Add(Node* node) { void InstructionSelector::VisitFloat32Sub(Node* node) { - IA32OperandGenerator g(this); - Float32BinopMatcher m(node); - if (m.left().IsMinusZero()) { - VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg, - kSSEFloat32Neg); - return; - } - VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub); -} - -void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) { VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub); } void InstructionSelector::VisitFloat64Sub(Node* node) { - IA32OperandGenerator g(this); - Float64BinopMatcher m(node); - if (m.left().IsMinusZero()) { - if (m.right().IsFloat64RoundDown() && - CanCover(m.node(), m.right().node())) { - if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && - CanCover(m.right().node(), m.right().InputAt(0))) { - Float64BinopMatcher mright0(m.right().InputAt(0)); - if (mright0.left().IsMinusZero()) { - Emit(kSSEFloat64Round | MiscField::encode(kRoundUp), - g.DefineAsRegister(node), g.UseRegister(mright0.right().node())); - return; - } - } - } - VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg, - kSSEFloat64Neg); - return; - } - VisitRROFloat(this, node, kAVXFloat64Sub, 
kSSEFloat64Sub); -} - -void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) { VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub); } diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc index 2f905e1598..86b8c10ff2 100644 --- a/src/compiler/instruction-selector.cc +++ b/src/compiler/instruction-selector.cc @@ -1135,8 +1135,6 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsFloat32(node), VisitFloat32Add(node); case IrOpcode::kFloat32Sub: return MarkAsFloat32(node), VisitFloat32Sub(node); - case IrOpcode::kFloat32SubPreserveNan: - return MarkAsFloat32(node), VisitFloat32SubPreserveNan(node); case IrOpcode::kFloat32Neg: return MarkAsFloat32(node), VisitFloat32Neg(node); case IrOpcode::kFloat32Mul: @@ -1157,8 +1155,6 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsFloat64(node), VisitFloat64Add(node); case IrOpcode::kFloat64Sub: return MarkAsFloat64(node), VisitFloat64Sub(node); - case IrOpcode::kFloat64SubPreserveNan: - return MarkAsFloat64(node), VisitFloat64SubPreserveNan(node); case IrOpcode::kFloat64Neg: return MarkAsFloat64(node), VisitFloat64Neg(node); case IrOpcode::kFloat64Mul: diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc index 284caf3062..09b37a8761 100644 --- a/src/compiler/machine-operator.cc +++ b/src/compiler/machine-operator.cc @@ -160,7 +160,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) { V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \ V(Float32Add, Operator::kCommutative, 2, 0, 1) \ V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \ - V(Float32SubPreserveNan, Operator::kNoProperties, 2, 0, 1) \ V(Float32Mul, Operator::kCommutative, 2, 0, 1) \ V(Float32Div, Operator::kNoProperties, 2, 0, 1) \ V(Float32Neg, Operator::kNoProperties, 1, 0, 1) \ @@ -187,7 +186,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) { V(Float64Neg, Operator::kNoProperties, 1, 0, 1) \ V(Float64Add, 
Operator::kCommutative, 2, 0, 1) \ V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \ - V(Float64SubPreserveNan, Operator::kNoProperties, 2, 0, 1) \ V(Float64Mul, Operator::kCommutative, 2, 0, 1) \ V(Float64Div, Operator::kNoProperties, 2, 0, 1) \ V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \ diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h index 06bf615dcc..e1a1eb8584 100644 --- a/src/compiler/machine-operator.h +++ b/src/compiler/machine-operator.h @@ -342,7 +342,6 @@ class MachineOperatorBuilder final : public ZoneObject { // (single-precision). const Operator* Float32Add(); const Operator* Float32Sub(); - const Operator* Float32SubPreserveNan(); const Operator* Float32Mul(); const Operator* Float32Div(); const Operator* Float32Sqrt(); @@ -351,7 +350,6 @@ class MachineOperatorBuilder final : public ZoneObject { // (double-precision). const Operator* Float64Add(); const Operator* Float64Sub(); - const Operator* Float64SubPreserveNan(); const Operator* Float64Mul(); const Operator* Float64Div(); const Operator* Float64Mod(); diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc index 011a743137..073c9f560c 100644 --- a/src/compiler/mips/code-generator-mips.cc +++ b/src/compiler/mips/code-generator-mips.cc @@ -1055,11 +1055,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), i.InputDoubleRegister(1)); break; - case kMipsSubPreserveNanS: - __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(), - i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; case kMipsMulS: // TODO(plind): add special case: right op is -1.0, see arm port. 
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), @@ -1126,11 +1121,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), i.InputDoubleRegister(1)); break; - case kMipsSubPreserveNanD: - __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(), - i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; case kMipsMulD: // TODO(plind): add special case: right op is -1.0, see arm port. __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h index c7222a6343..1da4edff11 100644 --- a/src/compiler/mips/instruction-codes-mips.h +++ b/src/compiler/mips/instruction-codes-mips.h @@ -47,7 +47,6 @@ namespace compiler { V(MipsCmpS) \ V(MipsAddS) \ V(MipsSubS) \ - V(MipsSubPreserveNanS) \ V(MipsMulS) \ V(MipsDivS) \ V(MipsModS) \ @@ -58,7 +57,6 @@ namespace compiler { V(MipsCmpD) \ V(MipsAddD) \ V(MipsSubD) \ - V(MipsSubPreserveNanD) \ V(MipsMulD) \ V(MipsDivD) \ V(MipsModD) \ diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc index d4b68e4ab2..efcec0c8f1 100644 --- a/src/compiler/mips/instruction-selector-mips.cc +++ b/src/compiler/mips/instruction-selector-mips.cc @@ -764,32 +764,10 @@ void InstructionSelector::VisitFloat32Sub(Node* node) { VisitRRR(this, kMipsSubS, node); } -void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) { - VisitRRR(this, kMipsSubPreserveNanS, node); -} - void InstructionSelector::VisitFloat64Sub(Node* node) { - MipsOperandGenerator g(this); - Float64BinopMatcher m(node); - if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() && - CanCover(m.node(), m.right().node())) { - if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && - CanCover(m.right().node(), m.right().InputAt(0))) { - Float64BinopMatcher mright0(m.right().InputAt(0)); - if 
(mright0.left().IsMinusZero()) { - Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node), - g.UseRegister(mright0.right().node())); - return; - } - } - } VisitRRR(this, kMipsSubD, node); } -void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) { - VisitRRR(this, kMipsSubPreserveNanD, node); -} - void InstructionSelector::VisitFloat32Mul(Node* node) { VisitRRR(this, kMipsMulS, node); } diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc index 3823bb0217..ba956f9cc7 100644 --- a/src/compiler/mips64/code-generator-mips64.cc +++ b/src/compiler/mips64/code-generator-mips64.cc @@ -1264,11 +1264,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), i.InputDoubleRegister(1)); break; - case kMips64SubPreserveNanS: - __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(), - i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; case kMips64MulS: // TODO(plind): add special case: right op is -1.0, see arm port. __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), @@ -1322,11 +1317,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), i.InputDoubleRegister(1)); break; - case kMips64SubPreserveNanD: - __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(), - i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; case kMips64MulD: // TODO(plind): add special case: right op is -1.0, see arm port. 
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h index d2f6c10fca..f651a7073a 100644 --- a/src/compiler/mips64/instruction-codes-mips64.h +++ b/src/compiler/mips64/instruction-codes-mips64.h @@ -66,7 +66,6 @@ namespace compiler { V(Mips64CmpS) \ V(Mips64AddS) \ V(Mips64SubS) \ - V(Mips64SubPreserveNanS) \ V(Mips64MulS) \ V(Mips64DivS) \ V(Mips64ModS) \ @@ -78,7 +77,6 @@ namespace compiler { V(Mips64CmpD) \ V(Mips64AddD) \ V(Mips64SubD) \ - V(Mips64SubPreserveNanD) \ V(Mips64MulD) \ V(Mips64DivD) \ V(Mips64ModD) \ diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc index 471e50d1dd..c3284aa7ac 100644 --- a/src/compiler/mips64/instruction-selector-mips64.cc +++ b/src/compiler/mips64/instruction-selector-mips64.cc @@ -1169,32 +1169,10 @@ void InstructionSelector::VisitFloat32Sub(Node* node) { VisitRRR(this, kMips64SubS, node); } -void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) { - VisitRRR(this, kMips64SubPreserveNanS, node); -} - void InstructionSelector::VisitFloat64Sub(Node* node) { - Mips64OperandGenerator g(this); - Float64BinopMatcher m(node); - if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() && - CanCover(m.node(), m.right().node())) { - if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && - CanCover(m.right().node(), m.right().InputAt(0))) { - Float64BinopMatcher mright0(m.right().InputAt(0)); - if (mright0.left().IsMinusZero()) { - Emit(kMips64Float64RoundUp, g.DefineAsRegister(node), - g.UseRegister(mright0.right().node())); - return; - } - } - } VisitRRR(this, kMips64SubD, node); } -void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) { - VisitRRR(this, kMips64SubPreserveNanD, node); -} - void InstructionSelector::VisitFloat32Mul(Node* node) { VisitRRR(this, kMips64MulS, node); } diff --git a/src/compiler/opcodes.h 
b/src/compiler/opcodes.h index c153b38402..7f3f1eed94 100644 --- a/src/compiler/opcodes.h +++ b/src/compiler/opcodes.h @@ -427,7 +427,6 @@ V(BitcastInt64ToFloat64) \ V(Float32Add) \ V(Float32Sub) \ - V(Float32SubPreserveNan) \ V(Float32Neg) \ V(Float32Mul) \ V(Float32Div) \ @@ -436,7 +435,6 @@ V(Float32RoundDown) \ V(Float64Add) \ V(Float64Sub) \ - V(Float64SubPreserveNan) \ V(Float64Neg) \ V(Float64Mul) \ V(Float64Div) \ diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h index 2a5ae1f8f1..f1bc3d7897 100644 --- a/src/compiler/raw-machine-assembler.h +++ b/src/compiler/raw-machine-assembler.h @@ -433,9 +433,6 @@ class RawMachineAssembler { Node* Float32Sub(Node* a, Node* b) { return AddNode(machine()->Float32Sub(), a, b); } - Node* Float32SubPreserveNan(Node* a, Node* b) { - return AddNode(machine()->Float32SubPreserveNan(), a, b); - } Node* Float32Mul(Node* a, Node* b) { return AddNode(machine()->Float32Mul(), a, b); } @@ -468,9 +465,6 @@ class RawMachineAssembler { Node* Float64Sub(Node* a, Node* b) { return AddNode(machine()->Float64Sub(), a, b); } - Node* Float64SubPreserveNan(Node* a, Node* b) { - return AddNode(machine()->Float64SubPreserveNan(), a, b); - } Node* Float64Mul(Node* a, Node* b) { return AddNode(machine()->Float64Mul(), a, b); } diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc index ade80e7495..daa64f8ba6 100644 --- a/src/compiler/typer.cc +++ b/src/compiler/typer.cc @@ -2034,10 +2034,6 @@ Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); } Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); } -Type* Typer::Visitor::TypeFloat32SubPreserveNan(Node* node) { - return Type::Number(); -} - Type* Typer::Visitor::TypeFloat32Neg(Node* node) { return Type::Number(); } Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); } @@ -2073,10 +2069,6 @@ Type* Typer::Visitor::TypeFloat64Add(Node* node) { return Type::Number(); } Type* 
Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); } -Type* Typer::Visitor::TypeFloat64SubPreserveNan(Node* node) { - return Type::Number(); -} - Type* Typer::Visitor::TypeFloat64Neg(Node* node) { return Type::Number(); } Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); } diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc index 3cd3582002..e7869d3cd4 100644 --- a/src/compiler/verifier.cc +++ b/src/compiler/verifier.cc @@ -1150,7 +1150,6 @@ void Verifier::Visitor::Check(Node* node) { case IrOpcode::kUint64LessThanOrEqual: case IrOpcode::kFloat32Add: case IrOpcode::kFloat32Sub: - case IrOpcode::kFloat32SubPreserveNan: case IrOpcode::kFloat32Neg: case IrOpcode::kFloat32Mul: case IrOpcode::kFloat32Div: @@ -1161,7 +1160,6 @@ void Verifier::Visitor::Check(Node* node) { case IrOpcode::kFloat32LessThanOrEqual: case IrOpcode::kFloat64Add: case IrOpcode::kFloat64Sub: - case IrOpcode::kFloat64SubPreserveNan: case IrOpcode::kFloat64Neg: case IrOpcode::kFloat64Mul: case IrOpcode::kFloat64Div: diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc index fa4640841b..a5bfcf82bc 100644 --- a/src/compiler/wasm-compiler.cc +++ b/src/compiler/wasm-compiler.cc @@ -544,7 +544,7 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right, op = m->Float32Add(); break; case wasm::kExprF32Sub: - op = m->Float32SubPreserveNan(); + op = m->Float32Sub(); break; case wasm::kExprF32Mul: op = m->Float32Mul(); @@ -575,7 +575,7 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right, op = m->Float64Add(); break; case wasm::kExprF64Sub: - op = m->Float64SubPreserveNan(); + op = m->Float64Sub(); break; case wasm::kExprF64Mul: op = m->Float64Mul(); diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc index 371048bb75..30d6503667 100644 --- a/src/compiler/x64/instruction-selector-x64.cc +++ 
b/src/compiler/x64/instruction-selector-x64.cc @@ -1336,17 +1336,6 @@ void InstructionSelector::VisitFloat32Add(Node* node) { void InstructionSelector::VisitFloat32Sub(Node* node) { - X64OperandGenerator g(this); - Float32BinopMatcher m(node); - if (m.left().IsMinusZero()) { - VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg, - kSSEFloat32Neg); - return; - } - VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub); -} - -void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) { VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub); } @@ -1376,29 +1365,6 @@ void InstructionSelector::VisitFloat64Add(Node* node) { void InstructionSelector::VisitFloat64Sub(Node* node) { - X64OperandGenerator g(this); - Float64BinopMatcher m(node); - if (m.left().IsMinusZero()) { - if (m.right().IsFloat64RoundDown() && - CanCover(m.node(), m.right().node())) { - if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && - CanCover(m.right().node(), m.right().InputAt(0))) { - Float64BinopMatcher mright0(m.right().InputAt(0)); - if (mright0.left().IsMinusZero()) { - Emit(kSSEFloat64Round | MiscField::encode(kRoundUp), - g.DefineAsRegister(node), g.UseRegister(mright0.right().node())); - return; - } - } - } - VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg, - kSSEFloat64Neg); - return; - } - VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub); -} - -void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) { VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub); } diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc index 22b44ca485..d1e60bd7f4 100644 --- a/src/mips/macro-assembler-mips.cc +++ b/src/mips/macro-assembler-mips.cc @@ -18,19 +18,6 @@ namespace v8 { namespace internal { -// Floating point constants. 
-const uint32_t kDoubleSignMask = HeapNumber::kSignMask; -const uint32_t kDoubleExponentShift = HeapNumber::kExponentShift; -const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1; -const uint32_t kDoubleNaNMask = - HeapNumber::kExponentMask | (1 << kDoubleNaNShift); - -const uint32_t kSingleSignMask = kBinary32SignMask; -const uint32_t kSingleExponentMask = kBinary32ExponentMask; -const uint32_t kSingleExponentShift = kBinary32ExponentShift; -const uint32_t kSingleNaNShift = kSingleExponentShift - 1; -const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift); - MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size, CodeObjectRequired create_code_object) : Assembler(arg_isolate, buffer, size), @@ -4757,75 +4744,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, sdc1(double_result, MemOperand(scratch1, 0)); } -void MacroAssembler::SubNanPreservePayloadAndSign_s(FloatRegister fd, - FloatRegister fs, - FloatRegister ft) { - FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd; - Label check_nan, save_payload, done; - Register scratch1 = t8; - Register scratch2 = t9; - - sub_s(dest, fs, ft); - // Check if the result of subtraction is NaN. - BranchF32(nullptr, &check_nan, eq, fs, ft); - Branch(USE_DELAY_SLOT, &done); - dest.is(fd) ? nop() : mov_s(fd, dest); - - bind(&check_nan); - // Check if first operand is a NaN. - mfc1(scratch1, fs); - BranchF32(nullptr, &save_payload, eq, fs, fs); - // Second operand must be a NaN. - mfc1(scratch1, ft); - - bind(&save_payload); - // Reserve payload. 
- And(scratch1, scratch1, - Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1))); - mfc1(scratch2, dest); - And(scratch2, scratch2, Operand(kSingleNaNMask)); - Or(scratch2, scratch2, scratch1); - mtc1(scratch2, fd); - - bind(&done); -} - -void MacroAssembler::SubNanPreservePayloadAndSign_d(DoubleRegister fd, - DoubleRegister fs, - DoubleRegister ft) { - FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd; - Label check_nan, save_payload, done; - Register scratch1 = t8; - Register scratch2 = t9; - - sub_d(dest, fs, ft); - // Check if the result of subtraction is NaN. - BranchF64(nullptr, &check_nan, eq, fs, ft); - Branch(USE_DELAY_SLOT, &done); - dest.is(fd) ? nop() : mov_d(fd, dest); - - bind(&check_nan); - // Check if first operand is a NaN. - Mfhc1(scratch1, fs); - mov_s(dest, fs); - BranchF64(nullptr, &save_payload, eq, fs, fs); - // Second operand must be a NaN. - Mfhc1(scratch1, ft); - mov_s(dest, ft); - - bind(&save_payload); - // Reserve payload. - And(scratch1, scratch1, - Operand(kDoubleSignMask | ((1 << kDoubleNaNShift) - 1))); - Mfhc1(scratch2, dest); - And(scratch2, scratch2, Operand(kDoubleNaNMask)); - Or(scratch2, scratch2, scratch1); - Move_s(fd, dest); - Mthc1(scratch2, fd); - - bind(&done); -} - void MacroAssembler::CompareMapAndBranch(Register obj, Register scratch, Handle map, diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h index 1e60bea2e9..18698c162d 100644 --- a/src/mips/macro-assembler-mips.h +++ b/src/mips/macro-assembler-mips.h @@ -875,12 +875,6 @@ class MacroAssembler: public Assembler { void Floor_w_d(FPURegister fd, FPURegister fs); void Ceil_w_d(FPURegister fd, FPURegister fs); - // Preserve value of a NaN operand - void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs, - FPURegister ft); - void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs, - FPURegister ft); - // FP32 mode: Move the general purpose register into // the high part of the double-register 
pair. // FP64 mode: Move the general-purpose register into diff --git a/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc index 5992bcfc3b..6317d91fa9 100644 --- a/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc +++ b/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc @@ -1899,36 +1899,6 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) { } -TEST_F(InstructionSelectorTest, Float32SubWithMinusZero) { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kArmVnegF32, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); -} - - -TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kArmVnegF64, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); -} - - TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) { StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), MachineType::Float32(), MachineType::Float32()); diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc index 7e0bece376..baf8cc670e 100644 --- a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc +++ 
b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc @@ -4219,21 +4219,6 @@ TEST_F(InstructionSelectorTest, Float64Abs) { } -TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); -} - - TEST_F(InstructionSelectorTest, Float64Max) { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), MachineType::Float64()); diff --git a/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc index 5280f69aa0..f2c9c2609b 100644 --- a/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc +++ b/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc @@ -751,71 +751,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) { } } - -TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) { - { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } - { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), 
p0); - m.Return(n); - Stream s = m.Build(AVX); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } -} - - -TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) { - { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } - { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0); - m.Return(n); - Stream s = m.Build(AVX); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } -} - - // ----------------------------------------------------------------------------- // Miscellaneous. 
diff --git a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc index b16182c592..44279c4652 100644 --- a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc +++ b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc @@ -1168,71 +1168,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) { } } - -TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) { - { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } - { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0); - m.Return(n); - Stream s = m.Build(AVX); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } -} - - -TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) { - { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - 
ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } - { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0); - m.Return(n); - Stream s = m.Build(AVX); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } -} - - // ----------------------------------------------------------------------------- // Miscellaneous.