From 502b9aa71b1b28578579083f11f8bc67e4cd3430 Mon Sep 17 00:00:00 2001 From: "Ilija.Pavlovic" Date: Thu, 22 Sep 2016 01:42:39 -0700 Subject: [PATCH] MIPS: Port for (fused) multiply add/subtract. Port for VisitFloat32Add, VisitFloat64Add, VisitFloat32Sub and VisitFloat64Sub in InstructionSelector. TEST=unittests/InstructionSelectorTest.Float32AddWithFloat32Mul, unittests/InstructionSelectorTest.Float64AddWithFloat64Mul, unittests/InstructionSelectorTest.Float32SubWithFloat32Mul, unittests/InstructionSelectorTest.Float64SubWithFloat64Mul BUG= Review-Url: https://codereview.chromium.org/2341303002 Cr-Commit-Position: refs/heads/master@{#39616} --- src/compiler/mips/code-generator-mips.cc | 32 +++ src/compiler/mips/instruction-codes-mips.h | 8 + .../mips/instruction-selector-mips.cc | 106 ++++++++++ src/compiler/mips64/code-generator-mips64.cc | 32 +++ .../mips64/instruction-codes-mips64.h | 8 + .../mips64/instruction-selector-mips64.cc | 106 ++++++++++ .../instruction-selector-mips-unittest.cc | 197 ++++++++++++++++++ .../instruction-selector-mips64-unittest.cc | 197 ++++++++++++++++++ 8 files changed, 686 insertions(+) diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc index 543db9d833..a8809f869d 100644 --- a/src/compiler/mips/code-generator-mips.cc +++ b/src/compiler/mips/code-generator-mips.cc @@ -1118,6 +1118,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), i.InputDoubleRegister(1)); break; + case kMipsMaddS: + __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0), + i.InputFloatRegister(1), i.InputFloatRegister(2)); + break; + case kMipsMaddD: + __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1), i.InputDoubleRegister(2)); + break; + case kMipsMaddfS: + __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1), + i.InputFloatRegister(2)); + break; + case kMipsMaddfD: + __ 
maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1), + i.InputDoubleRegister(2)); + break; + case kMipsMsubS: + __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0), + i.InputFloatRegister(1), i.InputFloatRegister(2)); + break; + case kMipsMsubD: + __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1), i.InputDoubleRegister(2)); + break; + case kMipsMsubfS: + __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1), + i.InputFloatRegister(2)); + break; + case kMipsMsubfD: + __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1), + i.InputDoubleRegister(2)); + break; case kMipsMulD: // TODO(plind): add special case: right op is -1.0, see arm port. __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h index 1b052547ed..45ed041175 100644 --- a/src/compiler/mips/instruction-codes-mips.h +++ b/src/compiler/mips/instruction-codes-mips.h @@ -69,6 +69,14 @@ namespace compiler { V(MipsAddPair) \ V(MipsSubPair) \ V(MipsMulPair) \ + V(MipsMaddS) \ + V(MipsMaddD) \ + V(MipsMaddfS) \ + V(MipsMaddfD) \ + V(MipsMsubS) \ + V(MipsMsubD) \ + V(MipsMsubfS) \ + V(MipsMsubfD) \ V(MipsFloat32RoundDown) \ V(MipsFloat32RoundTruncate) \ V(MipsFloat32RoundUp) \ diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc index b84c44aaf5..0a98930b5c 100644 --- a/src/compiler/mips/instruction-selector-mips.cc +++ b/src/compiler/mips/instruction-selector-mips.cc @@ -781,20 +781,126 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { void InstructionSelector::VisitFloat32Add(Node* node) { + MipsOperandGenerator g(this); + Float32BinopMatcher m(node); + if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { + // For Add.S(Mul.S(x, y), z): + Float32BinopMatcher mleft(m.left().node()); + if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). 
+ Emit(kMipsMaddS, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(z, x, y). + Emit(kMipsMaddfS, g.DefineSameAsFirst(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } + if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { + // For Add.S(x, Mul.S(y, z)): + Float32BinopMatcher mright(m.right().node()); + if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(x, y, z). + Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(x, y, z). + Emit(kMipsMaddfS, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } VisitRRR(this, kMipsAddS, node); } void InstructionSelector::VisitFloat64Add(Node* node) { + MipsOperandGenerator g(this); + Float64BinopMatcher m(node); + if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { + // For Add.D(Mul.D(x, y), z): + Float64BinopMatcher mleft(m.left().node()); + if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y). + Emit(kMipsMaddD, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(z, x, y). 
+ Emit(kMipsMaddfD, g.DefineSameAsFirst(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } + if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { + // For Add.D(x, Mul.D(y, z)): + Float64BinopMatcher mright(m.right().node()); + if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(x, y, z). + Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(x, y, z). + Emit(kMipsMaddfD, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } VisitRRR(this, kMipsAddD, node); } void InstructionSelector::VisitFloat32Sub(Node* node) { + MipsOperandGenerator g(this); + Float32BinopMatcher m(node); + if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { + if (IsMipsArchVariant(kMips32r2)) { + // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y). + Float32BinopMatcher mleft(m.left().node()); + Emit(kMipsMsubS, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { + if (IsMipsArchVariant(kMips32r6)) { + // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z). 
+ Float32BinopMatcher mright(m.right().node()); + Emit(kMipsMsubfS, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } VisitRRR(this, kMipsSubS, node); } void InstructionSelector::VisitFloat64Sub(Node* node) { + MipsOperandGenerator g(this); + Float64BinopMatcher m(node); + if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { + if (IsMipsArchVariant(kMips32r2)) { + // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y). + Float64BinopMatcher mleft(m.left().node()); + Emit(kMipsMsubD, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { + if (IsMipsArchVariant(kMips32r6)) { + // For Sub.D(x,Mul.D(y,z)) select Msubf.D(x, y, z). + Float64BinopMatcher mright(m.right().node()); + Emit(kMipsMsubfD, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } VisitRRR(this, kMipsSubD, node); } diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc index 4297024a0a..6fef2884be 100644 --- a/src/compiler/mips64/code-generator-mips64.cc +++ b/src/compiler/mips64/code-generator-mips64.cc @@ -1314,6 +1314,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), i.InputDoubleRegister(1)); break; + case kMips64MaddS: + __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0), + i.InputFloatRegister(1), i.InputFloatRegister(2)); + break; + case kMips64MaddD: + __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1), i.InputDoubleRegister(2)); + break; + case kMips64MaddfS: + __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1), + 
i.InputFloatRegister(2)); + break; + case kMips64MaddfD: + __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1), + i.InputDoubleRegister(2)); + break; + case kMips64MsubS: + __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0), + i.InputFloatRegister(1), i.InputFloatRegister(2)); + break; + case kMips64MsubD: + __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1), i.InputDoubleRegister(2)); + break; + case kMips64MsubfS: + __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1), + i.InputFloatRegister(2)); + break; + case kMips64MsubfD: + __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1), + i.InputDoubleRegister(2)); + break; case kMips64MulD: // TODO(plind): add special case: right op is -1.0, see arm port. __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h index efcdcc59fd..6a444342ac 100644 --- a/src/compiler/mips64/instruction-codes-mips64.h +++ b/src/compiler/mips64/instruction-codes-mips64.h @@ -85,6 +85,14 @@ namespace compiler { V(Mips64SqrtD) \ V(Mips64MaxD) \ V(Mips64MinD) \ + V(Mips64MaddS) \ + V(Mips64MaddD) \ + V(Mips64MaddfS) \ + V(Mips64MaddfD) \ + V(Mips64MsubS) \ + V(Mips64MsubD) \ + V(Mips64MsubfS) \ + V(Mips64MsubfD) \ V(Mips64Float64RoundDown) \ V(Mips64Float64RoundTruncate) \ V(Mips64Float64RoundUp) \ diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc index 9af71e0b8f..6e937e20d7 100644 --- a/src/compiler/mips64/instruction-selector-mips64.cc +++ b/src/compiler/mips64/instruction-selector-mips64.cc @@ -1224,20 +1224,126 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) { void InstructionSelector::VisitFloat32Add(Node* node) { + Mips64OperandGenerator g(this); + Float32BinopMatcher m(node); + if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { + // For Add.S(Mul.S(x, 
y), z): + Float32BinopMatcher mleft(m.left().node()); + if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y). + Emit(kMips64MaddS, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } else if (kArchVariant == kMips64r6) { // Select Maddf.S(z, x, y). + Emit(kMips64MaddfS, g.DefineSameAsFirst(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } + if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { + // For Add.S(x, Mul.S(y, z)): + Float32BinopMatcher mright(m.right().node()); + if (kArchVariant == kMips64r2) { // Select Madd.S(x, y, z). + Emit(kMips64MaddS, g.DefineAsRegister(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } else if (kArchVariant == kMips64r6) { // Select Maddf.S(x, y, z). + Emit(kMips64MaddfS, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } VisitRRR(this, kMips64AddS, node); } void InstructionSelector::VisitFloat64Add(Node* node) { + Mips64OperandGenerator g(this); + Float64BinopMatcher m(node); + if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { + // For Add.D(Mul.D(x, y), z): + Float64BinopMatcher mleft(m.left().node()); + if (kArchVariant == kMips64r2) { // Select Madd.D(z, x, y). + Emit(kMips64MaddD, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } else if (kArchVariant == kMips64r6) { // Select Maddf.D(z, x, y). 
+ Emit(kMips64MaddfD, g.DefineSameAsFirst(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } + if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { + // For Add.D(x, Mul.D(y, z)): + Float64BinopMatcher mright(m.right().node()); + if (kArchVariant == kMips64r2) { // Select Madd.D(x, y, z). + Emit(kMips64MaddD, g.DefineAsRegister(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } else if (kArchVariant == kMips64r6) { // Select Maddf.D(x, y, z). + Emit(kMips64MaddfD, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } VisitRRR(this, kMips64AddD, node); } void InstructionSelector::VisitFloat32Sub(Node* node) { + Mips64OperandGenerator g(this); + Float32BinopMatcher m(node); + if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { + if (kArchVariant == kMips64r2) { + // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y). + Float32BinopMatcher mleft(m.left().node()); + Emit(kMips64MsubS, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { + if (kArchVariant == kMips64r6) { + // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z). 
+ Float32BinopMatcher mright(m.right().node()); + Emit(kMips64MsubfS, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } VisitRRR(this, kMips64SubS, node); } void InstructionSelector::VisitFloat64Sub(Node* node) { + Mips64OperandGenerator g(this); + Float64BinopMatcher m(node); + if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { + if (kArchVariant == kMips64r2) { + // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y). + Float64BinopMatcher mleft(m.left().node()); + Emit(kMips64MsubD, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { + if (kArchVariant == kMips64r6) { + // For Sub.D(x,Mul.D(y,z)) select Msubf.D(x, y, z). + Float64BinopMatcher mright(m.right().node()); + Emit(kMips64MsubfD, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } VisitRRR(this, kMips64SubD, node); } diff --git a/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc index d9f9d49a4d..45b140e0a5 100644 --- a/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc +++ b/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc @@ -1179,6 +1179,203 @@ TEST_F(InstructionSelectorTest, Float64Abs) { EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } +TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) { + { + StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), + MachineType::Float32(), MachineType::Float32()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* const n = m.Float32Add(m.Float32Mul(p0, p1), p2); + 
m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); + } + { + StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), + MachineType::Float32(), MachineType::Float32()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* const n = m.Float32Add(p0, m.Float32Mul(p1, p2)); + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); + } +} + 
+TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) { + { + StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), + MachineType::Float64(), MachineType::Float64()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* const n = m.Float64Add(m.Float64Mul(p0, p1), p2); + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); + } + { + StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), + MachineType::Float64(), MachineType::Float64()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* const n = m.Float64Add(p0, m.Float64Mul(p1, p2)); + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if 
(IsMipsArchVariant(kMips32r2)) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); + } +} + +TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) { + StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), + MachineType::Float32(), MachineType::Float32()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* n; + if (IsMipsArchVariant(kMips32r2)) { + n = m.Float32Sub(m.Float32Mul(p1, p2), p0); + } else if (IsMipsArchVariant(kMips32r6)) { + n = m.Float32Sub(p0, m.Float32Mul(p1, p2)); + } + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_EQ(kMipsMsubS, s[0]->arch_opcode()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_EQ(kMipsMsubfS, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); +} + +TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) { + StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), + MachineType::Float64(), MachineType::Float64()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* n; + if 
(IsMipsArchVariant(kMips32r2)) { + n = m.Float64Sub(m.Float64Mul(p1, p2), p0); + } else if (IsMipsArchVariant(kMips32r6)) { + n = m.Float64Sub(p0, m.Float64Mul(p1, p2)); + } + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_EQ(kMipsMsubD, s[0]->arch_opcode()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_EQ(kMipsMsubfD, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (IsMipsArchVariant(kMips32r2)) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (IsMipsArchVariant(kMips32r6)) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); +} TEST_F(InstructionSelectorTest, Float64Max) { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), diff --git a/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc index db87cd5e5e..be77126688 100644 --- a/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc +++ b/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc @@ -1536,6 +1536,203 @@ TEST_F(InstructionSelectorTest, Float64Abs) { EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); } +TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) { + { + StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), + MachineType::Float32(), MachineType::Float32()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* const n = m.Float32Add(m.Float32Mul(p0, p1), p2); + m.Return(n); + Stream s = m.Build(); + 
ASSERT_EQ(1U, s.size()); + if (kArchVariant == kMips64r2) { + EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode()); + } else if (kArchVariant == kMips64r6) { + EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (kArchVariant == kMips64r2) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (kArchVariant == kMips64r6) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); + } + { + StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), + MachineType::Float32(), MachineType::Float32()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* const n = m.Float32Add(p0, m.Float32Mul(p1, p2)); + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (kArchVariant == kMips64r2) { + EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode()); + } else if (kArchVariant == kMips64r6) { + EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (kArchVariant == kMips64r2) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (kArchVariant == kMips64r6) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); + } +} + +TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) { + { + 
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), + MachineType::Float64(), MachineType::Float64()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* const n = m.Float64Add(m.Float64Mul(p0, p1), p2); + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (kArchVariant == kMips64r2) { + EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode()); + } else if (kArchVariant == kMips64r6) { + EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (kArchVariant == kMips64r2) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (kArchVariant == kMips64r6) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); + } + { + StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), + MachineType::Float64(), MachineType::Float64()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* const n = m.Float64Add(p0, m.Float64Mul(p1, p2)); + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (kArchVariant == kMips64r2) { + EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode()); + } else if (kArchVariant == kMips64r6) { + EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (kArchVariant == kMips64r2) { + EXPECT_FALSE( + 
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (kArchVariant == kMips64r6) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); + } +} + +TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) { + StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), + MachineType::Float32(), MachineType::Float32()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* n; + if (kArchVariant == kMips64r2) { + n = m.Float32Sub(m.Float32Mul(p1, p2), p0); + } else if (kArchVariant == kMips64r6) { + n = m.Float32Sub(p0, m.Float32Mul(p1, p2)); + } + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (kArchVariant == kMips64r2) { + EXPECT_EQ(kMips64MsubS, s[0]->arch_opcode()); + } else if (kArchVariant == kMips64r6) { + EXPECT_EQ(kMips64MsubfS, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (kArchVariant == kMips64r2) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (kArchVariant == kMips64r6) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); +} + +TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) { + StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), + MachineType::Float64(), MachineType::Float64()); + Node* const p0 = m.Parameter(0); + Node* const p1 = m.Parameter(1); + Node* const p2 = m.Parameter(2); + Node* n; + if (kArchVariant == kMips64r2) { + n = m.Float64Sub(m.Float64Mul(p1, p2), p0); + } 
else if (kArchVariant == kMips64r6) { + n = m.Float64Sub(p0, m.Float64Mul(p1, p2)); + } + m.Return(n); + Stream s = m.Build(); + ASSERT_EQ(1U, s.size()); + if (kArchVariant == kMips64r2) { + EXPECT_EQ(kMips64MsubD, s[0]->arch_opcode()); + } else if (kArchVariant == kMips64r6) { + EXPECT_EQ(kMips64MsubfD, s[0]->arch_opcode()); + } + ASSERT_EQ(3U, s[0]->InputCount()); + EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); + EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); + EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); + ASSERT_EQ(1U, s[0]->OutputCount()); + if (kArchVariant == kMips64r2) { + EXPECT_FALSE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } else if (kArchVariant == kMips64r6) { + EXPECT_TRUE( + UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); + } + EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); + EXPECT_EQ(kFlags_none, s[0]->flags_mode()); +} TEST_F(InstructionSelectorTest, Float64Max) { StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),