MIPS[64]: Disable fused multiply-accumulate instructions

MIPS[64]R6 supports only fused multiply-accumulate instructions, whose
single-rounding results differ from a separate multiply and add, causing
failures of several tests that expect exact floating-point results. Therefore
we disable fused multiply-accumulate in both emitted and compiled code on R6.

TEST=cctest/test-run-machops/RunFloat64MulAndFloat64Add1,mjsunit/es6/math-expm1.js
mjsunit/es6/math-fround.js,mjsunit/compiler/multiply-add.js

BUG=

Review-Url: https://codereview.chromium.org/2569683002
Cr-Commit-Position: refs/heads/master@{#41717}
This commit is contained in:
ivica.bogosavljevic 2016-12-15 03:50:12 -08:00 committed by Commit bot
parent 7c43fcb285
commit ee7281f8ff
14 changed files with 265 additions and 398 deletions

View File

@ -1152,36 +1152,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMipsMaddS:
__ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2));
__ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2),
kScratchDoubleReg);
break;
case kMipsMaddD:
__ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2));
break;
case kMipsMaddfS:
__ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
break;
case kMipsMaddfD:
__ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
__ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2),
kScratchDoubleReg);
break;
case kMipsMsubS:
__ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2));
__ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2),
kScratchDoubleReg);
break;
case kMipsMsubD:
__ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2));
break;
case kMipsMsubfS:
__ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
break;
case kMipsMsubfD:
__ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
__ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2),
kScratchDoubleReg);
break;
case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port.

View File

@ -71,12 +71,8 @@ namespace compiler {
V(MipsMulPair) \
V(MipsMaddS) \
V(MipsMaddD) \
V(MipsMaddfS) \
V(MipsMaddfD) \
V(MipsMsubS) \
V(MipsMsubD) \
V(MipsMsubfS) \
V(MipsMsubfD) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
V(MipsFloat32RoundUp) \

View File

@ -904,35 +904,23 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
MipsOperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Add.S(Mul.S(x, y), z):
Float32BinopMatcher mleft(m.left().node());
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Add.S(Mul.S(x, y), z):
Float32BinopMatcher mleft(m.left().node());
Emit(kMipsMaddS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
} else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(z, x, y).
Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
}
if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
// For Add.S(x, Mul.S(y, z)):
Float32BinopMatcher mright(m.right().node());
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(x, y, z).
if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
// For Add.S(x, Mul.S(y, z)):
Float32BinopMatcher mright(m.right().node());
Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
} else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(x, y, z).
Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMipsAddS, node);
@ -941,35 +929,23 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
MipsOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Add.D(Mul.D(x, y), z):
Float64BinopMatcher mleft(m.left().node());
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y).
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Add.D(Mul.D(x, y), z):
Float64BinopMatcher mleft(m.left().node());
Emit(kMipsMaddD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
} else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(z, x, y).
Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
}
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
// For Add.D(x, Mul.D(y, z)):
Float64BinopMatcher mright(m.right().node());
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(x, y, z).
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
// For Add.D(x, Mul.D(y, z)):
Float64BinopMatcher mright(m.right().node());
Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
} else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(x, y, z).
Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMipsAddD, node);
@ -978,9 +954,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
MipsOperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
if (IsMipsArchVariant(kMips32r2)) {
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
Float32BinopMatcher mleft(m.left().node());
Emit(kMipsMsubS, g.DefineAsRegister(node),
@ -988,24 +964,15 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
} else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
if (IsMipsArchVariant(kMips32r6)) {
// For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
Float32BinopMatcher mright(m.right().node());
Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMipsSubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
if (IsMipsArchVariant(kMips32r2)) {
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
Float64BinopMatcher mleft(m.left().node());
Emit(kMipsMsubD, g.DefineAsRegister(node),
@ -1013,15 +980,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
} else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
if (IsMipsArchVariant(kMips32r6)) {
// For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
Float64BinopMatcher mright(m.right().node());
Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMipsSubD, node);
}

View File

@ -1346,36 +1346,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMips64MaddS:
__ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2));
__ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2),
kScratchDoubleReg);
break;
case kMips64MaddD:
__ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2));
break;
case kMips64MaddfS:
__ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
break;
case kMips64MaddfD:
__ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
__ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2),
kScratchDoubleReg);
break;
case kMips64MsubS:
__ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2));
__ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2),
kScratchDoubleReg);
break;
case kMips64MsubD:
__ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2));
break;
case kMips64MsubfS:
__ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
break;
case kMips64MsubfD:
__ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
__ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2),
kScratchDoubleReg);
break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.

View File

@ -87,12 +87,8 @@ namespace compiler {
V(Mips64MinD) \
V(Mips64MaddS) \
V(Mips64MaddD) \
V(Mips64MaddfS) \
V(Mips64MaddfD) \
V(Mips64MsubS) \
V(Mips64MsubD) \
V(Mips64MsubfS) \
V(Mips64MsubfD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \

View File

@ -1449,35 +1449,23 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
Mips64OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Add.S(Mul.S(x, y), z):
Float32BinopMatcher mleft(m.left().node());
if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Add.S(Mul.S(x, y), z):
Float32BinopMatcher mleft(m.left().node());
Emit(kMips64MaddS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
} else if (kArchVariant == kMips64r6) { // Select Maddf.S(z, x, y).
Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
}
if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
// For Add.S(x, Mul.S(y, z)):
Float32BinopMatcher mright(m.right().node());
if (kArchVariant == kMips64r2) { // Select Madd.S(x, y, z).
if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
// For Add.S(x, Mul.S(y, z)):
Float32BinopMatcher mright(m.right().node());
Emit(kMips64MaddS, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
} else if (kArchVariant == kMips64r6) { // Select Maddf.S(x, y, z).
Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMips64AddS, node);
@ -1486,35 +1474,23 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
Mips64OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Add.D(Mul.D(x, y), z):
Float64BinopMatcher mleft(m.left().node());
if (kArchVariant == kMips64r2) { // Select Madd.D(z, x, y).
if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Add.D(Mul.D(x, y), z):
Float64BinopMatcher mleft(m.left().node());
Emit(kMips64MaddD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
} else if (kArchVariant == kMips64r6) { // Select Maddf.D(z, x, y).
Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
}
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
// For Add.D(x, Mul.D(y, z)):
Float64BinopMatcher mright(m.right().node());
if (kArchVariant == kMips64r2) { // Select Madd.D(x, y, z).
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
// For Add.D(x, Mul.D(y, z)):
Float64BinopMatcher mright(m.right().node());
Emit(kMips64MaddD, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
} else if (kArchVariant == kMips64r6) { // Select Maddf.D(x, y, z).
Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMips64AddD, node);
@ -1523,9 +1499,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
Mips64OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
if (kArchVariant == kMips64r2) {
if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
Float32BinopMatcher mleft(m.left().node());
Emit(kMips64MsubS, g.DefineAsRegister(node),
@ -1533,24 +1509,15 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
} else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
if (kArchVariant == kMips64r6) {
// For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
Float32BinopMatcher mright(m.right().node());
Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMips64SubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
Mips64OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
if (kArchVariant == kMips64r2) {
if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
Float64BinopMatcher mleft(m.left().node());
Emit(kMips64MsubD, g.DefineAsRegister(node),
@ -1558,15 +1525,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
} else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
if (kArchVariant == kMips64r6) {
// For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
Float64BinopMatcher mright(m.right().node());
Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMips64SubD, node);
}

View File

@ -1998,6 +1998,49 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
}
}
// Floating-point multiply-add, single precision: fd = fr + (fs * ft).
// On kMips32r2 this emits the native madd.s instruction; on other variants
// (notably r6, which only offers the fused maddf.s whose single rounding
// changes results) it is emulated with a separately rounded mul.s + add.s.
void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (IsMipsArchVariant(kMips32r2)) {
madd_s(fd, fr, fs, ft);
} else {
// The expansion clobbers |scratch|, so it must not alias any source reg.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_s(scratch, fs, ft);
add_s(fd, fr, scratch);
}
}
// Floating-point multiply-add, double precision: fd = fr + (fs * ft).
// On kMips32r2 this emits the native madd.d instruction; on other variants
// (notably r6, which only offers the fused maddf.d whose single rounding
// changes results) it is emulated with a separately rounded mul.d + add.d.
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (IsMipsArchVariant(kMips32r2)) {
madd_d(fd, fr, fs, ft);
} else {
// The expansion clobbers |scratch|, so it must not alias any source reg.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_d(scratch, fs, ft);
add_d(fd, fr, scratch);
}
}
// Floating-point multiply-subtract, single precision: fd = (fs * ft) - fr.
// On kMips32r2 this emits the native msub.s instruction; on other variants
// (notably r6, which only offers the fused msubf.s whose single rounding
// changes results) it is emulated with a separately rounded mul.s + sub.s.
void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (IsMipsArchVariant(kMips32r2)) {
msub_s(fd, fr, fs, ft);
} else {
// The expansion clobbers |scratch|, so it must not alias any source reg.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_s(scratch, fs, ft);
sub_s(fd, scratch, fr);
}
}
// Floating-point multiply-subtract, double precision: fd = (fs * ft) - fr.
// On kMips32r2 this emits the native msub.d instruction; on other variants
// (notably r6, which only offers the fused msubf.d whose single rounding
// changes results) it is emulated with a separately rounded mul.d + sub.d.
void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (IsMipsArchVariant(kMips32r2)) {
msub_d(fd, fr, fs, ft);
} else {
// The expansion clobbers |scratch|, so it must not alias any source reg.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_d(scratch, fs, ft);
sub_d(fd, scratch, fr);
}
}
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cond, FPURegister cmp1,

View File

@ -887,6 +887,15 @@ class MacroAssembler: public Assembler {
// general-purpose register.
void Mfhc1(Register rt, FPURegister fs);
void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
// Wrapper functions for the different cmp/branch types.
inline void BranchF32(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,

View File

@ -2212,19 +2212,49 @@ void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
bind(&fail);
}
// Floating-point multiply-add, single precision: fd = fr + (fs * ft).
// On kMips64r2 this emits the native madd.s instruction; on other variants
// (notably r6, which only offers the fused maddf.s whose single rounding
// changes results) it is emulated with a separately rounded mul.s + add.s.
void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (kArchVariant == kMips64r2) {
madd_s(fd, fr, fs, ft);
} else {
// The expansion clobbers |scratch|, so it must not alias any source reg.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_s(scratch, fs, ft);
add_s(fd, fr, scratch);
}
}
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (0) { // TODO(plind): find reasonable arch-variant symbol names.
if (kArchVariant == kMips64r2) {
madd_d(fd, fr, fs, ft);
} else {
// Can not change source regs's value.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_d(scratch, fs, ft);
add_d(fd, fr, scratch);
}
}
// Floating-point multiply-subtract, single precision: fd = (fs * ft) - fr.
// On kMips64r2 this emits the native msub.s instruction; on other variants
// (notably r6, which only offers the fused msubf.s whose single rounding
// changes results) it is emulated with a separately rounded mul.s + sub.s.
void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (kArchVariant == kMips64r2) {
msub_s(fd, fr, fs, ft);
} else {
// The expansion clobbers |scratch|, so it must not alias any source reg.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_s(scratch, fs, ft);
sub_s(fd, scratch, fr);
}
}
// Floating-point multiply-subtract, double precision: fd = (fs * ft) - fr.
// On kMips64r2 this emits the native msub.d instruction; on other variants
// (notably r6, which only offers the fused msubf.d whose single rounding
// changes results) it is emulated with a separately rounded mul.d + sub.d.
void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (kArchVariant == kMips64r2) {
msub_d(fd, fr, fs, ft);
} else {
// The expansion clobbers |scratch|, so it must not alias any source reg.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_d(scratch, fs, ft);
sub_d(fd, scratch, fr);
}
}
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cond, FPURegister cmp1,

View File

@ -940,10 +940,13 @@ class MacroAssembler: public Assembler {
void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
FPURegister ft);
void Madd_d(FPURegister fd,
FPURegister fr,
FPURegister fs,
FPURegister ft,
void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
// Wrapper functions for the different cmp/branch types.

View File

@ -359,7 +359,9 @@ v8_executable("cctest") {
if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64") {
# Disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
cflags += [ "-ffp-contract=off" ]

View File

@ -428,7 +428,9 @@
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64" \
or v8_target_arch=="arm" or v8_target_arch=="arm64" \
or v8_target_arch=="s390" or v8_target_arch=="s390x"', {
or v8_target_arch=="s390" or v8_target_arch=="s390x" \
or v8_target_arch=="mips" or v8_target_arch=="mips64" \
or v8_target_arch=="mipsel" or v8_target_arch=="mips64el"', {
# disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
'cflags': ['-ffp-contract=off'],

View File

@ -1270,7 +1270,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
if (!IsMipsArchVariant(kMips32r2)) {
return;
}
{
@ -1283,23 +1283,14 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
}
EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@ -1313,30 +1304,21 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
}
EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
if (!IsMipsArchVariant(kMips32r2)) {
return;
}
{
@ -1349,23 +1331,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
}
EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@ -1379,23 +1352,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
}
EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@ -1404,83 +1368,59 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n = nullptr;
if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
if (!IsMipsArchVariant(kMips32r2)) {
return;
}
{
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n = nullptr;
if (IsMipsArchVariant(kMips32r2)) {
n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
} else if (IsMipsArchVariant(kMips32r6)) {
n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
}
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsMsubS, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMsubfS, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n = nullptr;
if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
if (!IsMipsArchVariant(kMips32r2)) {
return;
}
{
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n = nullptr;
if (IsMipsArchVariant(kMips32r2)) {
n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
} else if (IsMipsArchVariant(kMips32r6)) {
n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
}
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsMsubD, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMsubfD, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64Max) {

View File

@ -1752,6 +1752,9 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
if (kArchVariant != kMips64r2) {
return;
}
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
@ -1762,23 +1765,14 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
}
EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@ -1792,29 +1786,23 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
}
EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
if (kArchVariant != kMips64r2) {
return;
}
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
@ -1825,23 +1813,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
}
EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@ -1855,23 +1834,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
}
EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@ -1880,73 +1850,57 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n;
if (kArchVariant == kMips64r2) {
if (kArchVariant != kMips64r2) {
return;
}
{
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n;
n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
} else if (kArchVariant == kMips64r6) {
n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
}
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64MsubS, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MsubfS, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n;
if (kArchVariant == kMips64r2) {
if (kArchVariant != kMips64r2) {
return;
}
{
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n;
n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
} else if (kArchVariant == kMips64r6) {
n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
}
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64MsubD, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MsubfD, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64Max) {