MIPS: [turbofan] Add backend support for float32 operations.

Port 8dad78cdbd

Original commit message:
This adds the basics necessary to support float32 operations in TurboFan.
The actual functionality required to detect safe float32 operations will
be added based on this later. Therefore this does not affect production
code except for some cleanup/refactoring.

In detail, this patchset contains the following features:
- Add support for float32 operations to arm, arm64, ia32 and x64
  backends.
- Add float32 machine operators.
- Add support for float32 constants to simplified lowering.
- Handle float32 representation for phis in simplified lowering.

In addition, this patchset contains the following (related) cleanups:
- Fix/unify naming of backend instructions.
- Use AVX comparisons when available.
- Extend ArchOpcodeField to 9 bits (required for arm64).
- Refactor some code duplication in instruction selectors (see the helper
  sketch below).

BUG=v8:3589
LOG=n

Review URL: https://codereview.chromium.org/1046953004

Cr-Commit-Position: refs/heads/master@{#27531}
balazs.kilvady authored on 2015-03-30 12:56:30 -07:00; committed by Commit bot
parent f00b4e94fb
commit dd402998f1
8 changed files with 303 additions and 55 deletions
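
The "refactor some code duplication in instruction selectors" item replaces per-visitor Emit(...) boilerplate with shared VisitRR/VisitRRR helpers. Their definitions are not part of this excerpt; the sketch below reconstructs them from the call sites in the instruction-selector hunks, so treat the exact signatures as an assumption:

// Reconstructed sketch, inferred from call sites such as
// VisitRR(this, kMipsClz, node); not copied from the patch itself.
static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  MipsOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  MipsOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}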

View File

@@ -542,6 +542,45 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kMipsCmpS:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMipsAddS:
// TODO(plind): add special case: combine mult & add.
__ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsSubS:
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsMulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsDivS:
__ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
// TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result from the FP result register to the output register.
__ MovFromFloatResult(i.OutputSingleRegister());
break;
}
case kMipsSqrtS: {
__ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMipsCmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -576,6 +615,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
case kMipsSqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMipsFloat64RoundDown: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
break;
@@ -588,10 +631,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
- case kMipsSqrtD: {
- __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- break;
- }
case kMipsCvtSD: {
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
@@ -781,6 +820,41 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
} else if (instr->arch_opcode() == kMipsCmpS) {
// TODO(dusmil) optimize unordered checks to use fewer instructions
// even if we have to unfold BranchF macro.
Label* nan = flabel;
switch (branch->condition) {
case kEqual:
cc = eq;
break;
case kNotEqual:
cc = ne;
nan = tlabel;
break;
case kUnsignedLessThan:
cc = lt;
break;
case kUnsignedGreaterThanOrEqual:
cc = ge;
nan = tlabel;
break;
case kUnsignedLessThanOrEqual:
cc = le;
break;
case kUnsignedGreaterThan:
cc = gt;
nan = tlabel;
break;
default:
UNSUPPORTED_COND(kMipsCmpS, branch->condition);
break;
}
__ BranchFS(tlabel, nan, cc, i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
} else if (instr->arch_opcode() == kMipsCmpD) {
// TODO(dusmil) optimize unordered checks to use fewer instructions
// even if we have to unfold BranchF macro.
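
An aside on the nan label selection above: kNotEqual, kUnsignedGreaterThanOrEqual and kUnsignedGreaterThan reach this switch as negations of the source-level predicate, and the negation of an ordered comparison is true for NaN, so those three route unordered operands to tlabel. A minimal standalone check of the underlying IEEE-754 rule (illustration only, not part of the patch):

#include <cmath>
#include <cstdio>

// Every ordered comparison involving a NaN operand is false; only !=
// is true. Hence x != y sends NaN to the true label, and so do the
// negated forms !(x < y) and !(x <= y).
int main() {
  float nan = std::nanf("");
  std::printf("nan == 1.0f -> %d\n", nan == 1.0f);  // 0
  std::printf("nan != 1.0f -> %d\n", nan != 1.0f);  // 1
  std::printf("nan <  1.0f -> %d\n", nan < 1.0f);   // 0
  std::printf("nan <= 1.0f -> %d\n", nan <= 1.0f);  // 0
  return 0;
}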

View File

@@ -34,6 +34,13 @@ namespace compiler {
V(MipsMov) \
V(MipsTst) \
V(MipsCmp) \
V(MipsCmpS) \
V(MipsAddS) \
V(MipsSubS) \
V(MipsMulS) \
V(MipsDivS) \
V(MipsModS) \
V(MipsSqrtS) \
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
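
For context, the V(...) entries above form an X-macro list: each backend opcode is declared once, and the list is expanded into the ArchOpcode enum, name tables and switches elsewhere. A minimal standalone illustration of the pattern (the list is trimmed and the expansion sites are simplified, not V8's actual ones):

#include <cstdio>

#define TARGET_ARCH_OPCODE_LIST(V) \
  V(MipsAddS)                      \
  V(MipsSubS)                      \
  V(MipsMulS)

// Expansion 1: the opcode enum.
#define DECLARE_OPCODE(x) k##x,
enum ArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE) };
#undef DECLARE_OPCODE

// Expansion 2: printable names for tracing/disassembly.
#define OPCODE_NAME(x) #x,
const char* const kOpcodeNames[] = {TARGET_ARCH_OPCODE_LIST(OPCODE_NAME)};
#undef OPCODE_NAME

int main() {
  std::printf("%d -> %s\n", kMipsMulS, kOpcodeNames[kMipsMulS]);
  return 0;
}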

View File

@@ -265,8 +265,7 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
void InstructionSelector::VisitWord32Clz(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsClz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
VisitRR(this, kMipsClz, node);
}
@@ -313,15 +312,12 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
}
- Emit(kMipsMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()));
VisitRRR(this, kMipsMul, node);
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsMulHigh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
VisitRRR(this, kMipsMulHigh, node);
}
@@ -365,39 +361,37 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
VisitRR(this, kMipsCvtDS, node);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
VisitRR(this, kMipsCvtDW, node);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDUw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
VisitRR(this, kMipsCvtDUw, node);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsTruncWD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
VisitRR(this, kMipsTruncWD, node);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsTruncUwD, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
VisitRR(this, kMipsTruncUwD, node);
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
VisitRR(this, kMipsCvtSD, node);
}
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMipsAddS, node);
}
@@ -406,6 +400,11 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kMipsSubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
Float64BinopMatcher m(node);
@@ -425,11 +424,21 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMipsMulS, node);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitRRR(this, kMipsMulD, node);
}
void InstructionSelector::VisitFloat32Div(Node* node) {
VisitRRR(this, kMipsDivS, node);
}
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kMipsDivD, node);
}
@@ -442,15 +451,25 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMipsSqrtS, node);
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
VisitRR(this, kMipsSqrtD, node);
}
@@ -636,7 +655,18 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
}
- // Shared routine for multiple float compare operations.
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
VisitCompare(selector, kMipsCmpS, g.UseRegister(left), g.UseRegister(right),
cont);
}
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
@@ -708,6 +738,15 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
@@ -853,6 +892,24 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
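
The VisitWordCompareZero cases above fire when a float32 compare feeds a branch through Word32Equal(..., 0); OverwriteAndNegateIfEqual installs the compare's own condition and negates it when the zero-test inverted the predicate. A toy model of that condition algebra (enum values and helper are illustrative, not V8's definitions):

#include <cstdio>

enum Cond {
  kEqual, kNotEqual,
  kUnsignedLessThan, kUnsignedGreaterThanOrEqual,
  kUnsignedLessThanOrEqual, kUnsignedGreaterThan,
};

// Negation table for the condition codes used by the float32 compares.
Cond Negate(Cond c) {
  switch (c) {
    case kEqual:                      return kNotEqual;
    case kNotEqual:                   return kEqual;
    case kUnsignedLessThan:           return kUnsignedGreaterThanOrEqual;
    case kUnsignedGreaterThanOrEqual: return kUnsignedLessThan;
    case kUnsignedLessThanOrEqual:    return kUnsignedGreaterThan;
    case kUnsignedGreaterThan:        return kUnsignedLessThanOrEqual;
  }
  return kEqual;  // not reached
}

int main() {
  // Branching on Word32Equal(Float32LessThan(a, b), 0) means
  // "branch if !(a < b)": install kUnsignedLessThan, then negate.
  // The result is one of the nan = tlabel cases in the code generator.
  std::printf("Negate(kUnsignedLessThan) == kUnsignedGreaterThanOrEqual: %d\n",
              Negate(kUnsignedLessThan) == kUnsignedGreaterThanOrEqual);
  return 0;
}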

View File

@@ -2078,16 +2078,31 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Arithmetic.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
@@ -2100,6 +2115,11 @@ void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
}
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
}
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
@@ -2115,11 +2135,21 @@ void Assembler::mov_d(FPURegister fd, FPURegister fs) {
}
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
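
Note how the new single-precision emitters pass the double-precision function-field constants (ADD_D, SUB_D, SQRT_D, ...) and change only the format field to S: in the COP1 R-type encoding the funct bits are shared across formats. A standalone sketch of that encoding (field layout from the MIPS32 manual; the constant names are illustrative, not V8's):

#include <cstdint>
#include <cstdio>

// COP1 R-type: opcode(31:26) fmt(25:21) ft(20:16) fs(15:11) fd(10:6) funct(5:0)
constexpr uint32_t kCOP1 = 0x11;  // COP1 major opcode
constexpr uint32_t kFmtS = 16;    // fmt field: single precision
constexpr uint32_t kFmtD = 17;    // fmt field: double precision
constexpr uint32_t kAdd = 0x00;   // add.fmt function field (== ADD_D)

uint32_t GenInstrRegister(uint32_t opcode, uint32_t fmt, uint32_t ft,
                          uint32_t fs, uint32_t fd, uint32_t funct) {
  return (opcode << 26) | (fmt << 21) | (ft << 16) | (fs << 11) | (fd << 6) |
         funct;
}

int main() {
  // add.s f0, f2, f4 and add.d f0, f2, f4 differ only in the fmt field,
  // which is why add_s above can reuse ADD_D as its function field.
  std::printf("add.s f0, f2, f4 -> 0x%08x\n",
              GenInstrRegister(kCOP1, kFmtS, 4, 2, 0, kAdd));
  std::printf("add.d f0, f2, f4 -> 0x%08x\n",
              GenInstrRegister(kCOP1, kFmtD, 4, 2, 0, kAdd));
  return 0;
}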

View File

@@ -885,14 +885,20 @@ class Assembler : public AssemblerBase {
void cfc1(Register rt, FPUControlRegister fs);
// Arithmetic.
void add_s(FPURegister fd, FPURegister fs, FPURegister ft);
void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
void sub_s(FPURegister fd, FPURegister fs, FPURegister ft);
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
void neg_s(FPURegister fd, FPURegister fs);
void neg_d(FPURegister fd, FPURegister fs);
void sqrt_s(FPURegister fd, FPURegister fs);
void sqrt_d(FPURegister fd, FPURegister fs);
// Conversion.

View File

@@ -1430,18 +1430,18 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
}
- void MacroAssembler::BranchF(Label* target,
- Label* nan,
- Condition cc,
- FPURegister cmp1,
- FPURegister cmp2,
- BranchDelaySlot bd) {
void MacroAssembler::BranchFSize(SecondaryField sizeField, Label* target,
Label* nan, Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cc == al) {
Branch(bd, target);
return;
}
if (IsMipsArchVariant(kMips32r6)) {
sizeField = sizeField == D ? L : W;
}
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
@@ -1463,35 +1463,35 @@ void MacroAssembler::BranchF(Label* target,
// have been handled by the caller.
switch (cc) {
case lt:
- c(OLT, D, cmp1, cmp2);
c(OLT, sizeField, cmp1, cmp2);
bc1t(target);
break;
case gt:
- c(ULE, D, cmp1, cmp2);
c(ULE, sizeField, cmp1, cmp2);
bc1f(target);
break;
case ge:
- c(ULT, D, cmp1, cmp2);
c(ULT, sizeField, cmp1, cmp2);
bc1f(target);
break;
case le:
- c(OLE, D, cmp1, cmp2);
c(OLE, sizeField, cmp1, cmp2);
bc1t(target);
break;
case eq:
- c(EQ, D, cmp1, cmp2);
c(EQ, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ueq:
- c(UEQ, D, cmp1, cmp2);
c(UEQ, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ne:
- c(EQ, D, cmp1, cmp2);
c(EQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
case nue:
- c(UEQ, D, cmp1, cmp2);
c(UEQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
default:
@@ -1508,35 +1508,35 @@ void MacroAssembler::BranchF(Label* target,
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
switch (cc) {
case lt:
- cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case gt:
- cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case ge:
- cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case le:
- cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case eq:
- cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case ueq:
- cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
case ne:
- cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
case nue:
- cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
default:
@@ -1551,6 +1551,20 @@ void MacroAssembler::BranchF(Label* target,
}
void MacroAssembler::BranchF(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd) {
BranchFSize(D, target, nan, cc, cmp1, cmp2, bd);
}
void MacroAssembler::BranchFS(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd) {
BranchFSize(S, target, nan, cc, cmp1, cmp2, bd);
}
void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
if (IsFp64Mode()) {
DCHECK(!src_low.is(at));
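
BranchF and BranchFS are now thin wrappers over a single implementation parameterized by the compare format, with the r6 path remapping D/S to the L/W format fields that the r6 cmp instruction expects. The shape of the refactoring, reduced to a standalone sketch (names mirror the diff; the body is a placeholder):

#include <cstdio>

enum SecondaryField { S, D, W, L };  // format fields, as in the diff

// One parameterized implementation...
void BranchFSize(SecondaryField sizeField) {
  std::printf("FP compare/branch in %s format\n",
              sizeField == D ? "double" : "single");
}

// ...and two trivial entry points: existing BranchF callers stay
// untouched, while the new float32 code paths call BranchFS.
void BranchF() { BranchFSize(D); }
void BranchFS() { BranchFSize(S); }

int main() {
  BranchF();
  BranchFS();
  return 0;
}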

View File

@@ -771,7 +771,11 @@ class MacroAssembler: public Assembler {
// general-purpose register.
void Mfhc1(Register rt, FPURegister fs);
- // Wrapper function for the different cmp/branch types.
// Wrapper functions for the different cmp/branch types.
void BranchFSize(SecondaryField sizeField, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void BranchF(Label* target,
Label* nan,
Condition cc,
@@ -779,6 +783,9 @@ class MacroAssembler: public Assembler {
FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void BranchFS(Label* target, Label* nan, Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd = PROTECT);
// Alternate (inline) version for better readability with USE_DELAY_SLOT.
inline void BranchF(BranchDelaySlot bd,
Label* target,
@@ -789,6 +796,11 @@ class MacroAssembler: public Assembler {
BranchF(target, nan, cc, cmp1, cmp2, bd);
}
inline void BranchFS(BranchDelaySlot bd, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2) {
BranchFS(target, nan, cc, cmp1, cmp2, bd);
}
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.

View File

@@ -2329,13 +2329,61 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
const int32_t& ft_reg,
const int32_t& fs_reg,
const int32_t& fd_reg) {
- float f;
- double ft = get_fpu_register_double(ft_reg);
- int64_t ft_int = static_cast<int64_t>(ft);
float fs, ft;
fs = get_fpu_register_float(fs_reg);
ft = get_fpu_register_float(ft_reg);
int64_t ft_int = static_cast<int64_t>(get_fpu_register_double(ft_reg));
uint32_t cc, fcsr_cc;
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
case ADD_D:
set_fpu_register_float(fd_reg, fs + ft);
break;
case SUB_D:
set_fpu_register_float(fd_reg, fs - ft);
break;
case MUL_D:
set_fpu_register_float(fd_reg, fs * ft);
break;
case DIV_D:
set_fpu_register_float(fd_reg, fs / ft);
break;
case ABS_D:
set_fpu_register_float(fd_reg, fabs(fs));
break;
case MOV_D:
set_fpu_register_float(fd_reg, fs);
break;
case NEG_D:
set_fpu_register_float(fd_reg, -fs);
break;
case SQRT_D:
set_fpu_register_float(fd_reg, fast_sqrt(fs));
break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
case C_EQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft));
break;
case C_UEQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case C_OLT_D:
set_fcsr_bit(fcsr_cc, (fs < ft));
break;
case C_ULT_D:
set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case C_OLE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft));
break;
case C_ULE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case CVT_D_S:
- f = get_fpu_register_float(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(f));
set_fpu_register_double(fd_reg, static_cast<double>(fs));
break;
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
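
The compare cases added to DecodeTypeRegisterSRsType implement the FPU's ordered/unordered predicate pairs directly in C++. A standalone check of the two families (predicates copied in spirit from the cases above; not V8 code):

#include <cmath>
#include <cstdio>

// C_OLT-style: ordered less-than, false if either operand is NaN.
bool OrderedLessThan(float fs, float ft) { return fs < ft; }

// C_ULT-style: unordered-or-less-than, true if either operand is NaN.
bool UnorderedLessThan(float fs, float ft) {
  return (fs < ft) || std::isnan(fs) || std::isnan(ft);
}

int main() {
  float nan = std::nanf("");
  std::printf("OLT(nan, 1.0f) = %d\n", OrderedLessThan(nan, 1.0f));    // 0
  std::printf("ULT(nan, 1.0f) = %d\n", UnorderedLessThan(nan, 1.0f));  // 1
  return 0;
}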