MIPS[64]: Support for some SIMD operations (4)

Add support for F32x4Abs, F32x4Neg, F32x4RecipApprox,
F32x4RecipRefine, F32x4RecipSqrtApprox, F32x4RecipSqrtRefine,
F32x4Add, F32x4Sub, F32x4Mul, F32x4Max, F32x4Min,
F32x4Eq, F32x4Ne, F32x4Lt, F32x4Le, I32x4SConvertF32x4,
I32x4UConvertF32x4 operations for mips32 and mips64
architectures.

BUG=

Review-Url: https://codereview.chromium.org/2778203002
Cr-Commit-Position: refs/heads/master@{#44597}
Author: dusan.simicic, 2017-04-12 00:32:00 -07:00 (committed by Commit bot)
Parent: a615efaa50
Commit: 8d2db536c9
10 changed files with 425 additions and 23 deletions


@@ -2088,9 +2088,7 @@ void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
@@ -2124,7 +2122,7 @@ void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_ARM
#endif // V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2164,11 +2162,17 @@ void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
// !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
@@ -2183,10 +2187,6 @@ void InstructionSelector::VisitI32x4LtS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4LeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}


@@ -1767,6 +1767,113 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Abs: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMipsF32x4Neg: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMipsF32x4RecipApprox: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsF32x4RecipRefine: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
// Emulate with 2.0f - a * b
__ ldi_w(kSimd128ScratchReg, 2);
__ ffint_u_w(kSimd128ScratchReg, kSimd128ScratchReg);
__ fmul_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ fsub_w(dst, kSimd128ScratchReg, dst);
break;
}
case kMipsF32x4RecipSqrtApprox: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsF32x4RecipSqrtRefine: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
// Emulate with (3.0f - a * b) * 0.5f;
__ ldi_w(kSimd128ScratchReg, 3);
__ ffint_u_w(kSimd128ScratchReg, kSimd128ScratchReg);
__ fmul_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ fsub_w(dst, kSimd128ScratchReg, dst);
__ ldi_w(kSimd128ScratchReg, 0x3f);
__ slli_w(kSimd128ScratchReg, kSimd128ScratchReg, 24);
__ fmul_w(dst, dst, kSimd128ScratchReg);
break;
}
case kMipsF32x4Add: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Sub: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Mul: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Max: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Min: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Eq: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Ne: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Lt: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Le: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4SConvertF32x4: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsI32x4UConvertF32x4: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
}
return kSuccess;
} // NOLINT(readability/fn_size)
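A few notes on the MSA sequences above, for readers new to the ISA: F32x4Abs and F32x4Neg are pure sign-bit operations (bclri_w / bnegi_w on bit 31), the two Refine opcodes are emulated with the arithmetic spelled out in the inline comments, and the ldi_w 0x3f / slli_w 24 pair materializes 0x3f000000, which is the IEEE-754 single-precision bit pattern of 0.5f. A minimal scalar model of those per-lane semantics follows (not part of the commit; the helper names are invented for illustration):

// Scalar, per-lane model of the MSA sequences above. The generated code
// applies the same operation to all four float lanes of a 128-bit register.
#include <cstdint>
#include <cstdio>
#include <cstring>

static float AbsLane(float a) {  // bclri_w ..., 31: clear the sign bit
  uint32_t bits;
  std::memcpy(&bits, &a, sizeof bits);
  bits &= ~(1u << 31);
  std::memcpy(&a, &bits, sizeof a);
  return a;
}

static float NegLane(float a) {  // bnegi_w ..., 31: flip the sign bit
  uint32_t bits;
  std::memcpy(&bits, &a, sizeof bits);
  bits ^= 1u << 31;
  std::memcpy(&a, &bits, sizeof a);
  return a;
}

// RecipRefine(a, b) = 2.0f - a * b: with b an estimate of 1/a (for example
// frcp_w's output), b * RecipRefine(a, b) is one Newton-Raphson step
// toward 1/a.
static float RecipRefineLane(float a, float b) { return 2.0f - a * b; }

// RecipSqrtRefine(a, b) = (3.0f - a * b) * 0.5f, the analogous refinement
// factor for reciprocal square roots. The codegen builds the 0.5f constant
// as 0x3f << 24 == 0x3f000000, the bit pattern of 0.5f.
static float RecipSqrtRefineLane(float a, float b) {
  return (3.0f - a * b) * 0.5f;
}

int main() {
  std::printf("abs(-1.5) = %g, neg(2) = %g\n", AbsLane(-1.5f), NegLane(2.0f));
  float x = 3.0f;
  float estimate = 0.3f;                     // stand-in for frcp_w(x)
  estimate *= RecipRefineLane(x, estimate);  // refines 0.3 toward 1/3
  std::printf("refined 1/3 estimate = %g\n", estimate);
  std::printf("rsqrt refine factor = %g\n", RecipSqrtRefineLane(1.0f, 1.0f));
}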


@@ -154,7 +154,24 @@ namespace compiler {
V(MipsI32x4ShrU) \
V(MipsI32x4MaxU) \
V(MipsI32x4MinU) \
V(MipsS32x4Select)
V(MipsS32x4Select) \
V(MipsF32x4Abs) \
V(MipsF32x4Neg) \
V(MipsF32x4RecipApprox) \
V(MipsF32x4RecipRefine) \
V(MipsF32x4RecipSqrtApprox) \
V(MipsF32x4RecipSqrtRefine) \
V(MipsF32x4Add) \
V(MipsF32x4Sub) \
V(MipsF32x4Mul) \
V(MipsF32x4Max) \
V(MipsF32x4Min) \
V(MipsF32x4Eq) \
V(MipsF32x4Ne) \
V(MipsF32x4Lt) \
V(MipsF32x4Le) \
V(MipsI32x4SConvertF32x4) \
V(MipsI32x4UConvertF32x4)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
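The header change is simply new entries in the backend's opcode list macro; other code expands that V(...) list into an opcode enum, a name table for disassembly, and so on. For readers unfamiliar with this X-macro style, here is a self-contained sketch of a typical expansion (simplified; not V8's actual expansion code, and the macro, enum, and table names are invented):

// Sketch of the list-macro pattern: one V(...) list, expanded twice.
#include <cstdio>

#define EXAMPLE_OPCODE_LIST(V) \
  V(MipsF32x4Add)              \
  V(MipsF32x4Sub)              \
  V(MipsI32x4SConvertF32x4)

enum ExampleArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  EXAMPLE_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

static const char* const kExampleOpcodeNames[] = {
#define DECLARE_NAME(Name) #Name,
    EXAMPLE_OPCODE_LIST(DECLARE_NAME)
#undef DECLARE_NAME
};

int main() {
  std::printf("%zu opcodes; first is %s\n",
              sizeof(kExampleOpcodeNames) / sizeof(kExampleOpcodeNames[0]),
              kExampleOpcodeNames[kMipsF32x4Add]);
}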


@@ -2035,6 +2035,74 @@ void InstructionSelector::VisitS32x4Select(Node* node) {
VisitRRRR(this, kMipsS32x4Select, node);
}
void InstructionSelector::VisitF32x4Abs(Node* node) {
VisitRR(this, kMipsF32x4Abs, node);
}
void InstructionSelector::VisitF32x4Neg(Node* node) {
VisitRR(this, kMipsF32x4Neg, node);
}
void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
VisitRR(this, kMipsF32x4RecipApprox, node);
}
void InstructionSelector::VisitF32x4RecipRefine(Node* node) {
VisitRRR(this, kMipsF32x4RecipRefine, node);
}
void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
VisitRR(this, kMipsF32x4RecipSqrtApprox, node);
}
void InstructionSelector::VisitF32x4RecipSqrtRefine(Node* node) {
VisitRRR(this, kMipsF32x4RecipSqrtRefine, node);
}
void InstructionSelector::VisitF32x4Add(Node* node) {
VisitRRR(this, kMipsF32x4Add, node);
}
void InstructionSelector::VisitF32x4Sub(Node* node) {
VisitRRR(this, kMipsF32x4Sub, node);
}
void InstructionSelector::VisitF32x4Mul(Node* node) {
VisitRRR(this, kMipsF32x4Mul, node);
}
void InstructionSelector::VisitF32x4Max(Node* node) {
VisitRRR(this, kMipsF32x4Max, node);
}
void InstructionSelector::VisitF32x4Min(Node* node) {
VisitRRR(this, kMipsF32x4Min, node);
}
void InstructionSelector::VisitF32x4Eq(Node* node) {
VisitRRR(this, kMipsF32x4Eq, node);
}
void InstructionSelector::VisitF32x4Ne(Node* node) {
VisitRRR(this, kMipsF32x4Ne, node);
}
void InstructionSelector::VisitF32x4Lt(Node* node) {
VisitRRR(this, kMipsF32x4Lt, node);
}
void InstructionSelector::VisitF32x4Le(Node* node) {
VisitRRR(this, kMipsF32x4Le, node);
}
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
VisitRR(this, kMipsI32x4SConvertF32x4, node);
}
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
VisitRR(this, kMipsI32x4UConvertF32x4, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {


@@ -2097,6 +2097,113 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Abs: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMips64F32x4Neg: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMips64F32x4RecipApprox: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4RecipRefine: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
// Emulate with 2.0f - a * b
__ ldi_w(kSimd128ScratchReg, 2);
__ ffint_u_w(kSimd128ScratchReg, kSimd128ScratchReg);
__ fmul_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ fsub_w(dst, kSimd128ScratchReg, dst);
break;
}
case kMips64F32x4RecipSqrtApprox: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4RecipSqrtRefine: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
// Emulate with (3.0f - a * b) * 0.5f;
__ ldi_w(kSimd128ScratchReg, 3);
__ ffint_u_w(kSimd128ScratchReg, kSimd128ScratchReg);
__ fmul_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ fsub_w(dst, kSimd128ScratchReg, dst);
__ ldi_w(kSimd128ScratchReg, 0x3f);
__ slli_w(kSimd128ScratchReg, kSimd128ScratchReg, 24);
__ fmul_w(dst, dst, kSimd128ScratchReg);
break;
}
case kMips64F32x4Add: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Sub: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Mul: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Max: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Min: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Eq: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Ne: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Lt: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Le: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4SConvertF32x4: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4UConvertF32x4: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
}
return kSuccess;
} // NOLINT(readability/fn_size)
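The fceq_w / fcne_w / fclt_w / fcle_w compares write an all-ones or all-zero bit pattern into each lane, so read as an i32x4 the result is -1 for true and 0 for false, which is the lane convention the comparison tests further down appear to assume. A scalar model of one lane (illustrative only; helper names invented):

// Scalar model of the per-lane result convention of the MSA float compares:
// all bits set (-1 as int32) when the comparison holds, 0 otherwise.
#include <cstdint>
#include <cstdio>

static int32_t EqLane(float a, float b) { return a == b ? -1 : 0; }
static int32_t LtLane(float a, float b) { return a < b ? -1 : 0; }

int main() {
  std::printf("%d %d\n", EqLane(1.0f, 1.0f), LtLane(2.0f, 1.0f));  // -1 0
}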


@@ -188,7 +188,24 @@ namespace compiler {
V(Mips64I32x4ShrU) \
V(Mips64I32x4MaxU) \
V(Mips64I32x4MinU) \
V(Mips64S32x4Select)
V(Mips64S32x4Select) \
V(Mips64F32x4Abs) \
V(Mips64F32x4Neg) \
V(Mips64F32x4RecipApprox) \
V(Mips64F32x4RecipRefine) \
V(Mips64F32x4RecipSqrtApprox) \
V(Mips64F32x4RecipSqrtRefine) \
V(Mips64F32x4Add) \
V(Mips64F32x4Sub) \
V(Mips64F32x4Mul) \
V(Mips64F32x4Max) \
V(Mips64F32x4Min) \
V(Mips64F32x4Eq) \
V(Mips64F32x4Ne) \
V(Mips64F32x4Lt) \
V(Mips64F32x4Le) \
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes


@@ -2786,6 +2786,74 @@ void InstructionSelector::VisitS32x4Select(Node* node) {
VisitRRRR(this, kMips64S32x4Select, node);
}
void InstructionSelector::VisitF32x4Abs(Node* node) {
VisitRR(this, kMips64F32x4Abs, node);
}
void InstructionSelector::VisitF32x4Neg(Node* node) {
VisitRR(this, kMips64F32x4Neg, node);
}
void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
VisitRR(this, kMips64F32x4RecipApprox, node);
}
void InstructionSelector::VisitF32x4RecipRefine(Node* node) {
VisitRRR(this, kMips64F32x4RecipRefine, node);
}
void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
VisitRR(this, kMips64F32x4RecipSqrtApprox, node);
}
void InstructionSelector::VisitF32x4RecipSqrtRefine(Node* node) {
VisitRRR(this, kMips64F32x4RecipSqrtRefine, node);
}
void InstructionSelector::VisitF32x4Add(Node* node) {
VisitRRR(this, kMips64F32x4Add, node);
}
void InstructionSelector::VisitF32x4Sub(Node* node) {
VisitRRR(this, kMips64F32x4Sub, node);
}
void InstructionSelector::VisitF32x4Mul(Node* node) {
VisitRRR(this, kMips64F32x4Mul, node);
}
void InstructionSelector::VisitF32x4Max(Node* node) {
VisitRRR(this, kMips64F32x4Max, node);
}
void InstructionSelector::VisitF32x4Min(Node* node) {
VisitRRR(this, kMips64F32x4Min, node);
}
void InstructionSelector::VisitF32x4Eq(Node* node) {
VisitRRR(this, kMips64F32x4Eq, node);
}
void InstructionSelector::VisitF32x4Ne(Node* node) {
VisitRRR(this, kMips64F32x4Ne, node);
}
void InstructionSelector::VisitF32x4Lt(Node* node) {
VisitRRR(this, kMips64F32x4Lt, node);
}
void InstructionSelector::VisitF32x4Le(Node* node) {
VisitRRR(this, kMips64F32x4Le, node);
}
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
VisitRR(this, kMips64I32x4SConvertF32x4, node);
}
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
VisitRR(this, kMips64I32x4UConvertF32x4, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {


@@ -356,6 +356,9 @@ constexpr DoubleRegister kLithiumScratchDouble = f30;
constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips32r6 for compare operations.
constexpr DoubleRegister kDoubleCompareReg = f26;
// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
constexpr Simd128Register kSimd128RegZero = w28;
constexpr Simd128Register kSimd128ScratchReg = w30;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.


@@ -361,6 +361,9 @@ constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips64r6 for compare operations.
// We use the last non-callee saved odd register for N64 ABI
constexpr DoubleRegister kDoubleCompareReg = f23;
// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
constexpr Simd128Register kSimd128RegZero = w28;
constexpr Simd128Register kSimd128ScratchReg = w30;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
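Both constants headers above pin the new MSA zero and scratch registers to the same register numbers as the FPU zero and scratch registers, since the MSA registers w0-w31 overlap the FPU registers f0-f31. A hypothetical sanity check for that invariant, with register codes modeled as plain integers (illustrative; not code from the commit):

// Hypothetical check of the aliasing requirement stated in the comments above.
constexpr int kDoubleRegZeroCode = 28;      // f28
constexpr int kFpuScratchCode = 30;         // f30 (kLithiumScratchDouble)
constexpr int kSimd128RegZeroCode = 28;     // w28
constexpr int kSimd128ScratchRegCode = 30;  // w30

static_assert(kSimd128RegZeroCode == kDoubleRegZeroCode,
              "MSA zero register must alias the FPU zero register");
static_assert(kSimd128ScratchRegCode == kFpuScratchCode,
              "MSA scratch register must alias the FPU scratch register");

int main() { return 0; }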


@@ -75,7 +75,8 @@ T Maximum(T a, T b) {
}
// For float operands, Min and Max must return NaN if either operand is NaN.
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
template <>
float Minimum(float a, float b) {
if (std::isnan(a) || std::isnan(b))
@@ -89,7 +90,8 @@ float Maximum(float a, float b) {
return std::numeric_limits<float>::quiet_NaN();
return a >= b ? a : b;
}
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
template <typename T>
T UnsignedMinimum(T a, T b) {
@@ -485,7 +487,8 @@ WASM_EXEC_COMPILED_TEST(F32x4ConvertI32x4) {
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
void RunF32x4UnOpTest(WasmOpcode simd_op, FloatUnOp expected_op,
float error = 0.0f) {
FLAG_wasm_simd_prototype = true;
@@ -510,13 +513,14 @@ void RunF32x4UnOpTest(WasmOpcode simd_op, FloatUnOp expected_op,
WASM_EXEC_COMPILED_TEST(F32x4Abs) { RunF32x4UnOpTest(kExprF32x4Abs, std::abs); }
WASM_EXEC_COMPILED_TEST(F32x4Neg) { RunF32x4UnOpTest(kExprF32x4Neg, Negate); }
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(F32x4Sqrt) { RunF32x4UnOpTest(kExprF32x4Sqrt, Sqrt); }
#endif // SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
static const float kApproxError = 0.01f;
WASM_EXEC_COMPILED_TEST(F32x4RecipApprox) {
@@ -526,9 +530,10 @@ WASM_EXEC_COMPILED_TEST(F32x4RecipApprox) {
WASM_EXEC_COMPILED_TEST(F32x4RecipSqrtApprox) {
RunF32x4UnOpTest(kExprF32x4RecipSqrtApprox, RecipSqrt, kApproxError);
}
#endif // V8_TARGET_ARCH_ARM
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
void RunF32x4BinOpTest(WasmOpcode simd_op, FloatBinOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, float, float, float> r(kExecuteCompiled);
@@ -563,13 +568,14 @@ WASM_EXEC_COMPILED_TEST(F32x4_Min) {
WASM_EXEC_COMPILED_TEST(F32x4_Max) {
RunF32x4BinOpTest(kExprF32x4Max, Maximum);
}
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(F32x4Div) { RunF32x4BinOpTest(kExprF32x4Div, Div); }
#endif // SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
WASM_EXEC_COMPILED_TEST(F32x4RecipRefine) {
RunF32x4BinOpTest(kExprF32x4RecipRefine, RecipRefine);
}
@@ -577,9 +583,10 @@ WASM_EXEC_COMPILED_TEST(F32x4RecipRefine) {
WASM_EXEC_COMPILED_TEST(F32x4RecipSqrtRefine) {
RunF32x4BinOpTest(kExprF32x4RecipSqrtRefine, RecipSqrtRefine);
}
#endif // V8_TARGET_ARCH_ARM
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
void RunF32x4CompareOpTest(WasmOpcode simd_op, FloatCompareOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, float, float, int32_t> r(kExecuteCompiled);
@@ -626,7 +633,8 @@ WASM_EXEC_COMPILED_TEST(F32x4Lt) { RunF32x4CompareOpTest(kExprF32x4Lt, Less); }
WASM_EXEC_COMPILED_TEST(F32x4Le) {
RunF32x4CompareOpTest(kExprF32x4Le, LessEqual);
}
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
WASM_EXEC_COMPILED_TEST(I32x4Splat) {
FLAG_wasm_simd_prototype = true;
@@ -862,7 +870,8 @@ WASM_EXEC_COMPILED_TEST(I8x16ReplaceLane) {
}
#endif // V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Determines if conversion from float to int will be valid.
bool CanRoundToZeroAndConvert(double val, bool unsigned_integer) {
const double max_uint = static_cast<double>(0xffffffffu);
@@ -928,6 +937,8 @@ WASM_EXEC_COMPILED_TEST(I32x4ConvertF32x4) {
CHECK_EQ(1, r.Call(*i, signed_value, unsigned_value));
}
}
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
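The I32x4ConvertF32x4 test guards each input with CanRoundToZeroAndConvert, so only values whose truncated result fits the target integer type are checked; the test does not depend on NaN or out-of-range behavior. A scalar sketch of that guarded truncation (simplified; the helper names below are invented and this is not the actual body of the test helper, which is only partly visible in this hunk):

// Guarded scalar truncation in the spirit of the test above: only convert a
// lane when its rounded-toward-zero value fits the destination type.
#include <cmath>
#include <cstdint>
#include <cstdio>

static bool CanConvertToInt32(double val) {
  return !std::isnan(val) && std::trunc(val) >= -2147483648.0 &&
         std::trunc(val) <= 2147483647.0;
}

static bool CanConvertToUint32(double val) {
  return !std::isnan(val) && std::trunc(val) >= 0.0 &&
         std::trunc(val) <= 4294967295.0;
}

int main() {
  float input = 3.9f;
  if (CanConvertToInt32(input)) {
    // Per-lane effect of ftrunc_s_w for an in-range value: round toward zero.
    std::printf("signed lane = %d\n", static_cast<int32_t>(std::trunc(input)));
  }
  if (CanConvertToUint32(input)) {
    // Per-lane effect of ftrunc_u_w for an in-range value.
    std::printf("unsigned lane = %u\n", static_cast<uint32_t>(std::trunc(input)));
  }
}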
#if V8_TARGET_ARCH_ARM
// Tests both signed and unsigned conversion from I16x8 (unpacking).
@@ -956,6 +967,7 @@ WASM_EXEC_COMPILED_TEST(I32x4ConvertI16x8) {
}
#endif // V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
void RunI32x4UnOpTest(WasmOpcode simd_op, Int32UnOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);