diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc
index f215a487dd..ccd77141b4 100644
--- a/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -1792,6 +1792,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
       break;
     }
+      SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
+      SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
     case kArm64F32x4Splat: {
       __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
       break;
diff --git a/src/compiler/backend/arm64/instruction-codes-arm64.h b/src/compiler/backend/arm64/instruction-codes-arm64.h
index 6b1e4a84de..15da0a63d9 100644
--- a/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -173,6 +173,8 @@ namespace compiler {
   V(Arm64F64x2Splat)               \
   V(Arm64F64x2ExtractLane)         \
   V(Arm64F64x2ReplaceLane)         \
+  V(Arm64F64x2Abs)                 \
+  V(Arm64F64x2Neg)                 \
   V(Arm64F32x4Splat)               \
   V(Arm64F32x4ExtractLane)         \
   V(Arm64F32x4ReplaceLane)         \
diff --git a/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 0f67e6a4de..3655ce74ae 100644
--- a/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -140,6 +140,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64F64x2Splat:
     case kArm64F64x2ExtractLane:
    case kArm64F64x2ReplaceLane:
+    case kArm64F64x2Abs:
+    case kArm64F64x2Neg:
     case kArm64F32x4Splat:
     case kArm64F32x4ExtractLane:
     case kArm64F32x4ReplaceLane:
diff --git a/src/compiler/backend/arm64/instruction-selector-arm64.cc b/src/compiler/backend/arm64/instruction-selector-arm64.cc
index c528a65bcb..903a44429c 100644
--- a/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -3055,6 +3055,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
   V(I8x16)
 
 #define SIMD_UNOP_LIST(V)                                 \
+  V(F64x2Abs, kArm64F64x2Abs)                             \
+  V(F64x2Neg, kArm64F64x2Neg)                             \
   V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4)         \
   V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4)         \
   V(F32x4Abs, kArm64F32x4Abs)                             \
diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc
index d8260059f1..d99837aa8b 100644
--- a/src/compiler/backend/instruction-selector.cc
+++ b/src/compiler/backend/instruction-selector.cc
@@ -2600,8 +2600,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
         // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
 
 #if !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
@@ -2616,6 +2614,8 @@ void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
diff --git a/test/cctest/wasm/test-run-wasm-simd.cc b/test/cctest/wasm/test-run-wasm-simd.cc
index 53c33768eb..b1bbc109ea 100644
--- a/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/test/cctest/wasm/test-run-wasm-simd.cc
@@ -988,62 +988,6 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
     CHECK_EQ(static_cast<double>(i), ReadLittleEndianValue<double>(&g[i]));
   }
 }
-#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-
-#if V8_TARGET_ARCH_X64
-void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
-                           WasmOpcode opcode, DoubleCompareOp expected_op) {
-  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
-  // Set up global to hold mask output.
-  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
-  // Build fn to splat test values, perform compare op, and write the result.
-  byte value1 = 0, value2 = 1;
-  byte temp1 = r.AllocateLocal(kWasmS128);
-  byte temp2 = r.AllocateLocal(kWasmS128);
-  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
-        WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
-        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
-                                           WASM_GET_LOCAL(temp2))),
-        WASM_ONE);
-
-  FOR_FLOAT64_INPUTS(x) {
-    if (!PlatformCanRepresent(x)) continue;
-    FOR_FLOAT64_INPUTS(y) {
-      if (!PlatformCanRepresent(y)) continue;
-      double diff = x - y;  // Model comparison as subtraction.
-      if (!PlatformCanRepresent(diff)) continue;
-      r.Call(x, y);
-      int64_t expected = expected_op(x, y);
-      for (int i = 0; i < 2; i++) {
-        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
-      }
-    }
-  }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Eq) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Ne) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Gt) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Ge) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
-  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
-}
 
 bool IsExtreme(double x) {
   double abs_x = std::fabs(x);
@@ -1166,6 +1110,62 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Abs) {
 WASM_SIMD_TEST_NO_LOWERING(F64x2Neg) {
   RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Neg, Negate);
 }
+#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+
+#if V8_TARGET_ARCH_X64
+void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+                           WasmOpcode opcode, DoubleCompareOp expected_op) {
+  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+  // Set up global to hold mask output.
+  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+  // Build fn to splat test values, perform compare op, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
+        WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
+        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
+                                           WASM_GET_LOCAL(temp2))),
+        WASM_ONE);
+
+  FOR_FLOAT64_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT64_INPUTS(y) {
+      if (!PlatformCanRepresent(y)) continue;
+      double diff = x - y;  // Model comparison as subtraction.
+      if (!PlatformCanRepresent(diff)) continue;
+      r.Call(x, y);
+      int64_t expected = expected_op(x, y);
+      for (int i = 0; i < 2; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+      }
+    }
+  }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Eq) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Ne) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Gt) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Ge) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
+  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
+}
 
 void RunF64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                        WasmOpcode opcode, DoubleBinOp expected_op) {