diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index e0759c3c9a..c99331c0a8 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -48,7 +48,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return true; }
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
 
 int DoubleRegister::NumRegisters() {
   return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index b471380a10..a87d94ef34 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -16,7 +16,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
 
 void RelocInfo::apply(intptr_t delta) {
   // On arm64 only internal references need extra work.
diff --git a/src/assembler.h b/src/assembler.h
index d744d89ab4..c240cc1833 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -241,7 +241,7 @@ class CpuFeatures : public AllStatic {
 
   static inline bool SupportsCrankshaft();
 
-  static inline bool SupportsSimd128();
+  static inline bool SupportsWasmSimd128();
 
   static inline unsigned icache_line_size() {
     DCHECK(icache_line_size_ != 0);
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 0d3848a012..dcee0ce84a 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -2054,6 +2054,32 @@ void InstructionSelector::VisitInt32x4Sub(Node* node) { UNIMPLEMENTED(); }
 #endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32
 
 #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }
+
 void InstructionSelector::VisitSimd128Zero(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitSimd1x4Zero(Node* node) { UNIMPLEMENTED(); }
@@ -2136,38 +2162,12 @@ void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
 
 void InstructionSelector::VisitInt32x4Neg(Node* node) { UNIMPLEMENTED(); }
 
-void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32x4Min(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
-
 void InstructionSelector::VisitInt32x4LessThan(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitInt32x4LessThanOrEqual(Node* node) {
   UNIMPLEMENTED();
 }
 
-void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }
-
 void InstructionSelector::VisitUint32x4LessThan(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitUint32x4LessThanOrEqual(Node* node) {
@@ -2321,9 +2321,13 @@ void InstructionSelector::VisitSimd128Or(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitSimd128Xor(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitSimd128Not(Node* node) { UNIMPLEMENTED(); }
+#endif  // !V8_TARGET_ARCH_ARM
 
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
 void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
+#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
 
+#if !V8_TARGET_ARCH_ARM
 void InstructionSelector::VisitSimd16x8Select(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitSimd8x16Select(Node* node) { UNIMPLEMENTED(); }
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 836bdb0cc2..ddad169e16 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -4050,7 +4050,7 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
         .LowerGraph();
   }
 
-  if (builder.has_simd() && !CpuFeatures::SupportsSimd128()) {
+  if (builder.has_simd() && !CpuFeatures::SupportsWasmSimd128()) {
     SimdScalarLowering(jsgraph_, func_body_.sig).LowerGraph();
   }
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index fb5fbd6cbe..ab994a32e4 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -2143,10 +2143,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64Int32x4Splat: {
-      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
-      __ Movd(dst, i.InputRegister(0));
-      __ shufps(dst, dst, 0x0);
+      __ movd(dst, i.InputRegister(0));
+      __ pshufd(dst, dst, 0x0);
       break;
     }
     case kX64Int32x4ExtractLane: {
@@ -2165,17 +2164,70 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64Int32x4Add: {
-      CpuFeatureScope sse_scope(masm(), SSE4_1);
       __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
       break;
     }
    case kX64Int32x4Sub: {
-      CpuFeatureScope sse_scope(masm(), SSE4_1);
       __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
       break;
     }
-    case kX64Simd128Zero: {
+    case kX64Int32x4Mul: {
       CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64Int32x4Min: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64Int32x4Max: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64Uint32x4Min: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64Uint32x4Max: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64Int32x4Equal: {
+      __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64Int32x4NotEqual: {
+      __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+      break;
+    }
+    case kX64Int32x4ShiftLeftByScalar: {
+      __ pslld(i.OutputSimd128Register(), i.InputInt8(1));
+      break;
+    }
+    case kX64Int32x4ShiftRightByScalar: {
+      __ psrad(i.OutputSimd128Register(), i.InputInt8(1));
+      break;
+    }
+    case kX64Uint32x4ShiftRightByScalar: {
+      __ psrld(i.OutputSimd128Register(), i.InputInt8(1));
+      break;
+    }
+    case kX64Simd32x4Select: {
+      // Mask used here is stored in dst.
+      XMMRegister dst = i.OutputSimd128Register();
+      __ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
+      __ xorps(kScratchDoubleReg, i.InputSimd128Register(2));
+      __ andps(dst, kScratchDoubleReg);
+      __ xorps(dst, i.InputSimd128Register(2));
+      break;
+    }
+    case kX64Simd128Zero: {
       XMMRegister dst = i.OutputSimd128Register();
       __ xorps(dst, dst);
       break;
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index a93e94e0e4..ba4a1ff96f 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -148,6 +148,17 @@ namespace compiler {
   V(X64Int32x4ReplaceLane)         \
   V(X64Int32x4Add)                 \
   V(X64Int32x4Sub)                 \
+  V(X64Int32x4Mul)                 \
+  V(X64Int32x4Min)                 \
+  V(X64Int32x4Max)                 \
+  V(X64Int32x4Equal)               \
+  V(X64Int32x4NotEqual)            \
+  V(X64Int32x4ShiftLeftByScalar)   \
+  V(X64Int32x4ShiftRightByScalar)  \
+  V(X64Uint32x4ShiftRightByScalar) \
+  V(X64Uint32x4Min)                \
+  V(X64Uint32x4Max)                \
+  V(X64Simd32x4Select)             \
   V(X64Simd128Zero)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index fae8d3406c..dfb3d6c070 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -128,6 +128,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64Int32x4ReplaceLane:
     case kX64Int32x4Add:
     case kX64Int32x4Sub:
+    case kX64Int32x4Mul:
+    case kX64Int32x4Min:
+    case kX64Int32x4Max:
+    case kX64Int32x4Equal:
+    case kX64Int32x4NotEqual:
+    case kX64Int32x4ShiftLeftByScalar:
+    case kX64Int32x4ShiftRightByScalar:
+    case kX64Uint32x4ShiftRightByScalar:
+    case kX64Uint32x4Min:
+    case kX64Uint32x4Max:
+    case kX64Simd32x4Select:
     case kX64Simd128Zero:
       return (instr->addressing_mode() == kMode_None)
                  ? kNoOpcodeFlags
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 785437bc3c..b9ca427da2 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -2339,56 +2339,92 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
   Emit(code, 1, outputs, input_count, inputs);
 }
 
-void InstructionSelector::VisitInt32x4Splat(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Int32x4Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
+#define SIMD_TYPES(V) V(Int32x4)
 
-void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
-  X64OperandGenerator g(this);
-  int32_t lane = OpParameter<int32_t>(node);
-  Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
-}
+#define SIMD_ZERO_OP_LIST(V) \
+  V(Simd128Zero)             \
+  V(Simd1x4Zero)             \
+  V(Simd1x8Zero)             \
+  V(Simd1x16Zero)
 
-void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
-  X64OperandGenerator g(this);
-  int32_t lane = OpParameter<int32_t>(node);
-  Emit(kX64Int32x4ReplaceLane, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
-       g.Use(node->InputAt(1)));
-}
+#define SIMD_BINOP_LIST(V) \
+  V(Int32x4Add)            \
+  V(Int32x4Sub)            \
+  V(Int32x4Mul)            \
+  V(Int32x4Min)            \
+  V(Int32x4Max)            \
+  V(Int32x4Equal)          \
+  V(Int32x4NotEqual)       \
+  V(Uint32x4Min)           \
+  V(Uint32x4Max)
 
-void InstructionSelector::VisitInt32x4Add(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
+#define SIMD_SHIFT_OPCODES(V)   \
+  V(Int32x4ShiftLeftByScalar)   \
+  V(Int32x4ShiftRightByScalar)  \
+  V(Uint32x4ShiftRightByScalar)
 
-void InstructionSelector::VisitInt32x4Sub(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Int32x4Sub, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
+#define VISIT_SIMD_SPLAT(Type)                               \
+  void InstructionSelector::Visit##Type##Splat(Node* node) { \
+    X64OperandGenerator g(this);                             \
+    Emit(kX64##Type##Splat, g.DefineAsRegister(node),        \
+         g.Use(node->InputAt(0)));                           \
+  }
+SIMD_TYPES(VISIT_SIMD_SPLAT)
+#undef VISIT_SIMD_SPLAT
 
-void InstructionSelector::VisitSimd128Zero(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Simd128Zero, g.DefineSameAsFirst(node));
-}
+#define VISIT_SIMD_EXTRACT_LANE(Type)                              \
+  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+    X64OperandGenerator g(this);                                   \
+    int32_t lane = OpParameter<int32_t>(node);                     \
+    Emit(kX64##Type##ExtractLane, g.DefineAsRegister(node),        \
+         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));   \
+  }
+SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
+#undef VISIT_SIMD_EXTRACT_LANE
 
-void InstructionSelector::VisitSimd1x4Zero(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Simd128Zero, g.DefineSameAsFirst(node));
-}
+#define VISIT_SIMD_REPLACE_LANE(Type)                              \
+  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+    X64OperandGenerator g(this);                                   \
+    int32_t lane = OpParameter<int32_t>(node);                     \
+    Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node),       \
+         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),    \
+         g.Use(node->InputAt(1)));                                 \
+  }
+SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+#undef VISIT_SIMD_REPLACE_LANE
 
-void InstructionSelector::VisitSimd1x8Zero(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Simd128Zero, g.DefineSameAsFirst(node));
-}
+#define SIMD_VISIT_ZERO_OP(Name)                                                \
+  void InstructionSelector::Visit##Name(Node* node) {                          \
+    X64OperandGenerator g(this);                                               \
+    Emit(kX64Simd128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
+  }
+SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
+#undef SIMD_VISIT_ZERO_OP
 
-void InstructionSelector::VisitSimd1x16Zero(Node* node) {
+#define VISIT_SIMD_BINOP(Opcode)                                            \
+  void InstructionSelector::Visit##Opcode(Node* node) {                     \
+    X64OperandGenerator g(this);                                            \
+    Emit(kX64##Opcode, g.DefineSameAsFirst(node),                           \
+         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+  }
+SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
+#undef VISIT_SIMD_BINOP
+
+#define VISIT_SIMD_SHIFT(Opcode)                                  \
+  void InstructionSelector::Visit##Opcode(Node* node) {           \
+    X64OperandGenerator g(this);                                  \
+    int32_t value = OpParameter<int32_t>(node);                   \
+    Emit(kX64##Opcode, g.DefineSameAsFirst(node),                 \
+         g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \
+  }
+SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
+#undef VISIT_SIMD_SHIFT
+
+void InstructionSelector::VisitSimd32x4Select(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Simd128Zero, g.DefineSameAsFirst(node));
+  Emit(kX64Simd32x4Select, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+       g.UseRegister(node->InputAt(2)));
 }
 
 // static
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 5225229f20..d8a107b113 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -48,7 +48,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return true; }
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
 
 static const byte kCallOpcode = 0xE8;
 static const int kNoCodeAgeSequenceLength = 5;
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 65f797d805..b06d7ce437 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -49,7 +49,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
 
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
 
 // -----------------------------------------------------------------------------
 // Operand and MemOperand.
diff --git a/src/mips64/assembler-mips64-inl.h b/src/mips64/assembler-mips64-inl.h
index 2a6da259dc..54998d58e8 100644
--- a/src/mips64/assembler-mips64-inl.h
+++ b/src/mips64/assembler-mips64-inl.h
@@ -49,7 +49,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
 
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
 
 // -----------------------------------------------------------------------------
 // Operand and MemOperand.
diff --git a/src/ppc/assembler-ppc-inl.h b/src/ppc/assembler-ppc-inl.h
index 8008348713..f20cbb4ece 100644
--- a/src/ppc/assembler-ppc-inl.h
+++ b/src/ppc/assembler-ppc-inl.h
@@ -49,7 +49,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
 
 void RelocInfo::apply(intptr_t delta) {
   // absolute code pointer inside code object moves with the code object.
diff --git a/src/s390/assembler-s390-inl.h b/src/s390/assembler-s390-inl.h
index 1990d6787d..3e7ff10611 100644
--- a/src/s390/assembler-s390-inl.h
+++ b/src/s390/assembler-s390-inl.h
@@ -48,7 +48,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
 
 void RelocInfo::apply(intptr_t delta) {
   // Absolute code pointer inside code object moves with the code object.
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index e8d8431e8f..2760df7343 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -17,7 +17,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return true; }
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
 
 // -----------------------------------------------------------------------------
 // Implementation of Assembler
diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h
index 8b2510bb3a..357c2c8fbe 100644
--- a/src/x87/assembler-x87-inl.h
+++ b/src/x87/assembler-x87-inl.h
@@ -48,7 +48,7 @@ namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
 
 static const byte kCallOpcode = 0xE8;
 static const int kNoCodeAgeSequenceLength = 5;
diff --git a/test/cctest/wasm/test-run-wasm-simd.cc b/test/cctest/wasm/test-run-wasm-simd.cc
index e9500c21de..35bea95a7a 100644
--- a/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/test/cctest/wasm/test-run-wasm-simd.cc
@@ -956,15 +956,19 @@ WASM_EXEC_COMPILED_TEST(I32x4Add) { RunI32x4BinOpTest(kExprI32x4Add, Add); }
 
 WASM_EXEC_COMPILED_TEST(I32x4Sub) { RunI32x4BinOpTest(kExprI32x4Sub, Sub); }
 
-#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
 WASM_EXEC_COMPILED_TEST(I32x4Mul) { RunI32x4BinOpTest(kExprI32x4Mul, Mul); }
+#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
 
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
 WASM_EXEC_COMPILED_TEST(S128And) { RunI32x4BinOpTest(kExprS128And, And); }
 
 WASM_EXEC_COMPILED_TEST(S128Or) { RunI32x4BinOpTest(kExprS128Or, Or); }
 
 WASM_EXEC_COMPILED_TEST(S128Xor) { RunI32x4BinOpTest(kExprS128Xor, Xor); }
+#endif  // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
 
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
 WASM_EXEC_COMPILED_TEST(I32x4Min) {
   RunI32x4BinOpTest(kExprI32x4MinS, Minimum);
 }
@@ -1007,7 +1011,9 @@ WASM_EXEC_COMPILED_TEST(I32x4Eq) { RunI32x4CompareOpTest(kExprI32x4Eq, Equal); }
 WASM_EXEC_COMPILED_TEST(I32x4Ne) {
   RunI32x4CompareOpTest(kExprI32x4Ne, NotEqual);
 }
+#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
 
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
 WASM_EXEC_COMPILED_TEST(I32x4LtS) {
   RunI32x4CompareOpTest(kExprI32x4LtS, Less);
 }
@@ -1039,7 +1045,9 @@ WASM_EXEC_COMPILED_TEST(I32x4GtU) {
 WASM_EXEC_COMPILED_TEST(I32x4GeU) {
   RunI32x4CompareOpTest(kExprI32x4GeU, UnsignedGreaterEqual);
 }
+#endif  // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
 
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
 void RunI32x4ShiftOpTest(WasmOpcode simd_op, Int32ShiftOp expected_op,
                          int shift) {
   FLAG_wasm_simd_prototype = true;
@@ -1066,10 +1074,9 @@ WASM_EXEC_COMPILED_TEST(I32x4ShrS) {
 
 WASM_EXEC_COMPILED_TEST(I32x4ShrU) {
   RunI32x4ShiftOpTest(kExprI32x4ShrU, LogicalShiftRight, 1);
 }
-#endif  // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
 
 #if V8_TARGET_ARCH_ARM
-
 void RunI16x8UnOpTest(WasmOpcode simd_op, Int16UnOp expected_op) {
   FLAG_wasm_simd_prototype = true;
   WasmRunner r(kExecuteCompiled);
@@ -1385,7 +1392,9 @@ WASM_EXEC_COMPILED_TEST(I8x16ShrS) {
 WASM_EXEC_COMPILED_TEST(I8x16ShrU) {
   RunI8x16ShiftOpTest(kExprI8x16ShrU, LogicalShiftRight, 1);
 }
+#endif  // V8_TARGET_ARCH_ARM
 
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
 // Test Select by making a mask where the first two lanes are true and the rest
 // false, and comparing for non-equality with zero to materialize a bool vector.
 #define WASM_SIMD_SELECT_TEST(format)                                         \
@@ -1422,6 +1431,9 @@ WASM_EXEC_COMPILED_TEST(I8x16ShrU) {
   }
 
 WASM_SIMD_SELECT_TEST(32x4)
+#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
+
+#if V8_TARGET_ARCH_ARM
 WASM_SIMD_SELECT_TEST(16x8)
 
 WASM_SIMD_SELECT_TEST(8x16)
diff --git a/test/cctest/wasm/wasm-run-utils.h b/test/cctest/wasm/wasm-run-utils.h
index b068d363c3..741dc94bd5 100644
--- a/test/cctest/wasm/wasm-run-utils.h
+++ b/test/cctest/wasm/wasm-run-utils.h
@@ -399,7 +399,7 @@ inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
     FATAL(str.str().c_str());
   }
   builder.Int64LoweringForTesting();
-  if (!CpuFeatures::SupportsSimd128()) {
+  if (!CpuFeatures::SupportsWasmSimd128()) {
     builder.SimdScalarLoweringForTesting();
   }
 }
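
Editor's note (not part of the patch): two of the new x64 code-generator cases rely on bitwise identities rather than dedicated instructions. kX64Simd32x4Select emits movaps/xorps/andps/xorps to compute dst = ((a ^ b) & mask) ^ b, which picks a where a mask lane is all ones and b where it is all zeros; kX64Int32x4NotEqual emits pcmpeqd and then inverts the result by XORing with an all-ones register. The scalar C++ sketch below only models one 32-bit lane to show why the identities hold; the helper names and the standalone program are ours, not something the CL adds.

// Illustrative, per-lane model of the kX64Simd32x4Select and
// kX64Int32x4NotEqual sequences above. Helper names are hypothetical.
#include <cassert>
#include <cstdint>

// mask ? a : b without branches, as emitted by movaps/xorps/andps/xorps:
// scratch = a ^ b; dst (holding the mask) &= scratch; dst ^= b.
uint32_t BitwiseSelectLane(uint32_t mask, uint32_t a, uint32_t b) {
  uint32_t scratch = a ^ b;       // movaps + xorps
  uint32_t dst = mask & scratch;  // andps (dst starts out holding the mask)
  return dst ^ b;                 // xorps: ((a ^ b) & mask) ^ b
}

// NotEqual as Equal followed by a bitwise NOT: pcmpeqd produces an all-ones
// lane on equality, and pxor with an all-ones register inverts it.
uint32_t NotEqualLane(uint32_t a, uint32_t b) {
  uint32_t eq = (a == b) ? 0xFFFFFFFFu : 0u;  // pcmpeqd
  return eq ^ 0xFFFFFFFFu;                    // pcmpeqd self (all ones) + pxor
}

int main() {
  // A full-width mask lane selects a; a zero lane selects b, which is why the
  // select tests materialize their masks with comparisons against zero.
  assert(BitwiseSelectLane(0xFFFFFFFFu, 1u, 2u) == 1u);
  assert(BitwiseSelectLane(0x00000000u, 1u, 2u) == 2u);
  assert(NotEqualLane(3u, 3u) == 0u);
  assert(NotEqualLane(3u, 4u) == 0xFFFFFFFFu);
  return 0;
}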