Add Int32x4 Wasm Simd Binops, compare ops, select
- Added: Int32x4Mul, Int32x4Min, Int32x4Max, Int32x4Equal, Int32x4NotEqual, Uint32x4Min, Uint32x4Max
- Fix I32x4Splat

R=bbudge@chromium.org, bradnelson@chromium.org, mtrofin@chromium.org

Review-Url: https://codereview.chromium.org/2719953002
Cr-Commit-Position: refs/heads/master@{#43827}
This commit is contained in:
parent 387e2aca5e
commit 16796914cb

Changed paths: src, test/cctest/wasm
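For reference, the ops added by this CL operate lane-wise on four 32-bit integers, and the compare ops produce an all-ones or all-zeros lane as a boolean mask. A minimal sketch of the intended semantics (illustrative only, not code from this CL; the struct and helper names are hypothetical):

```cpp
#include <algorithm>
#include <cstdint>

// Lane-wise reference semantics for the new i32x4 ops.
struct I32x4 {
  int32_t lane[4];
};

inline I32x4 I32x4Mul(I32x4 a, I32x4 b) {
  I32x4 r;
  for (int i = 0; i < 4; ++i) {
    // Wrapping multiply, matching Wasm's modular i32 arithmetic.
    r.lane[i] = static_cast<int32_t>(static_cast<uint32_t>(a.lane[i]) *
                                     static_cast<uint32_t>(b.lane[i]));
  }
  return r;
}

inline I32x4 I32x4MinS(I32x4 a, I32x4 b) {  // signed min; Uint32x4Min is analogous on uint32_t
  I32x4 r;
  for (int i = 0; i < 4; ++i) r.lane[i] = std::min(a.lane[i], b.lane[i]);
  return r;
}

inline I32x4 I32x4Eq(I32x4 a, I32x4 b) {  // NotEqual inverts each lane's mask
  I32x4 r;
  for (int i = 0; i < 4; ++i) r.lane[i] = (a.lane[i] == b.lane[i]) ? -1 : 0;
  return r;
}
```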
@@ -48,7 +48,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
int DoubleRegister::NumRegisters() {
  return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;

@@ -16,7 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return false; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
  // On arm64 only internal references need extra work.

@@ -241,7 +241,7 @@ class CpuFeatures : public AllStatic {
  static inline bool SupportsCrankshaft();
  static inline bool SupportsSimd128();
  static inline bool SupportsWasmSimd128();
  static inline unsigned icache_line_size() {
    DCHECK(icache_line_size_ != 0);
@@ -2054,6 +2054,32 @@ void InstructionSelector::VisitInt32x4Sub(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32

#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4Min(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitSimd128Zero(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitSimd1x4Zero(Node* node) { UNIMPLEMENTED(); }

@@ -2136,38 +2162,12 @@ void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
void InstructionSelector::VisitInt32x4Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4Min(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4LessThan(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32x4LessThanOrEqual(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitUint32x4LessThan(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitUint32x4LessThanOrEqual(Node* node) {

@@ -2321,9 +2321,13 @@ void InstructionSelector::VisitSimd128Or(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitSimd128Xor(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitSimd128Not(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM

#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM

#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitSimd16x8Select(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitSimd8x16Select(Node* node) { UNIMPLEMENTED(); }
@@ -4050,7 +4050,7 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
        .LowerGraph();
  }

  if (builder.has_simd() && !CpuFeatures::SupportsSimd128()) {
  if (builder.has_simd() && !CpuFeatures::SupportsWasmSimd128()) {
    SimdScalarLowering(jsgraph_, func_body_.sig).LowerGraph();
  }
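The gate here switches the fallback decision to the Wasm-specific predicate: when the target cannot execute 128-bit SIMD natively, SimdScalarLowering rewrites the SIMD graph nodes into scalar operations. Conceptually, per lane (a hedged sketch, not the lowering pass itself, which rewrites TurboFan nodes rather than arrays):

```cpp
#include <cstdint>

// Sketch of what scalar lowering conceptually turns an i32x4.add into when
// CpuFeatures::SupportsWasmSimd128() is false: four independent i32 adds.
void ScalarLoweredI32x4Add(const int32_t a[4], const int32_t b[4],
                           int32_t out[4]) {
  for (int lane = 0; lane < 4; ++lane) {
    out[lane] = static_cast<int32_t>(static_cast<uint32_t>(a[lane]) +
                                     static_cast<uint32_t>(b[lane]));
  }
}
```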
@@ -2143,10 +2143,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
    }
    case kX64Int32x4Splat: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      __ Movd(dst, i.InputRegister(0));
      __ shufps(dst, dst, 0x0);
      __ movd(dst, i.InputRegister(0));
      __ pshufd(dst, dst, 0x0);
      break;
    }
    case kX64Int32x4ExtractLane: {

@@ -2165,17 +2164,70 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
    }
    case kX64Int32x4Add: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
      break;
    }
    case kX64Int32x4Sub: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
      break;
    }
    case kX64Simd128Zero: {
    case kX64Int32x4Mul: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
      break;
    }
    case kX64Int32x4Min: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
      break;
    }
    case kX64Int32x4Max: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
      break;
    }
    case kX64Uint32x4Min: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
      break;
    }
    case kX64Uint32x4Max: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
      break;
    }
    case kX64Int32x4Equal: {
      __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
      break;
    }
    case kX64Int32x4NotEqual: {
      __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
      break;
    }
    case kX64Int32x4ShiftLeftByScalar: {
      __ pslld(i.OutputSimd128Register(), i.InputInt8(1));
      break;
    }
    case kX64Int32x4ShiftRightByScalar: {
      __ psrad(i.OutputSimd128Register(), i.InputInt8(1));
      break;
    }
    case kX64Uint32x4ShiftRightByScalar: {
      __ psrld(i.OutputSimd128Register(), i.InputInt8(1));
      break;
    }
    case kX64Simd32x4Select: {
      // Mask used here is stored in dst.
      XMMRegister dst = i.OutputSimd128Register();
      __ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
      __ xorps(kScratchDoubleReg, i.InputSimd128Register(2));
      __ andps(dst, kScratchDoubleReg);
      __ xorps(dst, i.InputSimd128Register(2));
      break;
    }
    case kX64Simd128Zero: {
      XMMRegister dst = i.OutputSimd128Register();
      __ xorps(dst, dst);
      break;
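Three idioms in this code-generator hunk are worth calling out. There is no packed "not equal" instruction, so kX64Int32x4NotEqual compares for equality and then flips every bit by XOR-ing with an all-ones register. kX64Simd32x4Select uses the classic three-instruction bit-select, dst = ((a ^ b) & mask) ^ b, which yields a where the mask bits are set and b where they are clear. And the Splat fix replaces the float shuffle (Movd + shufps) with an integer broadcast (movd + pshufd). A hedged sketch of the same idioms in plain SSE2 intrinsics (illustration only, not the V8 macro-assembler code):

```cpp
#include <cstdint>
#include <emmintrin.h>  // SSE2 intrinsics

// i32x4 "not equal": compare-equal, then invert with an all-ones mask.
static inline __m128i I32x4NotEqual(__m128i a, __m128i b) {
  __m128i eq = _mm_cmpeq_epi32(a, b);          // pcmpeqd
  __m128i all_ones = _mm_cmpeq_epi32(eq, eq);  // pcmpeqd reg,reg -> all ones
  return _mm_xor_si128(eq, all_ones);          // pxor
}

// Bitwise select: (mask & a) | (~mask & b), computed as ((a ^ b) & mask) ^ b
// so it needs only xor/and/xor and no separate andnot.
static inline __m128i Select(__m128i mask, __m128i a, __m128i b) {
  __m128i t = _mm_and_si128(_mm_xor_si128(a, b), mask);
  return _mm_xor_si128(t, b);
}

// The splat fix: broadcast a GP value into all four lanes with an integer
// shuffle (movd + pshufd) instead of the float shuffle shufps.
static inline __m128i I32x4Splat(int32_t x) {
  __m128i v = _mm_cvtsi32_si128(x);   // movd
  return _mm_shuffle_epi32(v, 0x0);   // pshufd, imm8 = 0
}
```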
@@ -148,6 +148,17 @@ namespace compiler {
  V(X64Int32x4ReplaceLane) \
  V(X64Int32x4Add) \
  V(X64Int32x4Sub) \
  V(X64Int32x4Mul) \
  V(X64Int32x4Min) \
  V(X64Int32x4Max) \
  V(X64Int32x4Equal) \
  V(X64Int32x4NotEqual) \
  V(X64Int32x4ShiftLeftByScalar) \
  V(X64Int32x4ShiftRightByScalar) \
  V(X64Uint32x4ShiftRightByScalar) \
  V(X64Uint32x4Min) \
  V(X64Uint32x4Max) \
  V(X64Simd32x4Select) \
  V(X64Simd128Zero)

// Addressing modes represent the "shape" of inputs to an instruction.

@@ -128,6 +128,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
    case kX64Int32x4ReplaceLane:
    case kX64Int32x4Add:
    case kX64Int32x4Sub:
    case kX64Int32x4Mul:
    case kX64Int32x4Min:
    case kX64Int32x4Max:
    case kX64Int32x4Equal:
    case kX64Int32x4NotEqual:
    case kX64Int32x4ShiftLeftByScalar:
    case kX64Int32x4ShiftRightByScalar:
    case kX64Uint32x4ShiftRightByScalar:
    case kX64Uint32x4Min:
    case kX64Uint32x4Max:
    case kX64Simd32x4Select:
    case kX64Simd128Zero:
      return (instr->addressing_mode() == kMode_None)
                 ? kNoOpcodeFlags
@@ -2339,56 +2339,92 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
  Emit(code, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitInt32x4Splat(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
#define SIMD_TYPES(V) V(Int32x4)

void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
  X64OperandGenerator g(this);
  int32_t lane = OpParameter<int32_t>(node);
  Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
}
#define SIMD_ZERO_OP_LIST(V) \
  V(Simd128Zero) \
  V(Simd1x4Zero) \
  V(Simd1x8Zero) \
  V(Simd1x16Zero)

void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
  X64OperandGenerator g(this);
  int32_t lane = OpParameter<int32_t>(node);
  Emit(kX64Int32x4ReplaceLane, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
       g.Use(node->InputAt(1)));
}
#define SIMD_BINOP_LIST(V) \
  V(Int32x4Add) \
  V(Int32x4Sub) \
  V(Int32x4Mul) \
  V(Int32x4Min) \
  V(Int32x4Max) \
  V(Int32x4Equal) \
  V(Int32x4NotEqual) \
  V(Uint32x4Min) \
  V(Uint32x4Max)

void InstructionSelector::VisitInt32x4Add(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
#define SIMD_SHIFT_OPCODES(V) \
  V(Int32x4ShiftLeftByScalar) \
  V(Int32x4ShiftRightByScalar) \
  V(Uint32x4ShiftRightByScalar)

void InstructionSelector::VisitInt32x4Sub(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4Sub, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
#define VISIT_SIMD_SPLAT(Type) \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    X64OperandGenerator g(this); \
    Emit(kX64##Type##Splat, g.DefineAsRegister(node), \
         g.Use(node->InputAt(0))); \
  }
SIMD_TYPES(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT

void InstructionSelector::VisitSimd128Zero(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Simd128Zero, g.DefineSameAsFirst(node));
}
#define VISIT_SIMD_EXTRACT_LANE(Type) \
  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
    X64OperandGenerator g(this); \
    int32_t lane = OpParameter<int32_t>(node); \
    Emit(kX64##Type##ExtractLane, g.DefineAsRegister(node), \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
  }
SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE

void InstructionSelector::VisitSimd1x4Zero(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Simd128Zero, g.DefineSameAsFirst(node));
}
#define VISIT_SIMD_REPLACE_LANE(Type) \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    X64OperandGenerator g(this); \
    int32_t lane = OpParameter<int32_t>(node); \
    Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node), \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
         g.Use(node->InputAt(1))); \
  }
SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
#undef VISIT_SIMD_REPLACE_LANE

void InstructionSelector::VisitSimd1x8Zero(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Simd128Zero, g.DefineSameAsFirst(node));
}
#define SIMD_VISIT_ZERO_OP(Name) \
  void InstructionSelector::Visit##Name(Node* node) { \
    X64OperandGenerator g(this); \
    Emit(kX64Simd128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
  }
SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
#undef SIMD_VISIT_ZERO_OP

void InstructionSelector::VisitSimd1x16Zero(Node* node) {
#define VISIT_SIMD_BINOP(Opcode) \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    X64OperandGenerator g(this); \
    Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
  }
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP

#define VISIT_SIMD_SHIFT(Opcode) \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    X64OperandGenerator g(this); \
    int32_t value = OpParameter<int32_t>(node); \
    Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \
  }
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT

void InstructionSelector::VisitSimd32x4Select(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Simd128Zero, g.DefineSameAsFirst(node));
  Emit(kX64Simd32x4Select, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       g.UseRegister(node->InputAt(2)));
}

// static
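The hand-written visitors above are replaced by X-macro lists, so each entry in SIMD_BINOP_LIST generates one visitor. For example, applying VISIT_SIMD_BINOP to Int32x4Add expands to roughly the following (a sketch of the preprocessor output, matching the explicit VisitInt32x4Add this hunk removes: both operands are forced into registers and the destination reuses the first input, which fits the two-operand SSE forms emitted by the code generator):

```cpp
// Approximate expansion of VISIT_SIMD_BINOP(Int32x4Add).
void InstructionSelector::VisitInt32x4Add(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
```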
@@ -48,7 +48,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;

@@ -49,7 +49,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
bool CpuFeatures::SupportsSimd128() { return false; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
// -----------------------------------------------------------------------------
// Operand and MemOperand.

@@ -49,7 +49,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
bool CpuFeatures::SupportsSimd128() { return false; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
// -----------------------------------------------------------------------------
// Operand and MemOperand.

@@ -49,7 +49,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return false; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
  // absolute code pointer inside code object moves with the code object.

@@ -48,7 +48,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return false; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
  // Absolute code pointer inside code object moves with the code object.

@@ -17,7 +17,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
// -----------------------------------------------------------------------------
// Implementation of Assembler

@@ -48,7 +48,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return false; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;
@@ -956,15 +956,19 @@ WASM_EXEC_COMPILED_TEST(I32x4Add) { RunI32x4BinOpTest(kExprI32x4Add, Add); }
WASM_EXEC_COMPILED_TEST(I32x4Sub) { RunI32x4BinOpTest(kExprI32x4Sub, Sub); }

#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(I32x4Mul) { RunI32x4BinOpTest(kExprI32x4Mul, Mul); }
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET

#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(S128And) { RunI32x4BinOpTest(kExprS128And, And); }

WASM_EXEC_COMPILED_TEST(S128Or) { RunI32x4BinOpTest(kExprS128Or, Or); }

WASM_EXEC_COMPILED_TEST(S128Xor) { RunI32x4BinOpTest(kExprS128Xor, Xor); }
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(I32x4Min) {
  RunI32x4BinOpTest(kExprI32x4MinS, Minimum);
}

@@ -1007,7 +1011,9 @@ WASM_EXEC_COMPILED_TEST(I32x4Eq) { RunI32x4CompareOpTest(kExprI32x4Eq, Equal); }
WASM_EXEC_COMPILED_TEST(I32x4Ne) {
  RunI32x4CompareOpTest(kExprI32x4Ne, NotEqual);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET

#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(I32x4LtS) {
  RunI32x4CompareOpTest(kExprI32x4LtS, Less);
}

@@ -1039,7 +1045,9 @@ WASM_EXEC_COMPILED_TEST(I32x4GtU) {
WASM_EXEC_COMPILED_TEST(I32x4GeU) {
  RunI32x4CompareOpTest(kExprI32x4GeU, UnsignedGreaterEqual);
}
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
void RunI32x4ShiftOpTest(WasmOpcode simd_op, Int32ShiftOp expected_op,
                         int shift) {
  FLAG_wasm_simd_prototype = true;

@@ -1066,10 +1074,9 @@ WASM_EXEC_COMPILED_TEST(I32x4ShrS) {
WASM_EXEC_COMPILED_TEST(I32x4ShrU) {
  RunI32x4ShiftOpTest(kExprI32x4ShrU, LogicalShiftRight, 1);
}
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET

#if V8_TARGET_ARCH_ARM

void RunI16x8UnOpTest(WasmOpcode simd_op, Int16UnOp expected_op) {
  FLAG_wasm_simd_prototype = true;
  WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);

@@ -1385,7 +1392,9 @@ WASM_EXEC_COMPILED_TEST(I8x16ShrS) {
WASM_EXEC_COMPILED_TEST(I8x16ShrU) {
  RunI8x16ShiftOpTest(kExprI8x16ShrU, LogicalShiftRight, 1);
}
#endif // V8_TARGET_ARCH_ARM

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
// Test Select by making a mask where the first two lanes are true and the rest
// false, and comparing for non-equality with zero to materialize a bool vector.
#define WASM_SIMD_SELECT_TEST(format) \

@@ -1422,6 +1431,9 @@ WASM_EXEC_COMPILED_TEST(I8x16ShrU) {
}

WASM_SIMD_SELECT_TEST(32x4)
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64

#if V8_TARGET_ARCH_ARM
WASM_SIMD_SELECT_TEST(16x8)
WASM_SIMD_SELECT_TEST(8x16)
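As the test comment above describes, the 32x4 select test materializes a boolean vector by comparing against zero for non-equality (so the first two lanes are all-ones and the rest all-zeros) and then checks that select takes lanes from the first operand where the mask is set and from the second elsewhere. A hedged per-lane reference model of what the test expects (illustrative only; the real test drives the Wasm opcodes through WasmRunner):

```cpp
#include <cstdint>

// Per-lane select: an all-ones mask lane picks src1, an all-zeros lane picks src2.
int32_t SelectLane(int32_t mask, int32_t src1, int32_t src2) {
  return (mask & src1) | (~mask & src2);
}
```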
@@ -399,7 +399,7 @@ inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
    FATAL(str.str().c_str());
  }
  builder.Int64LoweringForTesting();
  if (!CpuFeatures::SupportsSimd128()) {
  if (!CpuFeatures::SupportsWasmSimd128()) {
    builder.SimdScalarLoweringForTesting();
  }
}