[wasm-simd] Implement rounding average for ia32

Bug: v8:10039
Change-Id: I3568bd3d01508e8bca81959341c75369c5bdf700
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1958051
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65618}
commit cb4ff11d83 (parent fd53519035)
Authored by Ng Zhi An on 2020-01-07 11:03:05 -08:00; committed by Commit Bot
8 changed files with 21 additions and 11 deletions
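
The new opcodes lower to the SSE2 instructions pavgb/pavgw, which compute a per-lane unsigned average rounded up, i.e. (a + b + 1) >> 1 evaluated in a wider type. A minimal scalar sketch of that lane-wise semantics (the helper names are illustrative only; the tests below check the same behaviour via base::RoundingAverageUnsigned):

#include <cstdint>

// Unsigned rounding average: halfway cases round up; no overflow because
// the sum is formed in a wider type before shifting.
uint8_t RoundingAverageU8(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((uint16_t{a} + uint16_t{b} + 1) >> 1);
}

uint16_t RoundingAverageU16(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>((uint32_t{a} + uint32_t{b} + 1) >> 1);
}

// Examples: RoundingAverageU8(3, 4) == 4, RoundingAverageU8(255, 255) == 255.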

@@ -338,6 +338,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 AVX_PACKED_OP3(Paddq, paddq)
 AVX_PACKED_OP3(Psubq, psubq)
 AVX_PACKED_OP3(Pmuludq, pmuludq)
+AVX_PACKED_OP3(Pavgb, pavgb)
+AVX_PACKED_OP3(Pavgw, pavgw)
 #undef AVX_PACKED_OP3
 AVX_PACKED_OP3_WITH_TYPE(Psllq, psllq, XMMRegister, uint8_t)

@@ -34,8 +34,10 @@
 V(pslld, 66, 0F, F2) \
 V(psllq, 66, 0F, F3) \
 V(pmuludq, 66, 0F, F4) \
+V(pavgb, 66, 0F, E0) \
 V(psraw, 66, 0F, E1) \
 V(psrad, 66, 0F, E2) \
+V(pavgw, 66, 0F, E3) \
 V(psrlw, 66, 0F, D1) \
 V(psrld, 66, 0F, D2) \
 V(psrlq, 66, 0F, D3) \

@@ -3164,6 +3164,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 __ vpcmpeqw(i.OutputSimd128Register(), kScratchDoubleReg, src2);
 break;
 }
+case kIA32I16x8RoundingAverageU: {
+__ Pavgw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+i.InputOperand(1));
+break;
+}
 case kIA32I8x16Splat: {
 XMMRegister dst = i.OutputSimd128Register();
 __ Movd(dst, i.InputOperand(0));
@@ -3592,6 +3597,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 __ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
 break;
 }
+case kIA32I8x16RoundingAverageU: {
+__ Pavgb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+i.InputOperand(1));
+break;
+}
 case kIA32S128Zero: {
 XMMRegister dst = i.OutputSimd128Register();
 __ Pxor(dst, dst);
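
The Pavgw/Pavgb wrappers used above come from the AVX_PACKED_OP3 list added in the macro-assembler; like the other entries in that list, they select the AVX forms (vpavgw/vpavgb) when available and fall back to the plain SSE2 instructions otherwise, with the same per-lane result. An illustrative standalone sketch of that vector operation using SSE2 intrinsics (not part of the change, just the same semantics expressed directly):

#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  const uint16_t a[8] = {0, 1, 2, 3, 100, 200, 300, 65535};
  const uint16_t b[8] = {0, 2, 3, 4, 101, 201, 301, 65535};
  __m128i va = _mm_loadu_si128(reinterpret_cast<const __m128i*>(a));
  __m128i vb = _mm_loadu_si128(reinterpret_cast<const __m128i*>(b));
  // _mm_avg_epu16 compiles to pavgw: per-lane (a + b + 1) >> 1, unsigned.
  __m128i avg = _mm_avg_epu16(va, vb);
  uint16_t out[8];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(out), avg);
  for (uint16_t v : out) std::printf("%u ", unsigned{v});
  std::printf("\n");  // prints: 0 2 3 4 101 201 301 65535
  return 0;
}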

@@ -284,6 +284,7 @@ namespace compiler {
 V(AVXI16x8GtU) \
 V(SSEI16x8GeU) \
 V(AVXI16x8GeU) \
+V(IA32I16x8RoundingAverageU) \
 V(IA32I8x16Splat) \
 V(IA32I8x16ExtractLaneU) \
 V(IA32I8x16ExtractLaneS) \
@@ -332,6 +333,7 @@ namespace compiler {
 V(AVXI8x16GtU) \
 V(SSEI8x16GeU) \
 V(AVXI8x16GeU) \
+V(IA32I8x16RoundingAverageU) \
 V(IA32S128Zero) \
 V(SSES128Not) \
 V(AVXS128Not) \

@@ -265,6 +265,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
 case kAVXI16x8GtU:
 case kSSEI16x8GeU:
 case kAVXI16x8GeU:
+case kIA32I16x8RoundingAverageU:
 case kIA32I8x16Splat:
 case kIA32I8x16ExtractLaneU:
 case kIA32I8x16ExtractLaneS:
@@ -313,6 +314,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
 case kAVXI8x16GtU:
 case kSSEI8x16GeU:
 case kAVXI8x16GeU:
+case kIA32I8x16RoundingAverageU:
 case kIA32S128Zero:
 case kSSES128Not:
 case kAVXS128Not:

@@ -2027,7 +2027,9 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
 #define SIMD_BINOP_UNIFIED_SSE_AVX_LIST(V) \
 V(I64x2Add) \
-V(I64x2Sub)
+V(I64x2Sub) \
+V(I16x8RoundingAverageU) \
+V(I8x16RoundingAverageU)
 #define SIMD_UNOP_LIST(V) \
 V(F32x4SConvertI32x4) \

@@ -2636,12 +2636,6 @@ void InstructionSelector::VisitF64x2UConvertI64x2(Node* node) {
 }
 #if !V8_TARGET_ARCH_ARM64
 #if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitI16x8RoundingAverageU(Node* node) {
-UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI8x16RoundingAverageU(Node* node) {
-UNIMPLEMENTED();
-}
 void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); }
 #endif // !V8_TARGET_ARCH_ARM
 #if !V8_TARGET_ARCH_IA32

@@ -2181,13 +2181,11 @@ WASM_SIMD_TEST(I16x8LeU) {
 UnsignedLessEqual);
 }
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
 WASM_SIMD_TEST_NO_LOWERING(I16x8RoundingAverageU) {
 RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd,
 kExprI16x8RoundingAverageU,
 base::RoundingAverageUnsigned);
 }
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
 void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
 WasmOpcode opcode, Int16ShiftOp expected_op) {
@@ -2407,13 +2405,11 @@ WASM_SIMD_TEST(I8x16Mul) {
 base::MulWithWraparound);
 }
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
 WASM_SIMD_TEST_NO_LOWERING(I8x16RoundingAverageU) {
 RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd,
 kExprI8x16RoundingAverageU,
 base::RoundingAverageUnsigned);
 }
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
 void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
 WasmOpcode opcode, Int8ShiftOp expected_op) {