From 34871eddc0471be31f749465c6f36a5f74af5426 Mon Sep 17 00:00:00 2001
From: Ng Zhi An
Date: Thu, 9 Jul 2020 15:02:12 -0700
Subject: [PATCH] [wasm-simd][liftoff] Implement subset of v128.const

Partial implementation of v128.const, only the optimized cases for all
0s and all 1s. The other cases bail out to TurboFan for now, and will
be added in subsequent patches.

Bug: v8:9909
Change-Id: I3240c1c5f4259c45d51edca00fec37047bc1b3a5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2284212
Commit-Queue: Zhi An Ng
Reviewed-by: Clemens Backes
Cr-Commit-Position: refs/heads/master@{#68772}
---
 src/wasm/baseline/arm/liftoff-assembler-arm.h   |  5 +++++
 .../baseline/arm64/liftoff-assembler-arm64.h    |  5 +++++
 src/wasm/baseline/ia32/liftoff-assembler-ia32.h |  5 +++++
 src/wasm/baseline/liftoff-assembler.h           |  1 +
 src/wasm/baseline/liftoff-compiler.cc           | 16 +++++++++++++++-
 src/wasm/baseline/x64/liftoff-assembler-x64.h   |  5 +++++
 6 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h
index d10c92d50d..1b73620ffe 100644
--- a/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -3269,6 +3269,11 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
   liftoff::F64x2Compare(this, dst, lhs, rhs, le);
 }
 
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+                                       const uint8_t imms[16]) {
+  bailout(kSimd, "s128.const");
+}
+
 void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
   vmvn(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
 }
diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 7313c44732..c3c296dca3 100644
--- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -2331,6 +2331,11 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
   Fcmge(dst.fp().V2D(), rhs.fp().V2D(), lhs.fp().V2D());
 }
 
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+                                       const uint8_t imms[16]) {
+  bailout(kSimd, "s128.const");
+}
+
 void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
   Mvn(dst.fp().V16B(), src.fp().V16B());
 }
diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 8efd592705..5fafe71723 100644
--- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2832,6 +2832,11 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
                     rhs);
 }
 
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+                                       const uint8_t imms[16]) {
+  bailout(kSimd, "s128.const");
+}
+
 void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
   if (dst.fp() != src.fp()) {
     Pcmpeqd(dst.fp(), dst.fp());
diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h
index de8e2bbc7d..5456c4d7be 100644
--- a/src/wasm/baseline/liftoff-assembler.h
+++ b/src/wasm/baseline/liftoff-assembler.h
@@ -818,6 +818,7 @@ class LiftoffAssembler : public TurboAssembler {
                            LiftoffRegister rhs);
   inline void emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
                             LiftoffRegister rhs);
+  inline void emit_s128_const(LiftoffRegister dst, const uint8_t imms[16]);
   inline void emit_s128_not(LiftoffRegister dst, LiftoffRegister src);
   inline void emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
                             LiftoffRegister rhs);
diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc
index 4ace16f44a..8cb5c56344 100644
--- a/src/wasm/baseline/liftoff-compiler.cc
+++ b/src/wasm/baseline/liftoff-compiler.cc
@@ -2949,7 +2949,21 @@ class LiftoffCompiler {
 
   void S128Const(FullDecoder* decoder, const Simd128Immediate& imm,
                  Value* result) {
-    unsupported(decoder, kSimd, "simd");
+    constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+    LiftoffRegister dst = __ GetUnusedRegister(result_rc, {});
+    bool all_zeroes = std::all_of(std::begin(imm.value), std::end(imm.value),
+                                  [](uint8_t v) { return v == 0; });
+    bool all_ones = std::all_of(std::begin(imm.value), std::end(imm.value),
+                                [](uint8_t v) { return v == 0xff; });
+    if (all_zeroes) {
+      __ LiftoffAssembler::emit_s128_xor(dst, dst, dst);
+    } else if (all_ones) {
+      // Any SIMD eq will work, i32x4 is efficient on all archs.
+      __ LiftoffAssembler::emit_i32x4_eq(dst, dst, dst);
+    } else {
+      __ LiftoffAssembler::emit_s128_const(dst, imm.value);
+    }
+    __ PushRegister(kWasmS128, dst);
   }
 
   void Simd8x16ShuffleOp(FullDecoder* decoder,
diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 57d915e363..93e82841de 100644
--- a/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -2547,6 +2547,11 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
                     rhs);
 }
 
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+                                       const uint8_t imms[16]) {
+  bailout(kSimd, "s128.const");
+}
+
 void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
   if (dst.fp() != src.fp()) {
     Pcmpeqd(dst.fp(), dst.fp());
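For reference, the classification that the S128Const hunk performs on the 16-byte immediate can be exercised in isolation. The sketch below is a standalone, hypothetical C++ example, not part of the patch or of V8 (ClassifyS128Immediate and S128ConstKind are made-up names); it applies the same std::all_of checks as the liftoff-compiler.cc change: an all-zero immediate is handled by a self-XOR, an all-ones immediate by a self-equality compare (i32x4.eq), and anything else falls through to the generic emit_s128_const path, which every backend in this patch still bails out on.

// Standalone sketch (not V8 code): classify a v128.const immediate the same
// way the S128Const hunk above does, using std::all_of over the 16 bytes.
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>

enum class S128ConstKind {
  kAllZeroes,  // can be materialized with xor(dst, dst, dst)
  kAllOnes,    // can be materialized with a self-equality compare, e.g. i32x4.eq
  kGeneric,    // needs the full emit_s128_const path (bails out in this patch)
};

S128ConstKind ClassifyS128Immediate(const std::array<uint8_t, 16>& imm) {
  bool all_zeroes = std::all_of(imm.begin(), imm.end(),
                                [](uint8_t v) { return v == 0; });
  bool all_ones = std::all_of(imm.begin(), imm.end(),
                              [](uint8_t v) { return v == 0xff; });
  if (all_zeroes) return S128ConstKind::kAllZeroes;
  if (all_ones) return S128ConstKind::kAllOnes;
  return S128ConstKind::kGeneric;
}

int main() {
  std::array<uint8_t, 16> zeroes{};  // all bytes 0x00
  std::array<uint8_t, 16> ones;
  ones.fill(0xff);                   // all bytes 0xff
  std::array<uint8_t, 16> mixed{};
  mixed[0] = 1;                      // any other pattern

  std::printf("zeroes -> %d\n", static_cast<int>(ClassifyS128Immediate(zeroes)));
  std::printf("ones   -> %d\n", static_cast<int>(ClassifyS128Immediate(ones)));
  std::printf("mixed  -> %d\n", static_cast<int>(ClassifyS128Immediate(mixed)));
}

The two special cases are worth detecting up front because both are register-only: XORing a register with itself yields all zeroes and comparing a register with itself for equality yields all ones, so neither needs a constant loaded from memory, which is why they can be supported before a general constant-materialization path exists in Liftoff.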