[wasm-simd][liftoff] Handle SIMD params in function

Add a cctest that contains a function with a SIMD parameter, and calls
that function. This exercises two cases in Liftoff: preparing the call
to the function, and processing the SIMD parameters inside the called
function. The tricky case is ARM, where an S128 value requires an FP
register pair. Most of the added logic checks the RegClass/type and
constructs the right kind of LiftoffRegister to use.

As a drive-by, add a SIMD case to the various backends' Move
implementations. This is not exercised by the test case, as it would
require a more complicated function setup.

Bug: v8:9909
Change-Id: I1d01e8c3bee0cf336d1a8ff537317c77aedfdac0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2004369
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65829}
Author: Ng Zhi An, 2020-01-16 14:40:06 -08:00 (committed by Commit Bot)
Parent: 13b148a31f, Commit: 8bcee19168
8 changed files with 71 additions and 20 deletions
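Before the per-file diffs, a standalone sketch (not V8 code, a minimal model for illustration) of the ARM register aliasing behind the "FP pair" remark above: s(2n) and s(2n+1) overlap d(n), and d(2n) and d(2n+1) overlap q(n), so a 128-bit value occupies a pair of adjacent d registers.

#include <cassert>
#include <cstdio>

// s(2n)/s(2n+1) alias d(n): an f32's s-register code halves to a d code.
int DFromS(int s_code) { return s_code / 2; }

// d(2n)/d(2n+1) alias q(n): GetSimd128Register in the ARM diff below does
// exactly this division.
int QFromD(int d_code) {
  assert(d_code % 2 == 0);  // must be the low (even) half of the pair
  return d_code / 2;
}

int main() {
  std::printf("s8 aliases d%d; d4+d5 form q%d\n", DFromS(8), QFromD(4));
  return 0;
}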

src/wasm/baseline/arm/liftoff-assembler-arm.h

@@ -181,6 +181,10 @@ inline FloatRegister GetFloatRegister(DoubleRegister reg) {
   return LowDwVfpRegister::from_code(reg.code()).low();
 }
 
+inline Simd128Register GetSimd128Register(DoubleRegister reg) {
+  return QwNeonRegister::from_code(reg.code() / 2);
+}
+
 enum class MinOrMax : uint8_t { kMin, kMax };
 template <typename RegisterType>
 inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
@@ -591,9 +595,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                             ValueType type) {
   DCHECK_NE(dst, src);
   if (type == kWasmF32) {
     vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
-  } else {
-    DCHECK_EQ(kWasmF64, type);
+  } else if (type == kWasmF64) {
     vmov(dst, src);
+  } else {
+    DCHECK_EQ(kWasmS128, type);
+    vmov(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
   }
 }

src/wasm/baseline/arm64/liftoff-assembler-arm64.h

@@ -382,9 +382,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                             ValueType type) {
   if (type == kWasmF32) {
     Fmov(dst.S(), src.S());
-  } else {
-    DCHECK_EQ(kWasmF64, type);
+  } else if (type == kWasmF64) {
     Fmov(dst.D(), src.D());
+  } else {
+    DCHECK_EQ(kWasmS128, type);
+    Fmov(dst.Q(), src.Q());
   }
 }

src/wasm/baseline/ia32/liftoff-assembler-ia32.h

@@ -452,9 +452,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                             ValueType type) {
   DCHECK_NE(dst, src);
   if (type == kWasmF32) {
     movss(dst, src);
-  } else {
-    DCHECK_EQ(kWasmF64, type);
+  } else if (type == kWasmF64) {
     movsd(dst, src);
+  } else {
+    DCHECK_EQ(kWasmS128, type);
+    movapd(dst, src);
   }
 }

src/wasm/baseline/liftoff-assembler.cc

@@ -636,17 +636,22 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
       DCHECK(!loc.IsAnyRegister());
       RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type);
       int reg_code = loc.AsRegister();
-#if V8_TARGET_ARCH_ARM
-      // Liftoff assumes a one-to-one mapping between float registers and
-      // double registers, and so does not distinguish between f32 and f64
-      // registers. The f32 register code must therefore be halved in order to
-      // pass the f64 code to Liftoff.
-      DCHECK_IMPLIES(type == kWasmF32, (reg_code % 2) == 0);
-      LiftoffRegister reg = LiftoffRegister::from_code(
-          rc, (type == kWasmF32) ? (reg_code / 2) : reg_code);
-#else
-      LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
-#endif
+      // Initialize to anything, will be set in all branches below.
+      LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
+      if (!kSimpleFPAliasing && type == kWasmF32) {
+        // Liftoff assumes a one-to-one mapping between float registers and
+        // double registers, and so does not distinguish between f32 and f64
+        // registers. The f32 register code must therefore be halved in order
+        // to pass the f64 code to Liftoff.
+        DCHECK_EQ(0, reg_code % 2);
+        reg = LiftoffRegister::from_code(rc, (reg_code / 2));
+      } else if (kNeedS128RegPair && type == kWasmS128) {
+        reg = LiftoffRegister::ForFpPair(DoubleRegister::from_code(reg_code));
+      } else {
+        reg = LiftoffRegister::from_code(rc, reg_code);
+      }
       param_regs.set(reg);
       if (is_gp_pair) {
         stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_offset,
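To make the three branches above easier to follow, here is a minimal standalone model of how a parameter's register is chosen on an ARM-like target. This is a sketch under simplifying assumptions, not the real LiftoffRegister API; the type and constant names merely mirror the diff.

#include <cassert>
#include <cstdio>

// Simplified stand-ins for Liftoff's real types (assumption, not V8 code).
enum RegClass { kGpRegClass, kFpRegClass, kFpRegPairClass };
enum WasmType { kI32, kF32, kF64, kS128 };

struct RegModel {
  RegClass rc;
  int low_code;   // gp/fp register code (low half for a pair)
  int high_code;  // only meaningful for kFpRegPairClass
};

// Modeled after ARM: FP registers alias, and an S128 needs a d-register pair.
constexpr bool kSimpleFPAliasing = false;
constexpr bool kNeedS128RegPair = true;

RegModel SelectParamReg(WasmType type, int reg_code) {
  if (!kSimpleFPAliasing && type == kF32) {
    // An f32 arrives as an s-register code; halve it to name the aliasing
    // d register, which is what Liftoff tracks.
    assert(reg_code % 2 == 0);
    return {kFpRegClass, reg_code / 2, -1};
  }
  if (kNeedS128RegPair && type == kS128) {
    // An S128 claims two adjacent d registers, d(n) and d(n+1).
    return {kFpRegPairClass, reg_code, reg_code + 1};
  }
  return {type == kI32 ? kGpRegClass : kFpRegClass, reg_code, -1};
}

int main() {
  RegModel f = SelectParamReg(kF32, 4);   // s4 -> d2
  RegModel v = SelectParamReg(kS128, 2);  // d2 + d3, i.e. q1
  std::printf("f32 in d%d; s128 in d%d/d%d\n", f.low_code, v.low_code,
              v.high_code);
  return 0;
}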

src/wasm/baseline/liftoff-compiler.cc

@@ -426,7 +426,12 @@ class LiftoffCompiler {
                                : kLiftoffAssemblerFpCacheRegs;
       if (cache_regs & (1ULL << reg_code)) {
         // This is a cache register, just use it.
-        in_reg = LiftoffRegister::from_code(rc, reg_code);
+        if (kNeedS128RegPair && rc == kFpRegPair) {
+          in_reg =
+              LiftoffRegister::ForFpPair(DoubleRegister::from_code(reg_code));
+        } else {
+          in_reg = LiftoffRegister::from_code(rc, reg_code);
+        }
       } else {
         // Move to a cache register (spill one if necessary).
         // Note that we cannot create a {LiftoffRegister} for reg_code, since
@@ -434,7 +439,11 @@ class LiftoffCompiler {
         in_reg = __ GetUnusedRegister(rc, pinned);
         if (rc == kGpReg) {
           __ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type);
+        } else if (kNeedS128RegPair && rc == kFpRegPair) {
+          __ Move(in_reg.low_fp(), DoubleRegister::from_code(reg_code),
+                  lowered_type);
         } else {
+          DCHECK_EQ(kFpReg, rc);
           __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code),
                   lowered_type);
         }

src/wasm/baseline/x64/liftoff-assembler-x64.h

@@ -418,9 +418,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                             ValueType type) {
   DCHECK_NE(dst, src);
   if (type == kWasmF32) {
     Movss(dst, src);
-  } else {
-    DCHECK_EQ(kWasmF64, type);
+  } else if (type == kWasmF64) {
     Movsd(dst, src);
+  } else {
+    DCHECK_EQ(kWasmS128, type);
+    Movapd(dst, src);
   }
 }
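Why the S128 case needs Movapd rather than Movsd: register-to-register movss/movsd only transfer the low 32/64 bits, while movapd copies all 128 bits. A standalone SSE2 illustration of the partial-move semantics (not V8 code):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m128d a = _mm_set_pd(1.0, 2.0);  // a: low=2.0, high=1.0
  __m128d b = _mm_set_pd(3.0, 4.0);  // b: low=4.0, high=3.0
  // movsd-style move: takes only the low lane from b, keeps a's high lane.
  __m128d low_only = _mm_move_sd(a, b);
  double out[2];
  _mm_storeu_pd(out, low_only);
  std::printf("low=%f high=%f\n", out[0], out[1]);  // low=4.0 high=1.0
  return 0;
}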

test/cctest/wasm/test-run-wasm-simd-liftoff.cc

@@ -11,6 +11,7 @@
 #include "test/cctest/cctest.h"
 #include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
 #include "test/common/wasm/wasm-macro-gen.h"
 
 namespace v8 {
 namespace internal {
@@ -52,6 +53,24 @@ WASM_SIMD_LIFTOFF_TEST(S128Global) {
   }
 }
 
+WASM_SIMD_LIFTOFF_TEST(S128Param) {
+  // Test how SIMD parameters in functions are processed. There is no easy way
+  // to specify a SIMD value when initializing a WasmRunner, so we manually
+  // add a new function with the right signature, and call it from main.
+  WasmRunner<int32_t> r(ExecutionTier::kLiftoff, kNoLowerSimd);
+  TestSignatures sigs;
+  // We use a temp local to materialize a SIMD value, since at this point
+  // Liftoff does not support any SIMD operations.
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  WasmFunctionCompiler& simd_func = r.NewFunction(sigs.i_s());
+  BUILD(simd_func, WASM_ONE);
+
+  BUILD(r,
+        WASM_CALL_FUNCTION(simd_func.function_index(), WASM_GET_LOCAL(temp1)));
+  CHECK_EQ(1, r.Call());
+}
+
 #undef WASM_SIMD_LIFTOFF_TEST
 
 }  // namespace test_run_wasm_simd_liftoff
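Usage note: in a standard V8 checkout, the new test should be runnable on its own via the usual cctest filter, along the lines of out/x64.release/cctest test-run-wasm-simd-liftoff/S128Param. The exact invocation depends on your build configuration; this is the conventional workflow, not part of the change itself.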

test/common/wasm/test-signatures.h

@@ -29,6 +29,7 @@ class TestSignatures {
         sig_i_rr(1, 2, kIntAnyRefTypes4),
         sig_i_a(1, 1, kIntFuncRefTypes4),
         sig_i_n(1, 1, kIntNullRefTypes4),
+        sig_i_s(1, 1, kIntSimd128Types4),
         sig_l_v(1, 0, kLongTypes4),
         sig_l_l(1, 1, kLongTypes4),
         sig_l_ll(1, 2, kLongTypes4),
@@ -65,6 +66,7 @@ class TestSignatures {
     for (int i = 1; i < 4; i++) kIntAnyRefTypes4[i] = kWasmAnyRef;
     for (int i = 1; i < 4; i++) kIntFuncRefTypes4[i] = kWasmFuncRef;
     for (int i = 1; i < 4; i++) kIntNullRefTypes4[i] = kWasmNullRef;
+    for (int i = 1; i < 4; i++) kIntSimd128Types4[i] = kWasmS128;
     for (int i = 0; i < 4; i++) kSimd128IntTypes4[i] = kWasmS128;
     kIntLongTypes4[0] = kWasmI32;
     kIntFloatTypes4[0] = kWasmI32;
@@ -72,6 +74,7 @@ class TestSignatures {
     kIntAnyRefTypes4[0] = kWasmI32;
     kIntFuncRefTypes4[0] = kWasmI32;
     kIntNullRefTypes4[0] = kWasmI32;
+    kIntSimd128Types4[0] = kWasmI32;
     kSimd128IntTypes4[1] = kWasmI32;
   }
@@ -93,6 +96,7 @@ class TestSignatures {
   FunctionSig* i_rr() { return &sig_i_rr; }
   FunctionSig* i_a() { return &sig_i_a; }
   FunctionSig* i_n() { return &sig_i_n; }
+  FunctionSig* i_s() { return &sig_i_s; }
   FunctionSig* f_f() { return &sig_f_f; }
   FunctionSig* f_ff() { return &sig_f_ff; }
@@ -139,6 +143,7 @@ class TestSignatures {
   ValueType kIntAnyRefTypes4[4];
   ValueType kIntFuncRefTypes4[4];
   ValueType kIntNullRefTypes4[4];
+  ValueType kIntSimd128Types4[4];
   ValueType kSimd128IntTypes4[4];
 
   FunctionSig sig_i_v;
@@ -154,6 +159,7 @@ class TestSignatures {
   FunctionSig sig_i_rr;
   FunctionSig sig_i_a;
   FunctionSig sig_i_n;
+  FunctionSig sig_i_s;
   FunctionSig sig_l_v;
   FunctionSig sig_l_l;
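For readers unfamiliar with the TestSignatures convention used above, here is a hypothetical, simplified model (an assumption, not V8's actual FunctionSig implementation): one 4-element array holds the return type in slot 0 and up to three parameter types after it, so several signatures of different arity can share the same array.

#include <cstdio>

// Simplified stand-ins (assumption): V8's real FunctionSig is richer.
enum ValueType { kWasmI32, kWasmS128 };

struct SigModel {
  int return_count;
  int param_count;
  const ValueType* reps;  // layout: [returns..., params...]
};

int main() {
  // Mirrors kIntSimd128Types4: i32 return in slot 0, v128 params after it.
  static const ValueType kIntSimd128Types4[4] = {kWasmI32, kWasmS128,
                                                 kWasmS128, kWasmS128};
  // sig_i_s(1, 1, kIntSimd128Types4) describes i32(v128): one return, one
  // param, both read from the shared array.
  SigModel sig_i_s{1, 1, kIntSimd128Types4};
  std::printf("i_s: %d return(s), %d param(s)\n", sig_i_s.return_count,
              sig_i_s.param_count);
  return 0;
}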