// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <type_traits>

#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"

namespace v8 {
namespace internal {
namespace wasm {
namespace test_run_wasm_simd {

namespace {

using FloatUnOp = float (*)(float);
using FloatBinOp = float (*)(float, float);
using FloatCompareOp = int (*)(float, float);
using Int32UnOp = int32_t (*)(int32_t);
using Int32BinOp = int32_t (*)(int32_t, int32_t);
using Int32CompareOp = int (*)(int32_t, int32_t);
using Int32ShiftOp = int32_t (*)(int32_t, int);
using Int16UnOp = int16_t (*)(int16_t);
using Int16BinOp = int16_t (*)(int16_t, int16_t);
using Int16CompareOp = int (*)(int16_t, int16_t);
using Int16ShiftOp = int16_t (*)(int16_t, int);
using Int8UnOp = int8_t (*)(int8_t);
using Int8BinOp = int8_t (*)(int8_t, int8_t);
using Int8CompareOp = int (*)(int8_t, int8_t);
using Int8ShiftOp = int8_t (*)(int8_t, int);

#define WASM_SIMD_TEST(name)                                          \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                    \
                             ExecutionTier execution_tier);           \
  TEST(RunWasm_##name##_turbofan) {                                   \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                    \
    RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan);    \
  }                                                                   \
  TEST(RunWasm_##name##_interpreter) {                                \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                    \
    RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kInterpreter); \
  }                                                                   \
  TEST(RunWasm_##name##_simd_lowered) {                               \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                    \
    RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kTurbofan);      \
  }                                                                   \
void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
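
// Illustrative sketch, not part of the original file: a hypothetical
// WASM_SIMD_TEST(Foo) expands to a RunWasm_Foo_Impl body plus three test
// drivers (TurboFan, the interpreter, and TurboFan with SIMD lowering), so
//
//   WASM_SIMD_TEST(Foo) {
//     WasmRunner<int32_t> r(execution_tier, lower_simd);
//     BUILD(r, WASM_ONE);
//     CHECK_EQ(1, r.Call());
//   }
//
// runs the same body on every tier.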

// Generic expected value functions.
template <typename T, typename = typename std::enable_if<
                          std::is_floating_point<T>::value>::type>
T Negate(T a) {
  return -a;
}

// For signed integral types, use base::AddWithWraparound.
template <typename T, typename = typename std::enable_if<
                          std::is_floating_point<T>::value>::type>
T Add(T a, T b) {
  return a + b;
}

// For signed integral types, use base::SubWithWraparound.
template <typename T, typename = typename std::enable_if<
                          std::is_floating_point<T>::value>::type>
T Sub(T a, T b) {
  return a - b;
}

// For signed integral types, use base::MulWithWraparound.
template <typename T, typename = typename std::enable_if<
                          std::is_floating_point<T>::value>::type>
T Mul(T a, T b) {
  return a * b;
}

template <typename T>
T Minimum(T a, T b) {
  return a <= b ? a : b;
}

template <typename T>
T Maximum(T a, T b) {
  return a >= b ? a : b;
}

template <typename T>
T UnsignedMinimum(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b) ? a : b;
}

template <typename T>
T UnsignedMaximum(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? a : b;
}

int Equal(float a, float b) { return a == b ? -1 : 0; }

template <typename T>
T Equal(T a, T b) {
  return a == b ? -1 : 0;
}

int NotEqual(float a, float b) { return a != b ? -1 : 0; }

template <typename T>
T NotEqual(T a, T b) {
  return a != b ? -1 : 0;
}

int Less(float a, float b) { return a < b ? -1 : 0; }

template <typename T>
T Less(T a, T b) {
  return a < b ? -1 : 0;
}

int LessEqual(float a, float b) { return a <= b ? -1 : 0; }

template <typename T>
T LessEqual(T a, T b) {
  return a <= b ? -1 : 0;
}

int Greater(float a, float b) { return a > b ? -1 : 0; }

template <typename T>
T Greater(T a, T b) {
  return a > b ? -1 : 0;
}

int GreaterEqual(float a, float b) { return a >= b ? -1 : 0; }

template <typename T>
T GreaterEqual(T a, T b) {
  return a >= b ? -1 : 0;
}

template <typename T>
T UnsignedLess(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) < static_cast<UnsignedT>(b) ? -1 : 0;
}

template <typename T>
T UnsignedLessEqual(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b) ? -1 : 0;
}

template <typename T>
T UnsignedGreater(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) > static_cast<UnsignedT>(b) ? -1 : 0;
}

template <typename T>
T UnsignedGreaterEqual(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? -1 : 0;
}

template <typename T>
T LogicalShiftLeft(T a, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) << shift;
}

template <typename T>
T LogicalShiftRight(T a, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) >> shift;
}
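
// Casting through the unsigned type makes these shifts logical (zero-filling)
// rather than arithmetic. For example, LogicalShiftRight<int8_t>(-1, 1)
// computes static_cast<uint8_t>(0xFF) >> 1 and yields 127 rather than -1.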

template <typename T>
T Clamp(int64_t value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
  int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
  int64_t clamped = std::max(min, std::min(max, value));
  return static_cast<T>(clamped);
}
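
// For example, Clamp<int8_t>(300) == 127 and Clamp<int8_t>(-300) == -128,
// mirroring the lane-wise saturation of the packing instructions under test.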

template <typename T>
int64_t Widen(T value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  return static_cast<int64_t>(value);
}

template <typename T>
int64_t UnsignedWiden(T value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<int64_t>(static_cast<UnsignedT>(value));
}

template <typename T>
T Narrow(int64_t value) {
  return Clamp<T>(value);
}

template <typename T>
T UnsignedNarrow(int64_t value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<T>(Clamp<UnsignedT>(value & 0xFFFFFFFFu));
}
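
// Note the clamp is done in the unsigned type before casting back to the
// signed lane type, so e.g. UnsignedNarrow<int8_t>(300) saturates to uint8_t
// 255, which reads back as int8_t -1 in two's complement.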

template <typename T>
T AddSaturate(T a, T b) {
  return Clamp<T>(Widen(a) + Widen(b));
}

template <typename T>
T SubSaturate(T a, T b) {
  return Clamp<T>(Widen(a) - Widen(b));
}

template <typename T>
T UnsignedAddSaturate(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return Clamp<UnsignedT>(UnsignedWiden(a) + UnsignedWiden(b));
}

template <typename T>
T UnsignedSubSaturate(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return Clamp<UnsignedT>(UnsignedWiden(a) - UnsignedWiden(b));
}

template <typename T>
T And(T a, T b) {
  return a & b;
}

template <typename T>
T Or(T a, T b) {
  return a | b;
}

template <typename T>
T Xor(T a, T b) {
  return a ^ b;
}

template <typename T>
T Not(T a) {
  return ~a;
}

template <typename T>
T LogicalNot(T a) {
  return a == 0 ? -1 : 0;
}

template <typename T>
T Sqrt(T a) {
  return std::sqrt(a);
}

}  // namespace

#define WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lane_value, lane_index) \
  WASM_IF(WASM_##LANE_TYPE##_NE(WASM_GET_LOCAL(lane_value),                  \
                                WASM_SIMD_##TYPE##_EXTRACT_LANE(             \
                                    lane_index, WASM_GET_LOCAL(value))),     \
WASM_RETURN1(WASM_ZERO))
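
// In other words, the generated function returns 0 early as soon as an
// extracted lane differs from the expected scalar in lane_value, so reaching
// a final WASM_ONE means every checked lane matched.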

#define TO_BYTE(val) static_cast<byte>(val)
#define WASM_SIMD_OP(op) kSimdPrefix, TO_BYTE(op)
#define WASM_SIMD_SPLAT(Type, x) x, WASM_SIMD_OP(kExpr##Type##Splat)
#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
#define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
#define WASM_SIMD_SHIFT_OP(op, shift, x) x, WASM_SIMD_OP(op), TO_BYTE(shift)
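
// These macros build raw bytecode: a SIMD instruction is its operands' bytes
// followed by kSimdPrefix and the opcode byte, with immediates (lane index,
// shift amount, shuffle mask) appended. For example,
// WASM_SIMD_SHIFT_OP(kExprI32x4Shl, 1, x) emits the bytes of x, then
// kSimdPrefix, TO_BYTE(kExprI32x4Shl), TO_BYTE(1).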

#define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
  x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
#define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)

#define WASM_SIMD_F32x4_SPLAT(x) x, WASM_SIMD_OP(kExprF32x4Splat)
#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
  x, WASM_SIMD_OP(kExprF32x4ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
  x, y, WASM_SIMD_OP(kExprF32x4ReplaceLane), TO_BYTE(lane)

#define WASM_SIMD_I32x4_SPLAT(x) x, WASM_SIMD_OP(kExprI32x4Splat)
#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
  x, WASM_SIMD_OP(kExprI32x4ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
  x, y, WASM_SIMD_OP(kExprI32x4ReplaceLane), TO_BYTE(lane)

#define WASM_SIMD_I16x8_SPLAT(x) x, WASM_SIMD_OP(kExprI16x8Splat)
#define WASM_SIMD_I16x8_EXTRACT_LANE(lane, x) \
  x, WASM_SIMD_OP(kExprI16x8ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I16x8_REPLACE_LANE(lane, x, y) \
  x, y, WASM_SIMD_OP(kExprI16x8ReplaceLane), TO_BYTE(lane)

#define WASM_SIMD_I8x16_SPLAT(x) x, WASM_SIMD_OP(kExprI8x16Splat)
#define WASM_SIMD_I8x16_EXTRACT_LANE(lane, x) \
  x, WASM_SIMD_OP(kExprI8x16ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I8x16_REPLACE_LANE(lane, x, y) \
  x, y, WASM_SIMD_OP(kExprI8x16ReplaceLane), TO_BYTE(lane)

#define WASM_SIMD_S8x16_SHUFFLE_OP(opcode, m, x, y)                        \
  x, y, WASM_SIMD_OP(opcode), TO_BYTE(m[0]), TO_BYTE(m[1]), TO_BYTE(m[2]), \
      TO_BYTE(m[3]), TO_BYTE(m[4]), TO_BYTE(m[5]), TO_BYTE(m[6]),          \
      TO_BYTE(m[7]), TO_BYTE(m[8]), TO_BYTE(m[9]), TO_BYTE(m[10]),         \
      TO_BYTE(m[11]), TO_BYTE(m[12]), TO_BYTE(m[13]), TO_BYTE(m[14]),      \
      TO_BYTE(m[15])

#define WASM_SIMD_LOAD_MEM(index) \
  index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, ZERO_OFFSET
#define WASM_SIMD_STORE_MEM(index, val) \
  index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, ZERO_OFFSET

// Runs tests of compiled code, using the interpreter as a reference.
#define WASM_SIMD_COMPILED_TEST(name)                              \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                 \
                             ExecutionTier execution_tier);        \
  TEST(RunWasm_##name##_turbofan) {                                \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                 \
    RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan); \
  }                                                                \
  TEST(RunWasm_##name##_simd_lowered) {                            \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                 \
    RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kTurbofan);   \
  }                                                                \
  void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)

// The macro below disables tests lowering for certain nodes where the simd
// lowering doesn't work correctly. Early return here if the CPU does not
// support SIMD as the graph will be implicitly lowered in that case.
#define WASM_SIMD_TEST_NO_LOWERING(name)                              \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                    \
                             ExecutionTier execution_tier);           \
  TEST(RunWasm_##name##_turbofan) {                                   \
    if (!CpuFeatures::SupportsWasmSimd128()) return;                  \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                    \
    RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan);    \
  }                                                                   \
  TEST(RunWasm_##name##_interpreter) {                                \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                    \
    RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kInterpreter); \
  }                                                                   \
  void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)

// Returns true if the platform can represent the result.
bool PlatformCanRepresent(float x) {
#if V8_TARGET_ARCH_ARM
  return std::fpclassify(x) != FP_SUBNORMAL;
#else
  return true;
#endif
}

// Returns true for very small and very large numbers. We skip these test
// values for the approximation instructions, which don't work at the extremes.
bool IsExtreme(float x) {
  float abs_x = std::fabs(x);
  const float kSmallFloatThreshold = 1.0e-32f;
  const float kLargeFloatThreshold = 1.0e32f;
  return abs_x != 0.0f &&  // 0 or -0 are fine.
         (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
}
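
// With these thresholds, IsExtreme(1.0e33f) and IsExtreme(1.0e-33f) are true
// while IsExtreme(0.0f) is false; zeros are exact even for the approximation
// instructions.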

WASM_SIMD_TEST(F32x4Splat) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    r.Call(x);
    float expected = x;
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      if (std::isnan(expected)) {
        CHECK(std::isnan(actual));
      } else {
        CHECK_EQ(actual, expected);
      }
    }
  }
}

WASM_SIMD_TEST(F32x4ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build function to replace each lane with its (FP) index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_F32(3.14159f))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_F32(0.0f))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
                                  1, WASM_GET_LOCAL(temp1), WASM_F32(1.0f))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
                                  2, WASM_GET_LOCAL(temp1), WASM_F32(2.0f))),
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(
                               3, WASM_GET_LOCAL(temp1), WASM_F32(3.0f))),
        WASM_ONE);

  r.Call();
  for (int i = 0; i < 4; i++) {
    CHECK_EQ(static_cast<float>(i), ReadLittleEndianValue<float>(&g[i]));
  }
}

// Tests both signed and unsigned conversion.
// v8:8425 tracks this test being enabled in the interpreter.
WASM_SIMD_COMPILED_TEST(F32x4ConvertI32x4) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create two output vectors to hold signed and unsigned results.
  float* g0 = r.builder().AddGlobal<float>(kWasmS128);
  float* g1 = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_UNOP(kExprF32x4SConvertI32x4, WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(
            1, WASM_SIMD_UNOP(kExprF32x4UConvertI32x4, WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    r.Call(x);
    float expected_signed = static_cast<float>(x);
    float expected_unsigned = static_cast<float>(static_cast<uint32_t>(x));
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<float>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<float>(&g1[i]));
    }
  }
}

bool IsSameNan(float expected, float actual) {
  // Sign is non-deterministic.
  uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~0x80000000;
  uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~0x80000000;
  // Some implementations convert signaling NaNs to quiet NaNs.
  return (expected_bits == actual_bits) ||
         ((expected_bits | 0x00400000) == actual_bits);
}

bool IsCanonical(float actual) {
  uint32_t actual_bits = bit_cast<uint32_t>(actual);
  // Canonical NaN has quiet bit and no payload.
  return (actual_bits & 0xFFC00000) == actual_bits;
}
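
// For instance, the canonical quiet NaN 0x7FC00000 (and its sign-flipped
// form 0xFFC00000) is accepted, while a payload-carrying NaN such as
// 0x7FC00001 is not.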

void CheckFloatResult(float x, float y, float expected, float actual,
                      bool exact = true) {
  if (std::isnan(expected)) {
    CHECK(std::isnan(actual));
    if (std::isnan(x) && IsSameNan(x, actual)) return;
    if (std::isnan(y) && IsSameNan(y, actual)) return;
    if (IsSameNan(expected, actual)) return;
    if (IsCanonical(actual)) return;
    // This is expected to assert; it's useful for debugging.
    CHECK_EQ(bit_cast<uint32_t>(expected), bit_cast<uint32_t>(actual));
  } else {
    if (exact) {
      CHECK_EQ(expected, actual);
      // The sign of 0's must match.
      CHECK_EQ(std::signbit(expected), std::signbit(actual));
      return;
    }
    // Otherwise, perform an approximate equality test. First check for
    // equality to handle +/-Infinity where approximate equality doesn't work.
    if (expected == actual) return;

    // 1% error allows all platforms to pass easily.
    constexpr float kApproximationError = 0.01f;
    float abs_error = std::abs(expected) * kApproximationError,
          min = expected - abs_error, max = expected + abs_error;
    CHECK_LE(min, actual);
    CHECK_GE(max, actual);
  }
}
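
// As a worked example of the approximate path: for expected == 100.0f, the
// 1% tolerance accepts any actual value in [99.0f, 101.0f].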

// Test some values not included in the float inputs from value_helper. These
// tests are useful for opcodes that are synthesized during code gen, like Min
// and Max on ia32 and x64.
static constexpr uint32_t nan_test_array[] = {
    // Bit patterns of quiet NaNs and signaling NaNs, with or without
    // additional payload.
    0x7FC00000, 0xFFC00000, 0x7FFFFFFF, 0x7F800001, 0xFF800001, 0x7F876543,
    0xFF876543,
    // Both Infinities.
    0x7F800000, 0xFF800000,
    // Some "normal" numbers, 1 and -1.
    0x3F800000, 0xBF800000};

#define FOR_FLOAT32_NAN_INPUTS(i) \
  for (size_t i = 0; i < arraysize(nan_test_array); ++i)

void RunF32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, FloatUnOp expected_op,
                      bool exact = true) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  // Global to hold output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    // Extreme values have larger errors so skip them for approximation tests.
    if (!exact && IsExtreme(x)) continue;
    float expected = expected_op(x);
    if (!PlatformCanRepresent(expected)) continue;
    r.Call(x);
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x, x, expected, actual, exact);
    }
  }

  FOR_FLOAT32_NAN_INPUTS(i) {
    float x = bit_cast<float>(nan_test_array[i]);
    if (!PlatformCanRepresent(x)) continue;
    // Extreme values have larger errors so skip them for approximation tests.
    if (!exact && IsExtreme(x)) continue;
    float expected = expected_op(x);
    if (!PlatformCanRepresent(expected)) continue;
    r.Call(x);
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x, x, expected, actual, exact);
    }
  }
}

WASM_SIMD_TEST(F32x4Abs) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Abs, std::abs);
}
WASM_SIMD_TEST(F32x4Neg) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Neg, Negate);
}

WASM_SIMD_TEST(F32x4RecipApprox) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
                   base::Recip, false /* !exact */);
}

WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
                   base::RecipSqrt, false /* !exact */);
}

void RunF32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, FloatBinOp expected_op) {
  WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
  // Global to hold output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_INPUTS(y) {
      if (!PlatformCanRepresent(y)) continue;
      float expected = expected_op(x, y);
      if (!PlatformCanRepresent(expected)) continue;
      r.Call(x, y);
      for (int i = 0; i < 4; i++) {
        float actual = ReadLittleEndianValue<float>(&g[i]);
        CheckFloatResult(x, y, expected, actual, true /* exact */);
      }
    }
  }

  FOR_FLOAT32_NAN_INPUTS(i) {
    float x = bit_cast<float>(nan_test_array[i]);
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_NAN_INPUTS(j) {
      float y = bit_cast<float>(nan_test_array[j]);
      if (!PlatformCanRepresent(y)) continue;
      float expected = expected_op(x, y);
      if (!PlatformCanRepresent(expected)) continue;
      r.Call(x, y);
      for (int i = 0; i < 4; i++) {
        float actual = ReadLittleEndianValue<float>(&g[i]);
        CheckFloatResult(x, y, expected, actual, true /* exact */);
      }
    }
  }
}

#undef FOR_FLOAT32_NAN_INPUTS

WASM_SIMD_TEST(F32x4Add) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Add, Add);
}
WASM_SIMD_TEST(F32x4Sub) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Sub, Sub);
}
WASM_SIMD_TEST(F32x4Mul) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Mul, Mul);
}
// v8:8425 tracks this test being enabled in the interpreter.
WASM_SIMD_COMPILED_TEST(F32x4Min) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Min, JSMin);
}
// v8:8425 tracks this test being enabled in the interpreter.
WASM_SIMD_COMPILED_TEST(F32x4Max) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
}

void RunF32x4CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                           WasmOpcode opcode, FloatCompareOp expected_op) {
  WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
  // Set up global to hold mask output.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test values, perform compare op, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_INPUTS(y) {
      if (!PlatformCanRepresent(y)) continue;
      float diff = x - y;  // Model comparison as subtraction.
      if (!PlatformCanRepresent(diff)) continue;
      r.Call(x, y);
      int32_t expected = expected_op(x, y);
      for (int i = 0; i < 4; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
      }
    }
  }
}

WASM_SIMD_TEST(F32x4Eq) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Eq, Equal);
}

WASM_SIMD_TEST(F32x4Ne) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ne, NotEqual);
}

WASM_SIMD_TEST(F32x4Gt) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Gt, Greater);
}

WASM_SIMD_TEST(F32x4Ge) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ge, GreaterEqual);
}

WASM_SIMD_TEST(F32x4Lt) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Lt, Less);
}

WASM_SIMD_TEST(F32x4Le) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}

WASM_SIMD_TEST(I32x4Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    r.Call(x);
    int32_t expected = x;
    for (int i = 0; i < 4; i++) {
      int32_t actual = ReadLittleEndianValue<int32_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}

WASM_SIMD_TEST(I32x4ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(-1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_REPLACE_LANE(
                               3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
        WASM_ONE);

  r.Call();
  for (int32_t i = 0; i < 4; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int32_t>(&g[i]));
  }
}

WASM_SIMD_TEST(I16x8Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);

  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int16_t expected = x;
    for (int i = 0; i < 8; i++) {
      int16_t actual = ReadLittleEndianValue<int16_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}

WASM_SIMD_TEST(I16x8ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_I32V(-1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  4, WASM_GET_LOCAL(temp1), WASM_I32V(4))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  5, WASM_GET_LOCAL(temp1), WASM_I32V(5))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  6, WASM_GET_LOCAL(temp1), WASM_I32V(6))),
        WASM_SET_GLOBAL(0, WASM_SIMD_I16x8_REPLACE_LANE(
                               7, WASM_GET_LOCAL(temp1), WASM_I32V(7))),
        WASM_ONE);

  r.Call();
  for (int16_t i = 0; i < 8; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int16_t>(&g[i]));
  }
}

WASM_SIMD_TEST(I8x16Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);

  FOR_INT8_INPUTS(x) {
    r.Call(x);
    int8_t expected = x;
    for (int i = 0; i < 16; i++) {
      int8_t actual = ReadLittleEndianValue<int8_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}

WASM_SIMD_TEST(I8x16ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_I32V(-1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  4, WASM_GET_LOCAL(temp1), WASM_I32V(4))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  5, WASM_GET_LOCAL(temp1), WASM_I32V(5))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  6, WASM_GET_LOCAL(temp1), WASM_I32V(6))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  7, WASM_GET_LOCAL(temp1), WASM_I32V(7))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  8, WASM_GET_LOCAL(temp1), WASM_I32V(8))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  9, WASM_GET_LOCAL(temp1), WASM_I32V(9))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  10, WASM_GET_LOCAL(temp1), WASM_I32V(10))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  11, WASM_GET_LOCAL(temp1), WASM_I32V(11))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  12, WASM_GET_LOCAL(temp1), WASM_I32V(12))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  13, WASM_GET_LOCAL(temp1), WASM_I32V(13))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  14, WASM_GET_LOCAL(temp1), WASM_I32V(14))),
        WASM_SET_GLOBAL(0, WASM_SIMD_I8x16_REPLACE_LANE(
                               15, WASM_GET_LOCAL(temp1), WASM_I32V(15))),
        WASM_ONE);

  r.Call();
  for (int8_t i = 0; i < 16; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int8_t>(&g[i]));
  }
}

// Use doubles to ensure exact conversion.
int32_t ConvertToInt(double val, bool unsigned_integer) {
  if (std::isnan(val)) return 0;
  if (unsigned_integer) {
    if (val < 0) return 0;
    if (val > kMaxUInt32) return kMaxUInt32;
    return static_cast<uint32_t>(val);
  } else {
    if (val < kMinInt) return kMinInt;
    if (val > kMaxInt) return kMaxInt;
    return static_cast<int>(val);
  }
}
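
// The conversion truncates toward zero and saturates at the integer bounds,
// e.g. ConvertToInt(3.9, false) == 3, ConvertToInt(-1.0, true) == 0, and
// ConvertToInt(5.0e9, false) == kMaxInt.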
|
|
|
|
|
|
|
|
// Tests both signed and unsigned conversion.
|
2018-06-20 06:03:44 +00:00
|
|
|
WASM_SIMD_TEST(I32x4ConvertF32x4) {
|
2019-03-05 01:44:05 +00:00
|
|
|
WasmRunner<int32_t, float> r(execution_tier, lower_simd);
|
|
|
|
// Create two output vectors to hold signed and unsigned results.
|
|
|
|
int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
|
|
|
|
int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
|
|
|
|
// Build fn to splat test value, perform conversions, and write the results.
|
|
|
|
byte value = 0;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
|
|
|
BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value))),
|
|
|
|
WASM_SET_GLOBAL(
|
|
|
|
0, WASM_SIMD_UNOP(kExprI32x4SConvertF32x4, WASM_GET_LOCAL(temp1))),
|
|
|
|
WASM_SET_GLOBAL(
|
|
|
|
1, WASM_SIMD_UNOP(kExprI32x4UConvertF32x4, WASM_GET_LOCAL(temp1))),
|
|
|
|
WASM_ONE);
|
2016-08-23 19:59:19 +00:00
|
|
|
|
2019-03-05 01:44:05 +00:00
|
|
|
FOR_FLOAT32_INPUTS(x) {
|
|
|
|
if (!PlatformCanRepresent(x)) continue;
|
|
|
|
r.Call(x);
|
|
|
|
int32_t expected_signed = ConvertToInt(x, false);
|
|
|
|
int32_t expected_unsigned = ConvertToInt(x, true);
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
|
|
|
|
CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g1[i]));
|
|
|
|
}
|
2016-12-06 01:12:15 +00:00
|
|
|
}
|
|
|
|
}
|
2017-01-26 02:18:00 +00:00
|
|
|
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
// Tests both signed and unsigned conversion from I16x8 (unpacking).
WASM_SIMD_TEST(I32x4ConvertI16x8) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create four output vectors to hold signed and unsigned results.
  int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g2 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g3 = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(1, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(2, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(3, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int32_t expected_signed = static_cast<int32_t>(Widen<int16_t>(x));
    int32_t expected_unsigned = static_cast<int32_t>(UnsignedWiden<int16_t>(x));
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g1[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g2[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g3[i]));
    }
  }
}
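
// Reference-only sketch of what the Widen/UnsignedWiden helpers used above
// are assumed to compute (illustrative names, not part of the harness):
// sign extension preserves the narrow value, zero extension reinterprets its
// bits as unsigned first.
template <typename Narrow>
int32_t ReferenceSignExtend(Narrow x) {
  return static_cast<int32_t>(x);  // Implicit sign extension.
}
template <typename Narrow>
int32_t ReferenceZeroExtend(Narrow x) {
  using Unsigned = typename std::make_unsigned<Narrow>::type;
  return static_cast<int32_t>(static_cast<Unsigned>(x));  // Zero extension.
}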

void RunI32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, Int32UnOp expected_op) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    r.Call(x);
    int32_t expected = expected_op(x);
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
    }
  }
}
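
// For reference, the function built by RunI32x4UnOpTest corresponds roughly
// to this wasm text (a sketch; the runner declares the local and global):
//   (func (param i32) (result i32)
//     (local.set $temp1 (i32x4.splat (local.get 0)))
//     (global.set $g (<unop> (local.get $temp1)))
//     (i32.const 1))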

WASM_SIMD_TEST(I32x4Neg) {
  RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg,
                   base::NegateWithWraparound);
}

WASM_SIMD_TEST(S128Not) {
  RunI32x4UnOpTest(execution_tier, lower_simd, kExprS128Not, Not);
}

void RunI32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, Int32BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    FOR_INT32_INPUTS(y) {
      r.Call(x, y);
      int32_t expected = expected_op(x, y);
      for (int i = 0; i < 4; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
      }
    }
  }
}
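
// The base::*WithWraparound helpers (src/base/overflowing-math.h) are used as
// reference implementations below because signed overflow is undefined
// behavior in C++. A minimal sketch of the technique, assuming two's
// complement:
inline int32_t ReferenceWraparoundAdd(int32_t a, int32_t b) {
  // Unsigned arithmetic is defined modulo 2^32; casting back yields the
  // wrapped-around signed result.
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}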

WASM_SIMD_TEST(I32x4Add) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Add,
                    base::AddWithWraparound);
}

WASM_SIMD_TEST(I32x4Sub) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Sub,
                    base::SubWithWraparound);
}

WASM_SIMD_TEST(I32x4Mul) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Mul,
                    base::MulWithWraparound);
}

WASM_SIMD_TEST(I32x4MinS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinS, Minimum);
}

WASM_SIMD_TEST(I32x4MaxS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxS, Maximum);
}

WASM_SIMD_TEST(I32x4MinU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinU,
                    UnsignedMinimum);
}

WASM_SIMD_TEST(I32x4MaxU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxU,
                    UnsignedMaximum);
}

WASM_SIMD_TEST(S128And) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128And, And);
}

WASM_SIMD_TEST(S128Or) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Or, Or);
}

WASM_SIMD_TEST(S128Xor) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Xor, Xor);
}

WASM_SIMD_TEST(I32x4Eq) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Eq, Equal);
}

WASM_SIMD_TEST(I32x4Ne) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Ne, NotEqual);
}

WASM_SIMD_TEST(I32x4LtS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LtS, Less);
}

WASM_SIMD_TEST(I32x4LeS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LeS, LessEqual);
}

WASM_SIMD_TEST(I32x4GtS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GtS, Greater);
}

WASM_SIMD_TEST(I32x4GeS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GeS, GreaterEqual);
}

WASM_SIMD_TEST(I32x4LtU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LtU, UnsignedLess);
}

WASM_SIMD_TEST(I32x4LeU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LeU,
                    UnsignedLessEqual);
}

WASM_SIMD_TEST(I32x4GtU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GtU, UnsignedGreater);
}

WASM_SIMD_TEST(I32x4GeU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GeU,
                    UnsignedGreaterEqual);
}

void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                         WasmOpcode opcode, Int32ShiftOp expected_op) {
  for (int shift = 1; shift < 32; shift++) {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
    byte value = 0;
    byte simd1 = r.AllocateLocal(kWasmS128);
    BUILD(r,
          WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
          WASM_SET_GLOBAL(
              0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
          WASM_ONE);

    FOR_INT32_INPUTS(x) {
      r.Call(x);
      int32_t expected = expected_op(x, shift);
      for (int i = 0; i < 4; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
      }
    }
  }
}
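
// Reference-only sketch of the shift semantics the helpers above encode (the
// actual LogicalShiftLeft/LogicalShiftRight/ArithmeticShiftRight helpers are
// defined earlier in this file; these illustrative versions operate on the
// unsigned representation to avoid undefined behavior on negative values):
inline int32_t ReferenceLogicalShiftLeft(int32_t v, int shift) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) << shift);
}
inline int32_t ReferenceLogicalShiftRight(int32_t v, int shift) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) >> shift);
}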

WASM_SIMD_TEST(I32x4Shl) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
                      LogicalShiftLeft);
}

WASM_SIMD_TEST(I32x4ShrS) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrS,
                      ArithmeticShiftRight);
}

WASM_SIMD_TEST(I32x4ShrU) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrU,
                      LogicalShiftRight);
}

// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create four output vectors to hold signed and unsigned results.
  int16_t* g0 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g1 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g2 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g3 = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(1, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(2, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(3, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_INT8_INPUTS(x) {
    r.Call(x);
    int16_t expected_signed = static_cast<int16_t>(Widen<int8_t>(x));
    int16_t expected_unsigned = static_cast<int16_t>(UnsignedWiden<int8_t>(x));
    for (int i = 0; i < 8; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g1[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g2[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g3[i]));
    }
  }
}

// Tests both signed and unsigned conversion from I32x4 (packing).
WASM_SIMD_TEST(I16x8ConvertI32x4) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create output vectors to hold signed and unsigned results.
  int16_t* g0 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g1 = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_BINOP(kExprI16x8SConvertI32x4, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(
            1, WASM_SIMD_BINOP(kExprI16x8UConvertI32x4, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    r.Call(x);
    int16_t expected_signed = Narrow<int16_t>(x);
    int16_t expected_unsigned = UnsignedNarrow<int16_t>(x);
    for (int i = 0; i < 8; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g1[i]));
    }
  }
}
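
// Reference-only sketch of signed saturating narrowing, which the Narrow /
// UnsignedNarrow helpers used above are assumed to implement (illustrative
// name, not part of the harness): values outside the destination range clamp
// to its limits instead of being truncated.
inline int16_t ReferenceSaturateNarrowS(int32_t x) {
  if (x > 32767) return 32767;    // Clamp to int16_t maximum.
  if (x < -32768) return -32768;  // Clamp to int16_t minimum.
  return static_cast<int16_t>(x);
}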

void RunI16x8UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, Int16UnOp expected_op) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int16_t expected = expected_op(x);
    for (int i = 0; i < 8; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
    }
  }
}

WASM_SIMD_TEST(I16x8Neg) {
  RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg,
                   base::NegateWithWraparound);
}

void RunI16x8BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, Int16BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);

  FOR_INT16_INPUTS(x) {
    FOR_INT16_INPUTS(y) {
      r.Call(x, y);
      int16_t expected = expected_op(x, y);
      for (int i = 0; i < 8; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
      }
    }
  }
}

WASM_SIMD_TEST(I16x8Add) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add,
                    base::AddWithWraparound);
}

WASM_SIMD_TEST(I16x8AddSaturateS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSaturateS,
                    AddSaturate);
}

WASM_SIMD_TEST(I16x8Sub) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Sub,
                    base::SubWithWraparound);
}

WASM_SIMD_TEST(I16x8SubSaturateS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSaturateS,
                    SubSaturate);
}

WASM_SIMD_TEST(I16x8Mul) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Mul,
                    base::MulWithWraparound);
}

WASM_SIMD_TEST(I16x8MinS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinS, Minimum);
}

WASM_SIMD_TEST(I16x8MaxS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxS, Maximum);
}

WASM_SIMD_TEST(I16x8AddSaturateU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSaturateU,
                    UnsignedAddSaturate);
}

WASM_SIMD_TEST(I16x8SubSaturateU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSaturateU,
                    UnsignedSubSaturate);
}

WASM_SIMD_TEST(I16x8MinU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinU,
                    UnsignedMinimum);
}

WASM_SIMD_TEST(I16x8MaxU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxU,
                    UnsignedMaximum);
}

WASM_SIMD_TEST(I16x8Eq) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Eq, Equal);
}

WASM_SIMD_TEST(I16x8Ne) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Ne, NotEqual);
}

WASM_SIMD_TEST(I16x8LtS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LtS, Less);
}

WASM_SIMD_TEST(I16x8LeS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LeS, LessEqual);
}

WASM_SIMD_TEST(I16x8GtS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GtS, Greater);
}

WASM_SIMD_TEST(I16x8GeS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GeS, GreaterEqual);
}

WASM_SIMD_TEST(I16x8GtU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GtU, UnsignedGreater);
}

WASM_SIMD_TEST(I16x8GeU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GeU,
                    UnsignedGreaterEqual);
}

WASM_SIMD_TEST(I16x8LtU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LtU, UnsignedLess);
}

WASM_SIMD_TEST(I16x8LeU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LeU,
                    UnsignedLessEqual);
}
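
// Reference-only sketch of 16-bit saturating addition, which the AddSaturate
// and UnsignedAddSaturate helpers used above are assumed to match
// (illustrative only): the sum is computed in a wider type, then clamped.
inline int16_t ReferenceAddSaturateS(int16_t a, int16_t b) {
  int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  if (sum > 32767) return 32767;
  if (sum < -32768) return -32768;
  return static_cast<int16_t>(sum);
}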

void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                         WasmOpcode opcode, Int16ShiftOp expected_op) {
  for (int shift = 1; shift < 16; shift++) {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
    byte value = 0;
    byte simd1 = r.AllocateLocal(kWasmS128);
    BUILD(r,
          WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
          WASM_SET_GLOBAL(
              0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
          WASM_ONE);

    FOR_INT16_INPUTS(x) {
      r.Call(x);
      int16_t expected = expected_op(x, shift);
      for (int i = 0; i < 8; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
      }
    }
  }
}

WASM_SIMD_TEST(I16x8Shl) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
                      LogicalShiftLeft);
}

WASM_SIMD_TEST(I16x8ShrS) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrS,
                      ArithmeticShiftRight);
}

WASM_SIMD_TEST(I16x8ShrU) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrU,
                      LogicalShiftRight);
}

void RunI8x16UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, Int8UnOp expected_op) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_INT8_INPUTS(x) {
    r.Call(x);
    int8_t expected = expected_op(x);
    for (int i = 0; i < 16; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
    }
  }
}

WASM_SIMD_TEST(I8x16Neg) {
  RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg,
                   base::NegateWithWraparound);
}

// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create output vectors to hold signed and unsigned results.
  int8_t* g0 = r.builder().AddGlobal<int8_t>(kWasmS128);
  int8_t* g1 = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_BINOP(kExprI8x16SConvertI16x8, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(
            1, WASM_SIMD_BINOP(kExprI8x16UConvertI16x8, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp1))),
        WASM_ONE);

  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int8_t expected_signed = Narrow<int8_t>(x);
    int8_t expected_unsigned = UnsignedNarrow<int8_t>(x);
    for (int i = 0; i < 16; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int8_t>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int8_t>(&g1[i]));
    }
  }
}

void RunI8x16BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, Int8BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);

  FOR_INT8_INPUTS(x) {
    FOR_INT8_INPUTS(y) {
      r.Call(x, y);
      int8_t expected = expected_op(x, y);
      for (int i = 0; i < 16; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
      }
    }
  }
}

WASM_SIMD_TEST(I8x16Add) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add,
                    base::AddWithWraparound);
}

WASM_SIMD_TEST(I8x16AddSaturateS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSaturateS,
                    AddSaturate);
}

WASM_SIMD_TEST(I8x16Sub) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Sub,
                    base::SubWithWraparound);
}

WASM_SIMD_TEST(I8x16SubSaturateS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSaturateS,
                    SubSaturate);
}

WASM_SIMD_TEST(I8x16MinS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinS, Minimum);
}

WASM_SIMD_TEST(I8x16MaxS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxS, Maximum);
}

WASM_SIMD_TEST(I8x16AddSaturateU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSaturateU,
                    UnsignedAddSaturate);
}

WASM_SIMD_TEST(I8x16SubSaturateU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSaturateU,
                    UnsignedSubSaturate);
}

WASM_SIMD_TEST(I8x16MinU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinU,
                    UnsignedMinimum);
}

WASM_SIMD_TEST(I8x16MaxU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxU,
                    UnsignedMaximum);
}

WASM_SIMD_TEST(I8x16Eq) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Eq, Equal);
}

WASM_SIMD_TEST(I8x16Ne) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Ne, NotEqual);
}

WASM_SIMD_TEST(I8x16GtS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GtS, Greater);
}

WASM_SIMD_TEST(I8x16GeS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GeS, GreaterEqual);
}

WASM_SIMD_TEST(I8x16LtS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LtS, Less);
}

WASM_SIMD_TEST(I8x16LeS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LeS, LessEqual);
}

WASM_SIMD_TEST(I8x16GtU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GtU, UnsignedGreater);
}

WASM_SIMD_TEST(I8x16GeU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GeU,
                    UnsignedGreaterEqual);
}

WASM_SIMD_TEST(I8x16LtU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LtU, UnsignedLess);
}

WASM_SIMD_TEST(I8x16LeU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LeU,
                    UnsignedLessEqual);
}

WASM_SIMD_TEST(I8x16Mul) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Mul,
                    base::MulWithWraparound);
}

void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                         WasmOpcode opcode, Int8ShiftOp expected_op) {
  for (int shift = 1; shift < 8; shift++) {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
    byte value = 0;
    byte simd1 = r.AllocateLocal(kWasmS128);
    BUILD(r,
          WASM_SET_LOCAL(simd1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
          WASM_SET_GLOBAL(
              0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
          WASM_ONE);

    FOR_INT8_INPUTS(x) {
      r.Call(x);
      int8_t expected = expected_op(x, shift);
      for (int i = 0; i < 16; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
      }
    }
  }
}

WASM_SIMD_TEST(I8x16Shl) {
  RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16Shl,
                      LogicalShiftLeft);
}

WASM_SIMD_TEST(I8x16ShrS) {
  RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrS,
                      ArithmeticShiftRight);
}

WASM_SIMD_TEST(I8x16ShrU) {
  RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrU,
                      LogicalShiftRight);
}

// Test Select by making a mask where the 1st and 2nd lanes are true and the
// rest false, and comparing for non-equality with zero to convert to a
// boolean vector.
#define WASM_SIMD_SELECT_TEST(format)                                        \
  WASM_SIMD_TEST(S##format##Select) {                                        \
    WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);     \
    byte val1 = 0;                                                           \
    byte val2 = 1;                                                           \
    byte src1 = r.AllocateLocal(kWasmS128);                                  \
    byte src2 = r.AllocateLocal(kWasmS128);                                  \
    byte zero = r.AllocateLocal(kWasmS128);                                  \
    byte mask = r.AllocateLocal(kWasmS128);                                  \
    BUILD(r,                                                                 \
          WASM_SET_LOCAL(src1,                                               \
                         WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val1))), \
          WASM_SET_LOCAL(src2,                                               \
                         WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val2))), \
          WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)),      \
          WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE(           \
                                   1, WASM_GET_LOCAL(zero), WASM_I32V(-1))), \
          WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE(           \
                                   2, WASM_GET_LOCAL(mask), WASM_I32V(-1))), \
          WASM_SET_LOCAL(                                                    \
              mask,                                                          \
              WASM_SIMD_SELECT(                                              \
                  format, WASM_GET_LOCAL(src1), WASM_GET_LOCAL(src2),        \
                  WASM_SIMD_BINOP(kExprI##format##Ne, WASM_GET_LOCAL(mask),  \
                                  WASM_GET_LOCAL(zero)))),                   \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0),               \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 1),               \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 2),               \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 3), WASM_ONE);    \
                                                                             \
    CHECK_EQ(1, r.Call(0x12, 0x34));                                         \
  }

WASM_SIMD_SELECT_TEST(32x4)
WASM_SIMD_SELECT_TEST(16x8)
WASM_SIMD_SELECT_TEST(8x16)
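
// The select used above is bitwise: each result bit is taken from the first
// operand where the mask bit is set and from the second where it is clear.
// The Ne comparison produces all-ones or all-zero lanes, so the bitwise
// operation behaves lanewise here. A scalar sketch (illustrative only):
inline int32_t ReferenceBitSelect(int32_t if_set, int32_t if_clear,
                                  int32_t mask) {
  return (if_set & mask) | (if_clear & ~mask);
}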

// Test Select by making a mask where the 1st and 2nd lanes are non-zero and
// the rest 0. The mask is not the result of a comparison op.
#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format)                           \
  WASM_SIMD_TEST_NO_LOWERING(S##format##NonCanonicalSelect) {                 \
    WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier,         \
                                                     lower_simd);            \
    byte val1 = 0;                                                           \
    byte val2 = 1;                                                           \
    byte combined = 2;                                                       \
    byte src1 = r.AllocateLocal(kWasmS128);                                  \
    byte src2 = r.AllocateLocal(kWasmS128);                                  \
    byte zero = r.AllocateLocal(kWasmS128);                                  \
    byte mask = r.AllocateLocal(kWasmS128);                                  \
    BUILD(r,                                                                 \
          WASM_SET_LOCAL(src1,                                               \
                         WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val1))), \
          WASM_SET_LOCAL(src2,                                               \
                         WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val2))), \
          WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)),      \
          WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE(           \
                                   1, WASM_GET_LOCAL(zero), WASM_I32V(0xF))), \
          WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE(           \
                                   2, WASM_GET_LOCAL(mask), WASM_I32V(0xF))), \
          WASM_SET_LOCAL(mask, WASM_SIMD_SELECT(format, WASM_GET_LOCAL(src1), \
                                                WASM_GET_LOCAL(src2),        \
                                                WASM_GET_LOCAL(mask))),      \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0),               \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 1),           \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 2),           \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 3), WASM_ONE);    \
                                                                             \
    CHECK_EQ(1, r.Call(0x12, 0x34, 0x32));                                   \
  }

WASM_SIMD_NON_CANONICAL_SELECT_TEST(32x4)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(16x8)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(8x16)
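
// Worked example of the bitwise behavior checked above: with inputs 0x12 and
// 0x34 and mask lanes of 0xF, lanes 1 and 2 compute
// (0x12 & 0xF) | (0x34 & ~0xF) = 0x02 | 0x30 = 0x32, the `combined` value.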
// Test binary ops with two lane test patterns, all lanes distinct.
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
template <typename T>
|
2017-04-24 18:53:16 +00:00
|
|
|
void RunBinaryLaneOpTest(
|
2018-08-21 15:01:31 +00:00
|
|
|
ExecutionTier execution_tier, LowerSimd lower_simd, WasmOpcode simd_op,
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
const std::array<T, kSimd128Size / sizeof(T)>& expected) {
|
2018-08-21 15:01:31 +00:00
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
  // Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
  T* src0 = r.builder().AddGlobal<T>(kWasmS128);
  T* src1 = r.builder().AddGlobal<T>(kWasmS128);
  static const int kElems = kSimd128Size / sizeof(T);
  for (int i = 0; i < kElems; i++) {
    WriteLittleEndianValue<T>(&src0[i], i);
    WriteLittleEndianValue<T>(&src1[i], kElems + i);
  }
  if (simd_op == kExprS8x16Shuffle) {
    BUILD(r,
          WASM_SET_GLOBAL(0, WASM_SIMD_S8x16_SHUFFLE_OP(simd_op, expected,
                                                        WASM_GET_GLOBAL(0),
                                                        WASM_GET_GLOBAL(1))),
          WASM_ONE);
  } else {
    BUILD(r,
          WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(simd_op, WASM_GET_GLOBAL(0),
                                             WASM_GET_GLOBAL(1))),
          WASM_ONE);
  }

  CHECK_EQ(1, r.Call());
  for (size_t i = 0; i < expected.size(); i++) {
    CHECK_EQ(ReadLittleEndianValue<T>(&src0[i]), expected[i]);
  }
}
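
// Note: Wasm values are stored little-endian, so WriteLittleEndianValue /
// ReadLittleEndianValue byte-swap on big-endian hosts, keeping lane indices
// consistent across platforms.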

WASM_SIMD_TEST(I32x4AddHoriz) {
  // Inputs are [0 1 2 3] and [4 5 6 7].
  RunBinaryLaneOpTest<int32_t>(execution_tier, lower_simd, kExprI32x4AddHoriz,
                               {{1, 5, 9, 13}});
}

WASM_SIMD_TEST(I16x8AddHoriz) {
  // Inputs are [0 1 2 3 4 5 6 7] and [8 9 10 11 12 13 14 15].
  RunBinaryLaneOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8AddHoriz,
                               {{1, 5, 9, 13, 17, 21, 25, 29}});
}

WASM_SIMD_TEST(F32x4AddHoriz) {
  // Inputs are [0.0f 1.0f 2.0f 3.0f] and [4.0f 5.0f 6.0f 7.0f].
  RunBinaryLaneOpTest<float>(execution_tier, lower_simd, kExprF32x4AddHoriz,
                             {{1.0f, 5.0f, 9.0f, 13.0f}});
}

// Test shuffle ops.
void RunShuffleOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode simd_op,
                      const std::array<int8_t, kSimd128Size>& shuffle) {
  // Test the original shuffle.
  RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op, shuffle);

  // Test a non-canonical (inputs reversed) version of the shuffle.
  std::array<int8_t, kSimd128Size> other_shuffle(shuffle);
  for (size_t i = 0; i < shuffle.size(); ++i) other_shuffle[i] ^= kSimd128Size;
  RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op,
                              other_shuffle);

  // Test the swizzle (one-operand) version of the shuffle.
  std::array<int8_t, kSimd128Size> swizzle(shuffle);
  for (size_t i = 0; i < shuffle.size(); ++i) swizzle[i] &= (kSimd128Size - 1);
  RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op, swizzle);

  // Test the non-canonical swizzle (one-operand) version of the shuffle.
  std::array<int8_t, kSimd128Size> other_swizzle(shuffle);
  for (size_t i = 0; i < shuffle.size(); ++i) other_swizzle[i] |= kSimd128Size;
  RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op,
                              other_swizzle);
}
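
// Illustrative note on the index arithmetic above: shuffle indices 0..15
// select a byte from the first input and 16..31 from the second, so XOR-ing
// an index with kSimd128Size swaps which input it selects, AND-ing with
// (kSimd128Size - 1) maps every index onto the first input, and OR-ing with
// kSimd128Size maps every index onto the second.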

#define SHUFFLE_LIST(V)  \
  V(S128Identity)        \
  V(S32x4Dup)            \
  V(S32x4ZipLeft)        \
  V(S32x4ZipRight)       \
  V(S32x4UnzipLeft)      \
  V(S32x4UnzipRight)     \
  V(S32x4TransposeLeft)  \
  V(S32x4TransposeRight) \
  V(S32x2Reverse)        \
  V(S32x4Irregular)      \
  V(S16x8Dup)            \
  V(S16x8ZipLeft)        \
  V(S16x8ZipRight)       \
  V(S16x8UnzipLeft)      \
  V(S16x8UnzipRight)     \
  V(S16x8TransposeLeft)  \
  V(S16x8TransposeRight) \
  V(S16x4Reverse)        \
  V(S16x2Reverse)        \
  V(S16x8Irregular)      \
  V(S8x16Dup)            \
  V(S8x16ZipLeft)        \
  V(S8x16ZipRight)       \
  V(S8x16UnzipLeft)      \
  V(S8x16UnzipRight)     \
  V(S8x16TransposeLeft)  \
  V(S8x16TransposeRight) \
  V(S8x8Reverse)         \
  V(S8x4Reverse)         \
  V(S8x2Reverse)         \
  V(S8x16Irregular)

enum ShuffleKey {
#define SHUFFLE_ENUM_VALUE(Name) k##Name,
  SHUFFLE_LIST(SHUFFLE_ENUM_VALUE)
#undef SHUFFLE_ENUM_VALUE
      kNumShuffleKeys
};

using Shuffle = std::array<int8_t, kSimd128Size>;
using ShuffleMap = std::map<ShuffleKey, const Shuffle>;

ShuffleMap test_shuffles = {
    {kS128Identity,
     {{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}}},
    {kS32x4Dup,
     {{16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19}}},
    {kS32x4ZipLeft, {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}}},
    {kS32x4ZipRight,
     {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}}},
    {kS32x4UnzipLeft,
     {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}}},
    {kS32x4UnzipRight,
     {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}}},
    {kS32x4TransposeLeft,
     {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}}},
    {kS32x4TransposeRight,
     {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}}},
    {kS32x2Reverse,  // swizzle only
     {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}}},
    {kS32x4Irregular,
     {{0, 1, 2, 3, 16, 17, 18, 19, 16, 17, 18, 19, 20, 21, 22, 23}}},
    {kS16x8Dup,
     {{18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19}}},
    {kS16x8ZipLeft, {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}}},
    {kS16x8ZipRight,
     {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}}},
    {kS16x8UnzipLeft,
     {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}}},
    {kS16x8UnzipRight,
     {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}}},
    {kS16x8TransposeLeft,
     {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}}},
    {kS16x8TransposeRight,
     {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}}},
    {kS16x4Reverse,  // swizzle only
     {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}}},
    {kS16x2Reverse,  // swizzle only
     {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}}},
    {kS16x8Irregular,
     {{0, 1, 16, 17, 16, 17, 0, 1, 4, 5, 20, 21, 6, 7, 22, 23}}},
    {kS8x16Dup,
     {{19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19}}},
    {kS8x16ZipLeft, {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}}},
    {kS8x16ZipRight,
     {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}}},
    {kS8x16UnzipLeft,
     {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}}},
    {kS8x16UnzipRight,
     {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}}},
    {kS8x16TransposeLeft,
     {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}}},
    {kS8x16TransposeRight,
     {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}}},
    {kS8x8Reverse,  // swizzle only
     {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}}},
    {kS8x4Reverse,  // swizzle only
     {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}}},
    {kS8x2Reverse,  // swizzle only
     {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}}},
    {kS8x16Irregular,
     {{0, 16, 0, 16, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}}},
};
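
// A minimal scalar sketch of the S8x16Shuffle semantics the table above
// encodes: indices 0..15 select a byte from the first operand, 16..31 from
// the second. ReferenceShuffle is an illustrative helper added for
// exposition only; the tests compare against the precomputed patterns above
// or against the interpreter.
inline Shuffle ReferenceShuffle(const Shuffle& a, const Shuffle& b,
                                const Shuffle& shuffle) {
  Shuffle result;
  for (int i = 0; i < kSimd128Size; ++i) {
    result[i] = shuffle[i] < kSimd128Size ? a[shuffle[i]]
                                          : b[shuffle[i] - kSimd128Size];
  }
  return result;
}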

#define SHUFFLE_TEST(Name)                                          \
  WASM_SIMD_TEST(Name) {                                            \
    ShuffleMap::const_iterator it = test_shuffles.find(k##Name);    \
    DCHECK_NE(it, test_shuffles.end());                             \
    RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, \
                     it->second);                                   \
  }
SHUFFLE_LIST(SHUFFLE_TEST)
#undef SHUFFLE_TEST
#undef SHUFFLE_LIST

// Test shuffles that blend the two vectors (elements remain in their lanes).
WASM_SIMD_TEST(S8x16Blend) {
  std::array<int8_t, kSimd128Size> expected;
  for (int bias = 1; bias < kSimd128Size; bias++) {
    for (int i = 0; i < bias; i++) expected[i] = i;
    for (int i = bias; i < kSimd128Size; i++) expected[i] = i + kSimd128Size;
    RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, expected);
  }
}
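
// For example, with bias == 4 the expected pattern above is
// {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}: the first
// four bytes come from the first input and the rest from the second.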

// Test shuffles that concatenate the two vectors.
WASM_SIMD_TEST(S8x16Concat) {
  std::array<int8_t, kSimd128Size> expected;
  // n is the offset (bias) of the concatenation: the result takes the last
  // (kSimd128Size - n) bytes of the first vector followed by the first n
  // bytes of the second.
  for (int n = 1; n < kSimd128Size; ++n) {
    int i = 0;
    // last kSimd128Size - n bytes of first vector.
    for (int j = n; j < kSimd128Size; ++j) {
      expected[i++] = j;
    }
    // first n bytes of second vector.
    for (int j = 0; j < n; ++j) {
      expected[i++] = j + kSimd128Size;
    }
    RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, expected);
  }
}
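
// For example, with n == 4 the expected pattern above is
// {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}: the last 12
// bytes of the first input followed by the first 4 bytes of the second.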

// Combine 3 shuffles a, b, and c by applying both a and b and then applying c
// to those two results.
Shuffle Combine(const Shuffle& a, const Shuffle& b, const Shuffle& c) {
  Shuffle result;
  for (int i = 0; i < kSimd128Size; ++i) {
    result[i] = c[i] < kSimd128Size ? a[c[i]] : b[c[i] - kSimd128Size];
  }
  return result;
}
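
// For example, combining S8x2Reverse with itself in all three positions
// yields the identity shuffle: every c[i] is below kSimd128Size, so each
// pair is reversed twice and Combine(rev, rev, rev)[i] == i for all i.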

const Shuffle& GetRandomTestShuffle(v8::base::RandomNumberGenerator* rng) {
  return test_shuffles[static_cast<ShuffleKey>(rng->NextInt(kNumShuffleKeys))];
}

// Test shuffles that are random combinations of 3 test shuffles. Completely
// random shuffles almost always generate the slow general shuffle code, so
// don't exercise as many code paths.
WASM_SIMD_TEST(S8x16ShuffleFuzz) {
  v8::base::RandomNumberGenerator* rng = CcTest::random_number_generator();
  static const int kTests = 100;
  for (int i = 0; i < kTests; ++i) {
    auto shuffle = Combine(GetRandomTestShuffle(rng), GetRandomTestShuffle(rng),
                           GetRandomTestShuffle(rng));
    RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, shuffle);
  }
}

void AppendShuffle(const Shuffle& shuffle, std::vector<byte>* buffer) {
  byte opcode[] = {WASM_SIMD_OP(kExprS8x16Shuffle)};
  for (size_t i = 0; i < arraysize(opcode); ++i) buffer->push_back(opcode[i]);
  for (size_t i = 0; i < kSimd128Size; ++i) buffer->push_back((shuffle[i]));
}

void BuildShuffle(std::vector<Shuffle>& shuffles, std::vector<byte>* buffer) {
  // Perform the leaf shuffles on globals 0 and 1.
  size_t row_index = (shuffles.size() - 1) / 2;
  for (size_t i = row_index; i < shuffles.size(); ++i) {
    byte operands[] = {WASM_GET_GLOBAL(0), WASM_GET_GLOBAL(1)};
    for (size_t j = 0; j < arraysize(operands); ++j)
      buffer->push_back(operands[j]);
    AppendShuffle(shuffles[i], buffer);
  }
  // Now perform inner shuffles in the correct order on operands on the stack.
  do {
    for (size_t i = row_index / 2; i < row_index; ++i) {
      AppendShuffle(shuffles[i], buffer);
    }
    row_index /= 2;
  } while (row_index != 0);
  byte epilog[] = {kExprSetGlobal, static_cast<byte>(0), WASM_ONE};
  for (size_t j = 0; j < arraysize(epilog); ++j) buffer->push_back(epilog[j]);
}
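
// For example, with three shuffles the heap is {root, leaf, leaf}: row_index
// starts at 1, so shuffles[1] and shuffles[2] each consume globals 0 and 1,
// and the do-loop then emits shuffles[0], which consumes the two leaf results
// left on the stack.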

void RunWasmCode(ExecutionTier execution_tier, LowerSimd lower_simd,
                 const std::vector<byte>& code,
                 std::array<int8_t, kSimd128Size>* result) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
  int8_t* src0 = r.builder().AddGlobal<int8_t>(kWasmS128);
  int8_t* src1 = r.builder().AddGlobal<int8_t>(kWasmS128);
  for (int i = 0; i < kSimd128Size; ++i) {
    WriteLittleEndianValue<int8_t>(&src0[i], i);
    WriteLittleEndianValue<int8_t>(&src1[i], kSimd128Size + i);
  }
  r.Build(code.data(), code.data() + code.size());
  CHECK_EQ(1, r.Call());
  for (size_t i = 0; i < kSimd128Size; i++) {
    (*result)[i] = ReadLittleEndianValue<int8_t>(&src0[i]);
  }
}

// Test multiple shuffles executed in sequence.
WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
  v8::base::RandomNumberGenerator* rng = CcTest::random_number_generator();
  static const int kShuffles = 100;
  for (int i = 0; i < kShuffles; ++i) {
    // Create an odd number in [3..21] of random test shuffles so we can build
    // a complete binary tree (stored as a heap) of shuffle operations. The
    // leaf shuffles operate on the test pattern inputs, while the interior
    // shuffles operate on the results of the two child shuffles.
    int num_shuffles = rng->NextInt(10) * 2 + 3;
    std::vector<Shuffle> shuffles;
    for (int j = 0; j < num_shuffles; ++j) {
      shuffles.push_back(GetRandomTestShuffle(rng));
    }
    // Generate the code for the shuffle expression.
    std::vector<byte> buffer;
    BuildShuffle(shuffles, &buffer);

    // Run the code using the interpreter to get the expected result.
    std::array<int8_t, kSimd128Size> expected;
    RunWasmCode(ExecutionTier::kInterpreter, kNoLowerSimd, buffer, &expected);
    // Run the SIMD or scalar lowered compiled code and compare results.
    std::array<int8_t, kSimd128Size> result;
    RunWasmCode(execution_tier, lower_simd, buffer, &result);
    for (size_t i = 0; i < kSimd128Size; ++i) {
      CHECK_EQ(result[i], expected[i]);
    }
  }
}

// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an
// integer result. Use relational ops on numeric vectors to create the boolean
// vector test inputs. Test inputs with all true, all false, one true, and one
// false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes)                           \
  WASM_SIMD_TEST(ReductionTest##lanes) {                                       \
    WasmRunner<int32_t> r(execution_tier, lower_simd);                         \
    byte zero = r.AllocateLocal(kWasmS128);                                    \
    byte one_one = r.AllocateLocal(kWasmS128);                                 \
    byte reduced = r.AllocateLocal(kWasmI32);                                  \
    BUILD(r, WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)),     \
          WASM_SET_LOCAL(                                                      \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue,                \
                                      WASM_SIMD_BINOP(kExprI##format##Eq,      \
                                                      WASM_GET_LOCAL(zero),    \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_SET_LOCAL(                                                      \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue,                \
                                      WASM_SIMD_BINOP(kExprI##format##Ne,      \
                                                      WASM_GET_LOCAL(zero),    \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_SET_LOCAL(                                                      \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue,                \
                                      WASM_SIMD_BINOP(kExprI##format##Eq,      \
                                                      WASM_GET_LOCAL(zero),    \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_SET_LOCAL(                                                      \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue,                \
                                      WASM_SIMD_BINOP(kExprI##format##Ne,      \
                                                      WASM_GET_LOCAL(zero),    \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_SET_LOCAL(one_one,                                              \
                         WASM_SIMD_I##format##_REPLACE_LANE(                   \
                             lanes - 1, WASM_GET_LOCAL(zero), WASM_ONE)),      \
          WASM_SET_LOCAL(                                                      \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue,                \
                                      WASM_SIMD_BINOP(kExprI##format##Eq,      \
                                                      WASM_GET_LOCAL(one_one), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_SET_LOCAL(                                                      \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue,                \
                                      WASM_SIMD_BINOP(kExprI##format##Ne,      \
                                                      WASM_GET_LOCAL(one_one), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_SET_LOCAL(                                                      \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue,                \
                                      WASM_SIMD_BINOP(kExprI##format##Eq,      \
                                                      WASM_GET_LOCAL(one_one), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_SET_LOCAL(                                                      \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue,                \
                                      WASM_SIMD_BINOP(kExprI##format##Ne,      \
                                                      WASM_GET_LOCAL(one_one), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_ONE);                                                           \
    CHECK_EQ(1, r.Call());                                                     \
  }

WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4)
WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8)
WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16)

WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  BUILD(r, WASM_IF_ELSE_I(
               WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
                               0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
                           WASM_I32_REINTERPRET_F32(WASM_F32(30.5))),
               WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
                                       0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
                                   WASM_F32_REINTERPRET_I32(WASM_I32V(15))),
                       WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
  // Choose two floating point values whose sum is normal and exactly
  // representable as a float.
  const int kOne = 0x3F800000;
  const int kTwo = 0x40000000;
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(
            WASM_F32_EQ(
                WASM_SIMD_F32x4_EXTRACT_LANE(
                    0, WASM_SIMD_BINOP(kExprF32x4Add,
                                       WASM_SIMD_I32x4_SPLAT(WASM_I32V(kOne)),
                                       WASM_SIMD_I32x4_SPLAT(WASM_I32V(kTwo)))),
                WASM_F32_ADD(WASM_F32_REINTERPRET_I32(WASM_I32V(kOne)),
                             WASM_F32_REINTERPRET_I32(WASM_I32V(kTwo)))),
            WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(
            WASM_I32_EQ(
                WASM_SIMD_I32x4_EXTRACT_LANE(
                    0, WASM_SIMD_BINOP(kExprI32x4Add,
                                       WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25)),
                                       WASM_SIMD_F32x4_SPLAT(WASM_F32(31.5)))),
                WASM_I32_ADD(WASM_I32_REINTERPRET_F32(WASM_F32(21.25)),
                             WASM_I32_REINTERPRET_F32(WASM_F32(31.5)))),
            WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdI32x4Local) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
        WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(0)));
  CHECK_EQ(31, r.Call());
}

WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmI32);
  r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_EXTRACT_LANE(
                                 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(76)))),
        WASM_SET_LOCAL(1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0))),
        WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_LOCAL(1)));
  CHECK_EQ(76, r.Call());
}

WASM_SIMD_TEST(SimdI32x4For) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmI32);
  r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_SET_LOCAL(1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
        WASM_SET_LOCAL(1, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_LOCAL(1),
                                                       WASM_I32V(53))),
        WASM_SET_LOCAL(1, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_LOCAL(1),
                                                       WASM_I32V(23))),
        WASM_SET_LOCAL(0, WASM_I32V(0)),
        WASM_LOOP(
            WASM_SET_LOCAL(
                1, WASM_SIMD_BINOP(kExprI32x4Add, WASM_GET_LOCAL(1),
                                   WASM_SIMD_I32x4_SPLAT(WASM_I32V(1)))),
            WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(5)), WASM_BR(1))),
        WASM_SET_LOCAL(0, WASM_I32V(1)),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(1)),
                            WASM_I32V(36)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_LOCAL(1)),
                            WASM_I32V(58)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GET_LOCAL(1)),
                            WASM_I32V(28)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GET_LOCAL(1)),
                            WASM_I32V(36)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_GET_LOCAL(0));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdF32x4For) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmI32);
  r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
        WASM_SET_LOCAL(1, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GET_LOCAL(1),
                                                       WASM_F32(19.5))),
        WASM_SET_LOCAL(0, WASM_I32V(0)),
        WASM_LOOP(
            WASM_SET_LOCAL(
                1, WASM_SIMD_BINOP(kExprF32x4Add, WASM_GET_LOCAL(1),
                                   WASM_SIMD_F32x4_SPLAT(WASM_F32(2.0)))),
            WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(3)), WASM_BR(1))),
        WASM_SET_LOCAL(0, WASM_I32V(1)),
        WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(1)),
                            WASM_F32(27.25)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GET_LOCAL(1)),
                            WASM_F32(25.5)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_GET_LOCAL(0));
  CHECK_EQ(1, r.Call());
}

template <typename T, int numLanes = 4>
void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
  for (int lane = 0; lane < numLanes; lane++) {
    WriteLittleEndianValue<T>(&v[lane], arr[lane]);
  }
}

template <typename T>
const T GetScalar(T* v, int lane) {
  constexpr int kElems = kSimd128Size / sizeof(T);
  const int index = lane;
  USE(kElems);
  DCHECK(index >= 0 && index < kElems);
  return ReadLittleEndianValue<T>(&v[index]);
}

WASM_SIMD_TEST(SimdI32x4GetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Pad the globals with a few unused slots to get a non-zero offset.
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
  SetVectorByLanes(global, {{0, 1, 2, 3}});
  r.AllocateLocal(kWasmI32);
  BUILD(
      r, WASM_SET_LOCAL(1, WASM_I32V(1)),
      WASM_IF(WASM_I32_NE(WASM_I32V(0),
                          WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_GLOBAL(4))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(1),
                          WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_GLOBAL(4))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(2),
                          WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GET_GLOBAL(4))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(3),
                          WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GET_GLOBAL(4))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_GET_LOCAL(1));
  CHECK_EQ(1, r.Call(0));
}

WASM_SIMD_TEST(SimdI32x4SetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Pad the globals with a few unused slots to get a non-zero offset.
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
  BUILD(r, WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
        WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(4),
                                                        WASM_I32V(34))),
        WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_GLOBAL(4),
                                                        WASM_I32V(45))),
        WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(3, WASM_GET_GLOBAL(4),
                                                        WASM_I32V(56))),
        WASM_I32V(1));
  CHECK_EQ(1, r.Call(0));
  CHECK_EQ(GetScalar(global, 0), 23);
  CHECK_EQ(GetScalar(global, 1), 34);
  CHECK_EQ(GetScalar(global, 2), 45);
  CHECK_EQ(GetScalar(global, 3), 56);
}

WASM_SIMD_TEST(SimdF32x4GetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  float* global = r.builder().AddGlobal<float>(kWasmS128);
  SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
  r.AllocateLocal(kWasmI32);
  BUILD(
      r, WASM_SET_LOCAL(1, WASM_I32V(1)),
      WASM_IF(WASM_F32_NE(WASM_F32(0.0),
                          WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GET_GLOBAL(0))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(1.5),
                          WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_GET_GLOBAL(0))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(2.25),
                          WASM_SIMD_F32x4_EXTRACT_LANE(2, WASM_GET_GLOBAL(0))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(3.5),
                          WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GET_GLOBAL(0))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_GET_LOCAL(1));
  CHECK_EQ(1, r.Call(0));
}

WASM_SIMD_TEST(SimdF32x4SetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  float* global = r.builder().AddGlobal<float>(kWasmS128);
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(0),
                                                        WASM_F32(45.5))),
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(2, WASM_GET_GLOBAL(0),
                                                        WASM_F32(32.25))),
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GET_GLOBAL(0),
                                                        WASM_F32(65.0))),
        WASM_I32V(1));
  CHECK_EQ(1, r.Call(0));
  CHECK_EQ(GetScalar(global, 0), 13.5f);
  CHECK_EQ(GetScalar(global, 1), 45.5f);
  CHECK_EQ(GetScalar(global, 2), 32.25f);
  CHECK_EQ(GetScalar(global, 3), 65.0f);
}

WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  int32_t* memory =
      r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
  // Load memory, store it, then reload it and extract the first lane. Use a
  // non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
  BUILD(r, WASM_SIMD_STORE_MEM(WASM_I32V(4), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
        WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(4))));

  FOR_INT32_INPUTS(i) {
    int32_t expected = i;
    r.builder().WriteMemory(&memory[1], expected);
    CHECK_EQ(expected, r.Call());
  }
}
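
// The body built above corresponds roughly to this WebAssembly text
// (illustrative):
//   (v128.store (i32.const 4) (v128.load (i32.const 4)))
//   (i32x4.extract_lane 0 (v128.load (i32.const 4)))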

#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
// V8:8665 - Tracking bug to enable reduction tests in the interpreter,
// and for SIMD lowering.
// TODO(gdeepti): Enable these tests for ARM/ARM64
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max)                            \
  WASM_SIMD_TEST_NO_LOWERING(S##format##AnyTrue) {                            \
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);               \
    byte simd = r.AllocateLocal(kWasmS128);                                   \
    BUILD(                                                                    \
        r,                                                                    \
        WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
        WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, WASM_GET_LOCAL(simd)));      \
    DCHECK_EQ(1, r.Call(max));                                                \
    DCHECK_EQ(1, r.Call(5));                                                  \
    DCHECK_EQ(0, r.Call(0));                                                  \
  }
WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff)
WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff)
WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff)

#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max)                            \
  WASM_SIMD_TEST_NO_LOWERING(S##format##AllTrue) {                            \
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);               \
    byte simd = r.AllocateLocal(kWasmS128);                                   \
    BUILD(                                                                    \
        r,                                                                    \
        WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
        WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, WASM_GET_LOCAL(simd)));      \
    DCHECK_EQ(1, r.Call(max));                                                \
    DCHECK_EQ(0, r.Call(0));                                                  \
  }
WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff)
WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff)
WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff)
#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
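
// Note: DCHECK_EQ compiles away in release builds, so the result checks in
// the AnyTrue/AllTrue tests above only run in debug builds; release builds
// mainly verify that the code compiles and executes.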

WASM_SIMD_TEST(BitSelect) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_SET_LOCAL(
            simd,
            WASM_SIMD_SELECT(32x4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(0x01020304)),
                             WASM_SIMD_I32x4_SPLAT(WASM_I32V(0)),
                             WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0)))),
        WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(simd)));
  DCHECK_EQ(0x01020304, r.Call(0xFFFFFFFF));
}

void RunI8x16MixedRelationalOpTest(ExecutionTier execution_tier,
                                   LowerSimd lower_simd, WasmOpcode opcode,
                                   Int8BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  byte temp3 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_LOCAL(temp3, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                              WASM_GET_LOCAL(temp2))),
        WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_GET_LOCAL(temp3)));

  DCHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
            r.Call(0xff, 0x7fff));
  DCHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
            r.Call(0xfe, 0x7fff));
  DCHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
            r.Call(0xff, 0x7ffe));
}
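
// Illustrative note: value2 is splatted as I16x8, so byte lane 0 of the
// i8x16 view holds the low byte of the 16-bit value (little-endian), which
// is why the expected results above compare against
// static_cast<uint8_t>(value2).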

WASM_SIMD_TEST_NO_LOWERING(I8x16LeUMixed) {
  RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LeU,
                                UnsignedLessEqual);
}
WASM_SIMD_TEST_NO_LOWERING(I8x16LtUMixed) {
  RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LtU,
                                UnsignedLess);
}
WASM_SIMD_TEST_NO_LOWERING(I8x16GeUMixed) {
  RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16GeU,
                                UnsignedGreaterEqual);
}
WASM_SIMD_TEST_NO_LOWERING(I8x16GtUMixed) {
  RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16GtU,
                                UnsignedGreater);
}

void RunI16x8MixedRelationalOpTest(ExecutionTier execution_tier,
                                   LowerSimd lower_simd, WasmOpcode opcode,
                                   Int16BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  byte temp3 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_LOCAL(temp3, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                              WASM_GET_LOCAL(temp2))),
        WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_GET_LOCAL(temp3)));

  DCHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
            r.Call(0xffff, 0x7fffffff));
  DCHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
            r.Call(0xfeff, 0x7fffffff));
  DCHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
            r.Call(0xffff, 0x7ffffeff));
}

WASM_SIMD_TEST_NO_LOWERING(I16x8LeUMixed) {
  RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LeU,
                                UnsignedLessEqual);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8LtUMixed) {
  RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LtU,
                                UnsignedLess);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8GeUMixed) {
  RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8GeU,
                                UnsignedGreaterEqual);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8GtUMixed) {
  RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8GtU,
                                UnsignedGreater);
}

#undef WASM_SIMD_TEST
#undef WASM_SIMD_CHECK_LANE
#undef TO_BYTE
#undef WASM_SIMD_OP
#undef WASM_SIMD_SPLAT
#undef WASM_SIMD_UNOP
#undef WASM_SIMD_BINOP
#undef WASM_SIMD_SHIFT_OP
#undef WASM_SIMD_CONCAT_OP
#undef WASM_SIMD_SELECT
#undef WASM_SIMD_F32x4_SPLAT
#undef WASM_SIMD_F32x4_EXTRACT_LANE
#undef WASM_SIMD_F32x4_REPLACE_LANE
#undef WASM_SIMD_I32x4_SPLAT
#undef WASM_SIMD_I32x4_EXTRACT_LANE
#undef WASM_SIMD_I32x4_REPLACE_LANE
#undef WASM_SIMD_I16x8_SPLAT
#undef WASM_SIMD_I16x8_EXTRACT_LANE
#undef WASM_SIMD_I16x8_REPLACE_LANE
#undef WASM_SIMD_I8x16_SPLAT
#undef WASM_SIMD_I8x16_EXTRACT_LANE
#undef WASM_SIMD_I8x16_REPLACE_LANE
#undef WASM_SIMD_S8x16_SHUFFLE_OP
#undef WASM_SIMD_LOAD_MEM
#undef WASM_SIMD_STORE_MEM
#undef WASM_SIMD_SELECT_TEST
#undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
#undef WASM_SIMD_COMPILED_TEST
#undef WASM_SIMD_BOOL_REDUCTION_TEST
#undef WASM_SIMD_TEST_NO_LOWERING
#undef WASM_SIMD_ANYTRUE_TEST
#undef WASM_SIMD_ALLTRUE_TEST

}  // namespace test_run_wasm_simd
}  // namespace wasm
}  // namespace internal
}  // namespace v8