// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>

#include "src/assembler-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"

namespace v8 {
namespace internal {
namespace wasm {
namespace test_run_wasm_simd {

namespace {
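
// Scalar reference implementations: each typedef below names the signature of
// a helper used to compute the expected value of a single SIMD lane.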
typedef float (*FloatUnOp)(float);
typedef float (*FloatBinOp)(float, float);
typedef int (*FloatCompareOp)(float, float);
typedef int32_t (*Int32UnOp)(int32_t);
typedef int32_t (*Int32BinOp)(int32_t, int32_t);
typedef int (*Int32CompareOp)(int32_t, int32_t);
typedef int32_t (*Int32ShiftOp)(int32_t, int);
typedef int16_t (*Int16UnOp)(int16_t);
typedef int16_t (*Int16BinOp)(int16_t, int16_t);
typedef int (*Int16CompareOp)(int16_t, int16_t);
typedef int16_t (*Int16ShiftOp)(int16_t, int);
typedef int8_t (*Int8UnOp)(int8_t);
typedef int8_t (*Int8BinOp)(int8_t, int8_t);
typedef int (*Int8CompareOp)(int8_t, int8_t);
typedef int8_t (*Int8ShiftOp)(int8_t, int);
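
// Each test macro below declares RunWasm_<name>_Impl, registers one cctest
// TEST() per execution tier (Turbofan, interpreter and/or lowered SIMD) that
// invokes it with the experimental SIMD flag enabled, and finally opens the
// body of the _Impl function.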
#define WASM_SIMD_TEST(name)                                     \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,               \
                             WasmExecutionMode execution_mode);  \
  TEST(RunWasm_##name##_turbofan) {                              \
    EXPERIMENTAL_FLAG_SCOPE(simd);                               \
    RunWasm_##name##_Impl(kNoLowerSimd, kExecuteTurbofan);       \
  }                                                              \
  TEST(RunWasm_##name##_interpreter) {                           \
    EXPERIMENTAL_FLAG_SCOPE(simd);                               \
    RunWasm_##name##_Impl(kNoLowerSimd, kExecuteInterpreter);    \
  }                                                              \
  TEST(RunWasm_##name##_simd_lowered) {                          \
    EXPERIMENTAL_FLAG_SCOPE(simd);                               \
    RunWasm_##name##_Impl(kLowerSimd, kExecuteTurbofan);         \
  }                                                              \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,               \
                             WasmExecutionMode execution_mode)

#define WASM_SIMD_COMPILED_AND_LOWERED_TEST(name)                \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,               \
                             WasmExecutionMode execution_mode);  \
  TEST(RunWasm_##name##_turbofan) {                              \
    EXPERIMENTAL_FLAG_SCOPE(simd);                               \
    RunWasm_##name##_Impl(kNoLowerSimd, kExecuteTurbofan);       \
  }                                                              \
  TEST(RunWasm_##name##_simd_lowered) {                          \
    EXPERIMENTAL_FLAG_SCOPE(simd);                               \
    RunWasm_##name##_Impl(kLowerSimd, kExecuteTurbofan);         \
  }                                                              \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,               \
                             WasmExecutionMode execution_mode)

#define WASM_SIMD_COMPILED_TEST(name)                            \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,               \
                             WasmExecutionMode execution_mode);  \
  TEST(RunWasm_##name##_turbofan) {                              \
    EXPERIMENTAL_FLAG_SCOPE(simd);                               \
    RunWasm_##name##_Impl(kNoLowerSimd, kExecuteTurbofan);       \
  }                                                              \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,               \
                             WasmExecutionMode execution_mode)

// Generic expected value functions.
template <typename T>
T Negate(T a) {
  return -a;
}

template <typename T>
T Add(T a, T b) {
  return a + b;
}

template <typename T>
T Sub(T a, T b) {
  return a - b;
}

template <typename T>
T Mul(T a, T b) {
  return a * b;
}

template <typename T>
T Minimum(T a, T b) {
  return a <= b ? a : b;
}

template <typename T>
T Maximum(T a, T b) {
  return a >= b ? a : b;
}

template <typename T>
T UnsignedMinimum(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b) ? a : b;
}

template <typename T>
T UnsignedMaximum(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? a : b;
}
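
// Comparison helpers return -1 (all bits set) when the relation holds and 0
// otherwise, matching the lane-wise result of the wasm SIMD compare ops.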
template <typename T>
int Equal(T a, T b) {
  return a == b ? -1 : 0;
}

template <typename T>
int NotEqual(T a, T b) {
  return a != b ? -1 : 0;
}

template <typename T>
int Less(T a, T b) {
  return a < b ? -1 : 0;
}

template <typename T>
int LessEqual(T a, T b) {
  return a <= b ? -1 : 0;
}

template <typename T>
int Greater(T a, T b) {
  return a > b ? -1 : 0;
}

template <typename T>
int GreaterEqual(T a, T b) {
  return a >= b ? -1 : 0;
}

template <typename T>
int UnsignedLess(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) < static_cast<UnsignedT>(b) ? -1 : 0;
}

template <typename T>
int UnsignedLessEqual(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b) ? -1 : 0;
}

template <typename T>
int UnsignedGreater(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) > static_cast<UnsignedT>(b) ? -1 : 0;
}

template <typename T>
int UnsignedGreaterEqual(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? -1 : 0;
}

template <typename T>
T LogicalShiftLeft(T a, int shift) {
  return a << shift;
}

template <typename T>
T LogicalShiftRight(T a, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(a) >> shift;
}
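
// Saturating arithmetic helpers: values are widened to int64_t so the
// intermediate result cannot overflow, then clamped back to the range of the
// lane type T.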
template <typename T>
T Clamp(int64_t value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
  int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
  int64_t clamped = std::max(min, std::min(max, value));
  return static_cast<T>(clamped);
}

template <typename T>
int64_t Widen(T value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  return static_cast<int64_t>(value);
}

template <typename T>
int64_t UnsignedWiden(T value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<int64_t>(static_cast<UnsignedT>(value));
}

template <typename T>
T Narrow(int64_t value) {
  return Clamp<T>(value);
}

template <typename T>
T UnsignedNarrow(int64_t value) {
  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<T>(Clamp<UnsignedT>(value & 0xFFFFFFFFu));
}

template <typename T>
T AddSaturate(T a, T b) {
  return Clamp<T>(Widen(a) + Widen(b));
}

template <typename T>
T SubSaturate(T a, T b) {
  return Clamp<T>(Widen(a) - Widen(b));
}

template <typename T>
T UnsignedAddSaturate(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return Clamp<UnsignedT>(UnsignedWiden(a) + UnsignedWiden(b));
}

template <typename T>
T UnsignedSubSaturate(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return Clamp<UnsignedT>(UnsignedWiden(a) - UnsignedWiden(b));
}

template <typename T>
T And(T a, T b) {
  return a & b;
}

template <typename T>
T Or(T a, T b) {
  return a | b;
}

template <typename T>
T Xor(T a, T b) {
  return a ^ b;
}

template <typename T>
T Not(T a) {
  return ~a;
}

template <typename T>
T LogicalNot(T a) {
  return a == 0 ? -1 : 0;
}

template <typename T>
T Sqrt(T a) {
  return std::sqrt(a);
}

template <typename T>
T Recip(T a) {
  return 1.0f / a;
}

template <typename T>
T RecipSqrt(T a) {
  return 1.0f / std::sqrt(a);
}

}  // namespace
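
// The WASM_SIMD_CHECK* macros emit wasm code that compares each lane of a
// SIMD local against an expected scalar local and returns 0 on the first
// mismatch, so test functions return 1 (WASM_ONE) only when all lanes match.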
#define WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lane_value, lane_index) \
  WASM_IF(WASM_##LANE_TYPE##_NE(WASM_GET_LOCAL(lane_value),                  \
                                WASM_SIMD_##TYPE##_EXTRACT_LANE(             \
                                    lane_index, WASM_GET_LOCAL(value))),     \
          WASM_RETURN1(WASM_ZERO))

#define WASM_SIMD_CHECK4(TYPE, value, LANE_TYPE, lv0, lv1, lv2, lv3) \
  WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv0, 0)               \
  , WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv1, 1),            \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv2, 2),          \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv3, 3)

#define WASM_SIMD_CHECK_SPLAT4(TYPE, value, LANE_TYPE, lv) \
  WASM_SIMD_CHECK4(TYPE, value, LANE_TYPE, lv, lv, lv, lv)

#define WASM_SIMD_CHECK8(TYPE, value, LANE_TYPE, lv0, lv1, lv2, lv3, lv4, lv5, \
                         lv6, lv7)                                             \
  WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv0, 0)                         \
  , WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv1, 1),                      \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv2, 2),                    \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv3, 3),                    \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv4, 4),                    \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv5, 5),                    \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv6, 6),                    \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv7, 7)

#define WASM_SIMD_CHECK_SPLAT8(TYPE, value, LANE_TYPE, lv) \
  WASM_SIMD_CHECK8(TYPE, value, LANE_TYPE, lv, lv, lv, lv, lv, lv, lv, lv)

#define WASM_SIMD_CHECK16(TYPE, value, LANE_TYPE, lv0, lv1, lv2, lv3, lv4, \
                          lv5, lv6, lv7, lv8, lv9, lv10, lv11, lv12, lv13, \
                          lv14, lv15)                                      \
  WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv0, 0)                     \
  , WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv1, 1),                  \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv2, 2),                \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv3, 3),                \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv4, 4),                \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv5, 5),                \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv6, 6),                \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv7, 7),                \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv8, 8),                \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv9, 9),                \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv10, 10),              \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv11, 11),              \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv12, 12),              \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv13, 13),              \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv14, 14),              \
      WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lv15, 15)

#define WASM_SIMD_CHECK_SPLAT16(TYPE, value, LANE_TYPE, lv)                 \
  WASM_SIMD_CHECK16(TYPE, value, LANE_TYPE, lv, lv, lv, lv, lv, lv, lv, lv, \
                    lv, lv, lv, lv, lv, lv, lv, lv)

#define WASM_SIMD_CHECK_F32_LANE(value, lane_value, lane_index)             \
  WASM_IF(WASM_F32_NE(WASM_GET_LOCAL(lane_value),                           \
                      WASM_SIMD_F32x4_EXTRACT_LANE(lane_index,              \
                                                   WASM_GET_LOCAL(value))), \
          WASM_RETURN1(WASM_ZERO))

#define WASM_SIMD_CHECK_F32x4(value, lv0, lv1, lv2, lv3) \
  WASM_SIMD_CHECK_F32_LANE(value, lv0, 0)                \
  , WASM_SIMD_CHECK_F32_LANE(value, lv1, 1),             \
      WASM_SIMD_CHECK_F32_LANE(value, lv2, 2),           \
      WASM_SIMD_CHECK_F32_LANE(value, lv3, 3)

#define WASM_SIMD_CHECK_SPLAT_F32x4(value, lv) \
  WASM_SIMD_CHECK_F32x4(value, lv, lv, lv, lv)

#define WASM_SIMD_CHECK_F32_LANE_ESTIMATE(value, low, high, lane_index)       \
  WASM_IF(WASM_F32_GT(WASM_GET_LOCAL(low),                                    \
                      WASM_SIMD_F32x4_EXTRACT_LANE(lane_index,                \
                                                   WASM_GET_LOCAL(value))),   \
          WASM_RETURN1(WASM_ZERO))                                            \
  , WASM_IF(WASM_F32_LT(WASM_GET_LOCAL(high),                                 \
                        WASM_SIMD_F32x4_EXTRACT_LANE(lane_index,              \
                                                     WASM_GET_LOCAL(value))), \
            WASM_RETURN1(WASM_ZERO))

#define WASM_SIMD_CHECK_SPLAT_F32x4_ESTIMATE(value, low, high) \
  WASM_SIMD_CHECK_F32_LANE_ESTIMATE(value, low, high, 0)       \
  , WASM_SIMD_CHECK_F32_LANE_ESTIMATE(value, low, high, 1),    \
      WASM_SIMD_CHECK_F32_LANE_ESTIMATE(value, low, high, 2),  \
      WASM_SIMD_CHECK_F32_LANE_ESTIMATE(value, low, high, 3)
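
// Byte-sequence builders for use inside BUILD(): each macro expands to its
// operand expressions followed by the prefixed SIMD opcode and any immediate
// bytes (lane index, shift amount, shuffle mask, or memory alignment/offset).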
#define TO_BYTE(val) static_cast<byte>(val)
#define WASM_SIMD_OP(op) kSimdPrefix, TO_BYTE(op)
#define WASM_SIMD_SPLAT(Type, x) x, WASM_SIMD_OP(kExpr##Type##Splat)
#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
#define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
#define WASM_SIMD_SHIFT_OP(op, shift, x) x, WASM_SIMD_OP(op), TO_BYTE(shift)
#define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
  x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
#define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)

#define WASM_SIMD_F32x4_SPLAT(x) x, WASM_SIMD_OP(kExprF32x4Splat)
#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
  x, WASM_SIMD_OP(kExprF32x4ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
  x, y, WASM_SIMD_OP(kExprF32x4ReplaceLane), TO_BYTE(lane)

#define WASM_SIMD_I32x4_SPLAT(x) x, WASM_SIMD_OP(kExprI32x4Splat)
#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
  x, WASM_SIMD_OP(kExprI32x4ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
  x, y, WASM_SIMD_OP(kExprI32x4ReplaceLane), TO_BYTE(lane)

#define WASM_SIMD_I16x8_SPLAT(x) x, WASM_SIMD_OP(kExprI16x8Splat)
#define WASM_SIMD_I16x8_EXTRACT_LANE(lane, x) \
  x, WASM_SIMD_OP(kExprI16x8ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I16x8_REPLACE_LANE(lane, x, y) \
  x, y, WASM_SIMD_OP(kExprI16x8ReplaceLane), TO_BYTE(lane)

#define WASM_SIMD_I8x16_SPLAT(x) x, WASM_SIMD_OP(kExprI8x16Splat)
#define WASM_SIMD_I8x16_EXTRACT_LANE(lane, x) \
  x, WASM_SIMD_OP(kExprI8x16ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I8x16_REPLACE_LANE(lane, x, y) \
  x, y, WASM_SIMD_OP(kExprI8x16ReplaceLane), TO_BYTE(lane)

#define WASM_SIMD_S8x16_SHUFFLE_OP(opcode, m, x, y)                        \
  x, y, WASM_SIMD_OP(opcode), TO_BYTE(m[0]), TO_BYTE(m[1]), TO_BYTE(m[2]), \
      TO_BYTE(m[3]), TO_BYTE(m[4]), TO_BYTE(m[5]), TO_BYTE(m[6]),          \
      TO_BYTE(m[7]), TO_BYTE(m[8]), TO_BYTE(m[9]), TO_BYTE(m[10]),         \
      TO_BYTE(m[11]), TO_BYTE(m[12]), TO_BYTE(m[13]), TO_BYTE(m[14]),      \
      TO_BYTE(m[15])

#define WASM_SIMD_LOAD_MEM(index) \
  index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, ZERO_OFFSET
#define WASM_SIMD_STORE_MEM(index, val) \
  index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, ZERO_OFFSET

// Skip FP tests involving extremely large or extremely small values, which
// may fail due to non-IEEE-754 SIMD arithmetic on some platforms.
bool SkipFPValue(float x) {
  float abs_x = std::fabs(x);
  const float kSmallFloatThreshold = 1.0e-32f;
  const float kLargeFloatThreshold = 1.0e32f;
  return abs_x != 0.0f &&  // 0 or -0 are fine.
         (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
}

// Skip tests where the expected value is a NaN, since our wasm test code
// doesn't handle NaNs. Also skip extreme values.
bool SkipFPExpectedValue(float x) { return std::isnan(x) || SkipFPValue(x); }
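
// FOR_FLOAT32_INPUTS and the other FOR_*_INPUTS macros (from value-helper.h)
// iterate over a canned set of interesting input values; *i dereferences the
// iterator to get the current value.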
WASM_SIMD_TEST(F32x4Splat) {
  WasmRunner<int32_t, float> r(execution_mode, lower_simd);
  byte lane_val = 0;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_SET_LOCAL(simd, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(lane_val))),
        WASM_SIMD_CHECK_SPLAT_F32x4(simd, lane_val), WASM_RETURN1(WASM_ONE));

  FOR_FLOAT32_INPUTS(i) {
    if (SkipFPExpectedValue(*i)) continue;
    CHECK_EQ(1, r.Call(*i));
  }
}

WASM_SIMD_TEST(F32x4ReplaceLane) {
  WasmRunner<int32_t, float, float> r(execution_mode, lower_simd);
  byte old_val = 0;
  byte new_val = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(old_val))),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_F32x4_REPLACE_LANE(0, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK_F32x4(simd, new_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK_F32x4(simd, new_val, new_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_F32x4_REPLACE_LANE(2, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK_F32x4(simd, new_val, new_val, new_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK_SPLAT_F32x4(simd, new_val), WASM_RETURN1(WASM_ONE));

  CHECK_EQ(1, r.Call(3.14159f, -1.5f));
}

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion.
WASM_SIMD_TEST(F32x4ConvertI32x4) {
  WasmRunner<int32_t, int32_t, float, float> r(execution_mode, lower_simd);
  byte a = 0;
  byte expected_signed = 1;
  byte expected_unsigned = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  byte simd2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_UNOP(kExprF32x4SConvertI32x4,
                                             WASM_GET_LOCAL(simd0))),
        WASM_SIMD_CHECK_SPLAT_F32x4(simd1, expected_signed),
        WASM_SET_LOCAL(simd2, WASM_SIMD_UNOP(kExprF32x4UConvertI32x4,
                                             WASM_GET_LOCAL(simd0))),
        WASM_SIMD_CHECK_SPLAT_F32x4(simd2, expected_unsigned),
        WASM_RETURN1(WASM_ONE));

  FOR_INT32_INPUTS(i) {
    CHECK_EQ(1, r.Call(*i, static_cast<float>(*i),
                       static_cast<float>(static_cast<uint32_t>(*i))));
  }
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
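
// Runs a unary F32x4 op on a splatted input and checks that every lane of the
// result lies within a relative error band around the scalar expected_op
// value (error == 0.0f requires an exact match).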
void RunF32x4UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                      WasmOpcode simd_op, FloatUnOp expected_op,
                      float error = 0.0f) {
  WasmRunner<int32_t, float, float, float> r(execution_mode, lower_simd);
  byte a = 0;
  byte low = 1;
  byte high = 2;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
        WASM_SIMD_CHECK_SPLAT_F32x4_ESTIMATE(simd, low, high),
        WASM_RETURN1(WASM_ONE));

  FOR_FLOAT32_INPUTS(i) {
    if (SkipFPValue(*i)) continue;
    float expected = expected_op(*i);
    if (SkipFPExpectedValue(expected)) continue;
    float abs_error = std::abs(expected) * error;
    CHECK_EQ(1, r.Call(*i, expected - abs_error, expected + abs_error));
  }
}

WASM_SIMD_TEST(F32x4Abs) {
  RunF32x4UnOpTest(execution_mode, lower_simd, kExprF32x4Abs, std::abs);
}
WASM_SIMD_TEST(F32x4Neg) {
  RunF32x4UnOpTest(execution_mode, lower_simd, kExprF32x4Neg, Negate);
}

static const float kApproxError = 0.01f;

WASM_SIMD_TEST(F32x4RecipApprox) {
  RunF32x4UnOpTest(execution_mode, lower_simd, kExprF32x4RecipApprox, Recip,
                   kApproxError);
}

WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
  RunF32x4UnOpTest(execution_mode, lower_simd, kExprF32x4RecipSqrtApprox,
                   RecipSqrt, kApproxError);
}
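
// Runs a binary F32x4 op on two splatted inputs and checks every lane of the
// result against the exact scalar expected_op value.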
void RunF32x4BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                       WasmOpcode simd_op, FloatBinOp expected_op) {
  WasmRunner<int32_t, float, float, float> r(execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  byte expected = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK_SPLAT_F32x4(simd1, expected), WASM_RETURN1(WASM_ONE));

  FOR_FLOAT32_INPUTS(i) {
    if (SkipFPValue(*i)) continue;
    FOR_FLOAT32_INPUTS(j) {
      if (SkipFPValue(*j)) continue;
      float expected = expected_op(*i, *j);
      if (SkipFPExpectedValue(expected)) continue;
      CHECK_EQ(1, r.Call(*i, *j, expected));
    }
  }
}

WASM_SIMD_TEST(F32x4Add) {
  RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Add, Add);
}
WASM_SIMD_TEST(F32x4Sub) {
  RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Sub, Sub);
}
WASM_SIMD_TEST(F32x4Mul) {
  RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Mul, Mul);
}
WASM_SIMD_TEST(F32x4_Min) {
  RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Min, JSMin);
}
WASM_SIMD_TEST(F32x4_Max) {
  RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Max, JSMax);
}
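
// Runs an F32x4 comparison on two splatted inputs and checks that every I32x4
// lane of the result equals the scalar expected_op value (-1 or 0).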
void RunF32x4CompareOpTest(WasmExecutionMode execution_mode,
                           LowerSimd lower_simd, WasmOpcode simd_op,
                           FloatCompareOp expected_op) {
  WasmRunner<int32_t, float, float, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  byte expected = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected), WASM_ONE);

  FOR_FLOAT32_INPUTS(i) {
    if (SkipFPValue(*i)) continue;
    FOR_FLOAT32_INPUTS(j) {
      if (SkipFPValue(*j)) continue;
      float diff = *i - *j;
      if (SkipFPExpectedValue(diff)) continue;
      CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j)));
    }
  }
}

WASM_SIMD_TEST(F32x4Eq) {
  RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Eq, Equal);
}

WASM_SIMD_TEST(F32x4Ne) {
  RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Ne, NotEqual);
}

WASM_SIMD_TEST(F32x4Gt) {
  RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Gt, Greater);
}

WASM_SIMD_TEST(F32x4Ge) {
  RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Ge, GreaterEqual);
}

WASM_SIMD_TEST(F32x4Lt) {
  RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Lt, Less);
}

WASM_SIMD_TEST(F32x4Le) {
  RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Le, LessEqual);
}

WASM_SIMD_TEST(I32x4Splat) {
  // Store the SIMD value in a local variable and use ExtractLane to check the
  // lane values. This is not a test for ExtractLane itself, since Splat does
  // not create interesting SIMD values.
  //
  //   SetLocal(1, I32x4Splat(Local(0)));
  //   For each lane index
  //     if (Local(0) != I32x4ExtractLane(Local(1), index))
  //       return 0
  //
  //   return 1
  WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
  byte lane_val = 0;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_SET_LOCAL(simd, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(lane_val))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, lane_val), WASM_ONE);

  FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i)); }
}

WASM_SIMD_TEST(I32x4ReplaceLane) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte old_val = 0;
  byte new_val = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(old_val))),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I32x4_REPLACE_LANE(0, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK4(I32x4, simd, I32, new_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK4(I32x4, simd, I32, new_val, new_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK4(I32x4, simd, I32, new_val, new_val, new_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I32x4_REPLACE_LANE(3, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, new_val), WASM_ONE);

  CHECK_EQ(1, r.Call(1, 2));
}

WASM_SIMD_TEST(I16x8Splat) {
  WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
  byte lane_val = 0;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_SET_LOCAL(simd, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(lane_val))),
        WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, lane_val), WASM_ONE);

  FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i)); }
}

WASM_SIMD_TEST(I16x8ReplaceLane) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte old_val = 0;
  byte new_val = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(old_val))),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I16x8_REPLACE_LANE(0, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK8(I16x8, simd, I32, new_val, old_val, old_val, old_val,
                         old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I16x8_REPLACE_LANE(1, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK8(I16x8, simd, I32, new_val, new_val, old_val, old_val,
                         old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I16x8_REPLACE_LANE(2, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK8(I16x8, simd, I32, new_val, new_val, new_val, old_val,
                         old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I16x8_REPLACE_LANE(3, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK8(I16x8, simd, I32, new_val, new_val, new_val, new_val,
                         old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I16x8_REPLACE_LANE(4, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK8(I16x8, simd, I32, new_val, new_val, new_val, new_val,
                         new_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I16x8_REPLACE_LANE(5, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK8(I16x8, simd, I32, new_val, new_val, new_val, new_val,
                         new_val, new_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I16x8_REPLACE_LANE(6, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK8(I16x8, simd, I32, new_val, new_val, new_val, new_val,
                         new_val, new_val, new_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I16x8_REPLACE_LANE(7, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, new_val), WASM_ONE);

  CHECK_EQ(1, r.Call(1, 2));
}

WASM_SIMD_TEST(I8x16Splat) {
  WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
  byte lane_val = 0;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_SET_LOCAL(simd, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(lane_val))),
        WASM_SIMD_CHECK_SPLAT8(I8x16, simd, I32, lane_val), WASM_ONE);

  FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i)); }
}

WASM_SIMD_TEST(I8x16ReplaceLane) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte old_val = 0;
  byte new_val = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(old_val))),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(0, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, old_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(1, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(2, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(3, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          old_val, old_val, old_val, old_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(4, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, old_val, old_val, old_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(5, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, old_val, old_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(6, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, old_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(7, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, old_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(8, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, new_val, old_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(9, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, new_val, new_val,
                          old_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(10, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, new_val, new_val,
                          new_val, old_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(11, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, new_val, new_val,
                          new_val, new_val, old_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(12, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, old_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(13, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, old_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(14, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK16(I8x16, simd, I32, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, new_val, new_val,
                          new_val, new_val, new_val, new_val, new_val, old_val),
        WASM_SET_LOCAL(simd,
                       WASM_SIMD_I8x16_REPLACE_LANE(15, WASM_GET_LOCAL(simd),
                                                    WASM_GET_LOCAL(new_val))),
        WASM_SIMD_CHECK_SPLAT16(I8x16, simd, I32, new_val), WASM_ONE);

  CHECK_EQ(1, r.Call(1, 2));
}

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
    V8_TARGET_ARCH_MIPS64

int32_t ConvertToInt(double val, bool unsigned_integer) {
  if (std::isnan(val)) return 0;
  if (unsigned_integer) {
    if (val < 0) return 0;
    if (val > kMaxUInt32) return kMaxUInt32;
    return static_cast<uint32_t>(val);
  } else {
    if (val < kMinInt) return kMinInt;
    if (val > kMaxInt) return kMaxInt;
    return static_cast<int>(val);
  }
}
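
// ConvertToInt mirrors the saturating behaviour the F32x4-to-I32x4 conversion
// opcodes are expected to have. A few illustrative values (chosen here, not
// drawn from the test inputs):
//   ConvertToInt(3.0e9, false)  == kMaxInt   (signed overflow saturates)
//   ConvertToInt(-3.0e9, false) == kMinInt
//   ConvertToInt(-1.5, true)    == 0         (unsigned clamps negatives to 0)
//   NaN inputs always convert to 0.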

// Tests both signed and unsigned saturating conversion from F32x4.
WASM_SIMD_TEST(I32x4ConvertF32x4) {
  WasmRunner<int32_t, float, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte expected_signed = 1;
  byte expected_unsigned = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  byte simd2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_UNOP(kExprI32x4SConvertF32x4,
                                             WASM_GET_LOCAL(simd0))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected_signed),
        WASM_SET_LOCAL(simd2, WASM_SIMD_UNOP(kExprI32x4UConvertF32x4,
                                             WASM_GET_LOCAL(simd0))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd2, I32, expected_unsigned), WASM_ONE);

  FOR_FLOAT32_INPUTS(i) {
    if (SkipFPValue(*i)) continue;
    int32_t signed_value = ConvertToInt(*i, false);
    int32_t unsigned_value = ConvertToInt(*i, true);
    CHECK_EQ(1, r.Call(*i, signed_value, unsigned_value));
  }
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion from I16x8 (unpacking). The Low
// conversions widen lanes 0-3 and the High conversions widen lanes 4-7, which
// is why the zeroed lane 0 only shows up in the Low results.
WASM_SIMD_TEST(I32x4ConvertI16x8) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t> r(execution_mode,
                                                            lower_simd);
  byte a = 0;
  byte unpacked_signed = 1;
  byte unpacked_unsigned = 2;
  byte zero_value = 3;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  byte simd2 = r.AllocateLocal(kWasmS128);
  byte simd3 = r.AllocateLocal(kWasmS128);
  byte simd4 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(
            simd0, WASM_SIMD_I16x8_REPLACE_LANE(0, WASM_GET_LOCAL(simd0),
                                                WASM_GET_LOCAL(zero_value))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8High,
                                             WASM_GET_LOCAL(simd0))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, unpacked_signed),
        WASM_SET_LOCAL(simd2, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8High,
                                             WASM_GET_LOCAL(simd0))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd2, I32, unpacked_unsigned),
        WASM_SET_LOCAL(simd3, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8Low,
                                             WASM_GET_LOCAL(simd0))),
        WASM_SIMD_CHECK4(I32x4, simd3, I32, zero_value, unpacked_signed,
                         unpacked_signed, unpacked_signed),
        WASM_SET_LOCAL(simd4, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8Low,
                                             WASM_GET_LOCAL(simd0))),
        WASM_SIMD_CHECK4(I32x4, simd4, I32, zero_value, unpacked_unsigned,
                         unpacked_unsigned, unpacked_unsigned),
        WASM_ONE);

  FOR_INT16_INPUTS(i) {
    int32_t unpacked_signed = static_cast<int32_t>(Widen<int16_t>(*i));
    int32_t unpacked_unsigned =
        static_cast<int32_t>(UnsignedWiden<int16_t>(*i));
    CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned, 0));
  }
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32

void RunI32x4UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                      WasmOpcode simd_op, Int32UnOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte expected = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, expected), WASM_ONE);

  FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
}

WASM_SIMD_TEST(I32x4Neg) {
  RunI32x4UnOpTest(execution_mode, lower_simd, kExprI32x4Neg, Negate);
}

WASM_SIMD_TEST(S128Not) {
  RunI32x4UnOpTest(execution_mode, lower_simd, kExprS128Not, Not);
}

void RunI32x4BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                       WasmOpcode simd_op, Int32BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  byte expected = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected), WASM_ONE);

  FOR_INT32_INPUTS(i) {
    FOR_INT32_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
  }
}
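
// The binop helper above drives each of the I32x4 and S128 bitwise tests
// below: it splats the two scalar inputs, applies simd_op lane-wise, and
// checks every lane against the scalar reference result expected_op(*i, *j)
// that is passed in as the third call argument.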
WASM_SIMD_TEST(I32x4Add) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4Add, Add);
}

WASM_SIMD_TEST(I32x4Sub) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4Sub, Sub);
}

WASM_SIMD_TEST(I32x4Mul) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4Mul, Mul);
}

WASM_SIMD_TEST(I32x4MinS) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4MinS, Minimum);
}

WASM_SIMD_TEST(I32x4MaxS) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4MaxS, Maximum);
}

WASM_SIMD_TEST(I32x4MinU) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4MinU,
                    UnsignedMinimum);
}

WASM_SIMD_TEST(I32x4MaxU) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4MaxU,
                    UnsignedMaximum);
}

WASM_SIMD_TEST(S128And) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprS128And, And);
}

WASM_SIMD_TEST(S128Or) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprS128Or, Or);
}

WASM_SIMD_TEST(S128Xor) {
  RunI32x4BinOpTest(execution_mode, lower_simd, kExprS128Xor, Xor);
}

void RunI32x4CompareOpTest(WasmExecutionMode execution_mode,
                           LowerSimd lower_simd, WasmOpcode simd_op,
                           Int32CompareOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  byte expected = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected), WASM_ONE);

  FOR_INT32_INPUTS(i) {
    FOR_INT32_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
  }
}
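
// The scalar comparison helpers used below (Equal, Less, UnsignedGreater, and
// so on) are expected to return -1 for true and 0 for false, matching the
// all-ones / all-zeros lanes the SIMD comparisons produce; that is why the
// result can be checked as a splat of `expected`.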
WASM_SIMD_TEST(I32x4Eq) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4Eq, Equal);
}

WASM_SIMD_TEST(I32x4Ne) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4Ne, NotEqual);
}

WASM_SIMD_TEST(I32x4LtS) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4LtS, Less);
}

WASM_SIMD_TEST(I32x4LeS) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4LeS, LessEqual);
}

WASM_SIMD_TEST(I32x4GtS) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4GtS, Greater);
}

WASM_SIMD_TEST(I32x4GeS) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4GeS,
                        GreaterEqual);
}

WASM_SIMD_TEST(I32x4LtU) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4LtU,
                        UnsignedLess);
}

WASM_SIMD_TEST(I32x4LeU) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4LeU,
                        UnsignedLessEqual);
}

WASM_SIMD_TEST(I32x4GtU) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4GtU,
                        UnsignedGreater);
}

WASM_SIMD_TEST(I32x4GeU) {
  RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4GeU,
                        UnsignedGreaterEqual);
}

void RunI32x4ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                         WasmOpcode simd_op, Int32ShiftOp expected_op,
                         int shift) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte expected = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(
            simd, WASM_SIMD_SHIFT_OP(simd_op, shift, WASM_GET_LOCAL(simd))),
        WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, expected), WASM_ONE);

  FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}
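
// Each shift test below uses a constant shift amount of 1, and expected_op is
// the matching scalar shift. For example, assuming LogicalShiftRight shifts
// the unsigned reinterpretation of the lane, LogicalShiftRight(-2, 1) is
// 0x7FFFFFFF while ArithmeticShiftRight(-2, 1) is -1.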
WASM_SIMD_TEST(I32x4Shl) {
  RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4Shl,
                      LogicalShiftLeft, 1);
}

WASM_SIMD_TEST(I32x4ShrS) {
  RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4ShrS,
                      ArithmeticShiftRight, 1);
}

WASM_SIMD_TEST(I32x4ShrU) {
  RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4ShrU,
                      LogicalShiftRight, 1);
}

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t> r(execution_mode,
                                                            lower_simd);
  byte a = 0;
  byte unpacked_signed = 1;
  byte unpacked_unsigned = 2;
  byte zero_value = 3;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  byte simd2 = r.AllocateLocal(kWasmS128);
  byte simd3 = r.AllocateLocal(kWasmS128);
  byte simd4 = r.AllocateLocal(kWasmS128);
  BUILD(
      r, WASM_SET_LOCAL(simd0, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(a))),
      WASM_SET_LOCAL(simd0,
                     WASM_SIMD_I8x16_REPLACE_LANE(0, WASM_GET_LOCAL(simd0),
                                                  WASM_GET_LOCAL(zero_value))),
      WASM_SET_LOCAL(simd1, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16High,
                                           WASM_GET_LOCAL(simd0))),
      WASM_SIMD_CHECK_SPLAT8(I16x8, simd1, I32, unpacked_signed),
      WASM_SET_LOCAL(simd2, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16High,
                                           WASM_GET_LOCAL(simd0))),
      WASM_SIMD_CHECK_SPLAT8(I16x8, simd2, I32, unpacked_unsigned),
      WASM_SET_LOCAL(simd3, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16Low,
                                           WASM_GET_LOCAL(simd0))),
      WASM_SIMD_CHECK8(I16x8, simd3, I32, zero_value, unpacked_signed,
                       unpacked_signed, unpacked_signed, unpacked_signed,
                       unpacked_signed, unpacked_signed, unpacked_signed),
      WASM_SET_LOCAL(simd4, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16Low,
                                           WASM_GET_LOCAL(simd0))),
      WASM_SIMD_CHECK8(I16x8, simd4, I32, zero_value, unpacked_unsigned,
                       unpacked_unsigned, unpacked_unsigned, unpacked_unsigned,
                       unpacked_unsigned, unpacked_unsigned, unpacked_unsigned),
      WASM_ONE);

  FOR_INT8_INPUTS(i) {
    int32_t unpacked_signed = static_cast<int32_t>(Widen<int8_t>(*i));
    int32_t unpacked_unsigned = static_cast<int32_t>(UnsignedWiden<int8_t>(*i));
    CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned, 0));
  }
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32

void RunI16x8UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                      WasmOpcode simd_op, Int16UnOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte expected = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
        WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, expected), WASM_ONE);

  FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
}

WASM_SIMD_TEST(I16x8Neg) {
  RunI16x8UnOpTest(execution_mode, lower_simd, kExprI16x8Neg, Negate);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
|
2018-04-25 03:28:49 +00:00
|
|
|
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
// Tests both signed and unsigned conversion from I32x4 (packing).
|
2018-06-20 06:03:44 +00:00
|
|
|
WASM_SIMD_TEST(I16x8ConvertI32x4) {
|
|
|
|
WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t> r(
|
|
|
|
execution_mode, lower_simd);
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
byte a = 0;
|
2018-06-20 06:03:44 +00:00
|
|
|
byte b = 1;
|
|
|
|
// indices for packed signed params
|
|
|
|
byte ps_a = 2;
|
|
|
|
byte ps_b = 3;
|
|
|
|
// indices for packed unsigned params
|
|
|
|
byte pu_a = 4;
|
|
|
|
byte pu_b = 5;
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
byte simd0 = r.AllocateLocal(kWasmS128);
|
|
|
|
byte simd1 = r.AllocateLocal(kWasmS128);
|
|
|
|
byte simd2 = r.AllocateLocal(kWasmS128);
|
|
|
|
BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
|
2018-06-20 06:03:44 +00:00
|
|
        WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd2, WASM_SIMD_BINOP(kExprI16x8SConvertI32x4,
                                              WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK8(I16x8, simd2, I32, ps_a, ps_a, ps_a, ps_a, ps_b, ps_b,
                         ps_b, ps_b),
        WASM_SET_LOCAL(simd2, WASM_SIMD_BINOP(kExprI16x8UConvertI32x4,
                                              WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK8(I16x8, simd2, I32, pu_a, pu_a, pu_a, pu_a, pu_b, pu_b,
                         pu_b, pu_b),
        WASM_ONE);
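
  // The CHECK8 expectations above encode the packed-lane layout: lanes 0-3 of
  // simd2 come from simd0 (the splat of a) and lanes 4-7 from simd1 (the
  // splat of b). The ps_*/pu_* arguments passed below are the correspondingly
  // saturated scalar expectations.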
  FOR_INT32_INPUTS(i) {
    FOR_INT32_INPUTS(j) {
      // packed signed values
      int32_t ps_a = Narrow<int16_t>(*i);
      int32_t ps_b = Narrow<int16_t>(*j);
      // packed unsigned values
      int32_t pu_a = UnsignedNarrow<int16_t>(*i);
      int32_t pu_b = UnsignedNarrow<int16_t>(*j);
      // Sign-extend here, since ExtractLane sign extends.
      if (pu_a & 0x8000) pu_a |= 0xFFFF0000;
      if (pu_b & 0x8000) pu_b |= 0xFFFF0000;
      CHECK_EQ(1, r.Call(*i, *j, ps_a, ps_b, pu_a, pu_b));
    }
  }
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
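
// Shared runner for lane-wise i16x8 binary ops: both operands are splats of
// the scalar parameters a and b, so every lane of the result should equal
// expected_op(a, b), which WASM_SIMD_CHECK_SPLAT8 verifies lane by lane.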
void RunI16x8BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                       WasmOpcode simd_op, Int16BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  byte expected = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK_SPLAT8(I16x8, simd1, I32, expected), WASM_ONE);

  FOR_INT16_INPUTS(i) {
    FOR_INT16_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
  }
}

WASM_SIMD_TEST(I16x8Add) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8Add, Add);
}

WASM_SIMD_TEST(I16x8AddSaturateS) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8AddSaturateS,
                    AddSaturate);
}

WASM_SIMD_TEST(I16x8Sub) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8Sub, Sub);
}

WASM_SIMD_TEST(I16x8SubSaturateS) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8SubSaturateS,
                    SubSaturate);
}

WASM_SIMD_TEST(I16x8Mul) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8Mul, Mul);
}

WASM_SIMD_TEST(I16x8MinS) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8MinS, Minimum);
}

WASM_SIMD_TEST(I16x8MaxS) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8MaxS, Maximum);
}

WASM_SIMD_TEST(I16x8AddSaturateU) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8AddSaturateU,
                    UnsignedAddSaturate);
}

WASM_SIMD_TEST(I16x8SubSaturateU) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8SubSaturateU,
                    UnsignedSubSaturate);
}

WASM_SIMD_TEST(I16x8MinU) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8MinU,
                    UnsignedMinimum);
}

WASM_SIMD_TEST(I16x8MaxU) {
  RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8MaxU,
                    UnsignedMaximum);
}
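
// Shared runner for i16x8 comparisons. The compare helpers passed as
// expected_op (defined earlier in this file) are presumed to return -1 for
// true and 0 for false, matching the all-ones / all-zeros lanes that the SIMD
// compare opcodes produce.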
void RunI16x8CompareOpTest(WasmExecutionMode execution_mode,
                           LowerSimd lower_simd, WasmOpcode simd_op,
                           Int16CompareOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  byte expected = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK_SPLAT8(I16x8, simd1, I32, expected), WASM_ONE);

  FOR_INT16_INPUTS(i) {
    FOR_INT16_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
  }
}

WASM_SIMD_TEST(I16x8Eq) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8Eq, Equal);
}

WASM_SIMD_TEST(I16x8Ne) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8Ne, NotEqual);
}

WASM_SIMD_TEST(I16x8LtS) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8LtS, Less);
}

WASM_SIMD_TEST(I16x8LeS) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8LeS, LessEqual);
}

WASM_SIMD_TEST(I16x8GtS) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8GtS, Greater);
}

WASM_SIMD_TEST(I16x8GeS) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8GeS,
                        GreaterEqual);
}

WASM_SIMD_TEST(I16x8GtU) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8GtU,
                        UnsignedGreater);
}

WASM_SIMD_TEST(I16x8GeU) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8GeU,
                        UnsignedGreaterEqual);
}

WASM_SIMD_TEST(I16x8LtU) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8LtU,
                        UnsignedLess);
}

WASM_SIMD_TEST(I16x8LeU) {
  RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8LeU,
                        UnsignedLessEqual);
}
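
// The SIMD shift opcodes take the shift amount as an immediate (see
// WASM_SIMD_SHIFT_OP below), so the runner bakes 'shift' into the module and
// computes the expected lane value host-side as expected_op(*i, shift).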
void RunI16x8ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                         WasmOpcode simd_op, Int16ShiftOp expected_op,
                         int shift) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte expected = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(
            simd, WASM_SIMD_SHIFT_OP(simd_op, shift, WASM_GET_LOCAL(simd))),
        WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, expected), WASM_ONE);

  FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}

WASM_SIMD_TEST(I16x8Shl) {
  RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8Shl,
                      LogicalShiftLeft, 1);
}

WASM_SIMD_TEST(I16x8ShrS) {
  RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8ShrS,
                      ArithmeticShiftRight, 1);
}

WASM_SIMD_TEST(I16x8ShrU) {
  RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8ShrU,
                      LogicalShiftRight, 1);
}

void RunI8x16UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                      WasmOpcode simd_op, Int8UnOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte expected = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
        WASM_SIMD_CHECK_SPLAT16(I8x16, simd, I32, expected), WASM_ONE);

  FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
}

WASM_SIMD_TEST(I8x16Neg) {
  RunI8x16UnOpTest(execution_mode, lower_simd, kExprI8x16Neg, Negate);
}

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t> r(
      execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  // indices for packed signed params
  byte ps_a = 2;
  byte ps_b = 3;
  // indices for packed unsigned params
  byte pu_a = 4;
  byte pu_b = 5;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  byte simd2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd2, WASM_SIMD_BINOP(kExprI8x16SConvertI16x8,
                                              WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK16(I8x16, simd2, I32, ps_a, ps_a, ps_a, ps_a, ps_a, ps_a,
                          ps_a, ps_a, ps_b, ps_b, ps_b, ps_b, ps_b, ps_b, ps_b,
                          ps_b),
        WASM_SET_LOCAL(simd2, WASM_SIMD_BINOP(kExprI8x16UConvertI16x8,
                                              WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK16(I8x16, simd2, I32, pu_a, pu_a, pu_a, pu_a, pu_a, pu_a,
                          pu_a, pu_a, pu_b, pu_b, pu_b, pu_b, pu_b, pu_b, pu_b,
                          pu_b),
        WASM_ONE);
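
  // A worked example of the expectations below, assuming Narrow/UnsignedNarrow
  // (defined earlier in this file) saturate to the signed/unsigned range of
  // the target type: for *i == 300, Narrow<int8_t> yields 127 while
  // UnsignedNarrow<int8_t> yields 0xFF; because ExtractLane sign-extends, 0xFF
  // reads back as -1, which is what the 0x80 fix-up below accounts for.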
  FOR_INT16_INPUTS(i) {
    FOR_INT16_INPUTS(j) {
      // packed signed values
      int32_t ps_a = Narrow<int8_t>(*i);
      int32_t ps_b = Narrow<int8_t>(*j);
      // packed unsigned values
      int32_t pu_a = UnsignedNarrow<int8_t>(*i);
      int32_t pu_b = UnsignedNarrow<int8_t>(*j);
      // Sign-extend here, since ExtractLane sign extends.
      if (pu_a & 0x80) pu_a |= 0xFFFFFF00;
      if (pu_b & 0x80) pu_b |= 0xFFFFFF00;
      CHECK_EQ(1, r.Call(*i, *j, ps_a, ps_b, pu_a, pu_b));
    }
  }
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32

void RunI8x16BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                       WasmOpcode simd_op, Int8BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  byte expected = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK_SPLAT16(I8x16, simd1, I32, expected), WASM_ONE);

  FOR_INT8_INPUTS(i) {
    FOR_INT8_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
  }
}

WASM_SIMD_TEST(I8x16Add) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16Add, Add);
}

WASM_SIMD_TEST(I8x16AddSaturateS) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16AddSaturateS,
                    AddSaturate);
}

WASM_SIMD_TEST(I8x16Sub) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16Sub, Sub);
}

WASM_SIMD_TEST(I8x16SubSaturateS) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16SubSaturateS,
                    SubSaturate);
}

WASM_SIMD_TEST(I8x16MinS) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16MinS, Minimum);
}

WASM_SIMD_TEST(I8x16MaxS) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16MaxS, Maximum);
}

WASM_SIMD_TEST(I8x16AddSaturateU) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16AddSaturateU,
                    UnsignedAddSaturate);
}

WASM_SIMD_TEST(I8x16SubSaturateU) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16SubSaturateU,
                    UnsignedSubSaturate);
}

WASM_SIMD_TEST(I8x16MinU) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16MinU,
                    UnsignedMinimum);
}

WASM_SIMD_TEST(I8x16MaxU) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16MaxU,
                    UnsignedMaximum);
}

void RunI8x16CompareOpTest(WasmExecutionMode execution_mode,
                           LowerSimd lower_simd, WasmOpcode simd_op,
                           Int8CompareOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte b = 1;
  byte expected = 2;
  byte simd0 = r.AllocateLocal(kWasmS128);
  byte simd1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(b))),
        WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
                                              WASM_GET_LOCAL(simd1))),
        WASM_SIMD_CHECK_SPLAT16(I8x16, simd1, I32, expected), WASM_ONE);

  FOR_INT8_INPUTS(i) {
    FOR_INT8_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
  }
}

WASM_SIMD_TEST(I8x16Eq) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16Eq, Equal);
}

WASM_SIMD_TEST(I8x16Ne) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16Ne, NotEqual);
}

WASM_SIMD_TEST(I8x16GtS) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16GtS, Greater);
}

WASM_SIMD_TEST(I8x16GeS) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16GeS,
                        GreaterEqual);
}

WASM_SIMD_TEST(I8x16LtS) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16LtS, Less);
}

WASM_SIMD_TEST(I8x16LeS) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16LeS, LessEqual);
}

WASM_SIMD_TEST(I8x16GtU) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16GtU,
                        UnsignedGreater);
}

WASM_SIMD_TEST(I8x16GeU) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16GeU,
                        UnsignedGreaterEqual);
}

WASM_SIMD_TEST(I8x16LtU) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16LtU,
                        UnsignedLess);
}

WASM_SIMD_TEST(I8x16LeU) {
  RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16LeU,
                        UnsignedLessEqual);
}

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Mul) {
  RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16Mul, Mul);
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32

void RunI8x16ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                         WasmOpcode simd_op, Int8ShiftOp expected_op,
                         int shift) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
  byte a = 0;
  byte expected = 1;
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(a))),
        WASM_SET_LOCAL(
            simd, WASM_SIMD_SHIFT_OP(simd_op, shift, WASM_GET_LOCAL(simd))),
        WASM_SIMD_CHECK_SPLAT16(I8x16, simd, I32, expected), WASM_ONE);

  FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Shl) {
  RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16Shl,
                      LogicalShiftLeft, 1);
}

WASM_SIMD_TEST(I8x16ShrS) {
  RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16ShrS,
                      ArithmeticShiftRight, 1);
}

WASM_SIMD_TEST(I8x16ShrU) {
  RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16ShrU,
                      LogicalShiftRight, 1);
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32

// Test Select by making a mask where the 1st and 2nd lanes are true and the
// rest false, and comparing for non-equality with zero to convert to a
// boolean vector.
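// For example, in the 32x4 case the mask starts as a zero splat, lanes 1 and 2
// are replaced with -1, and I32x4Ne(mask, zero) then produces a boolean vector
// that selects src1 (val1) in lanes 1 and 2 and src2 (val2) in lanes 0 and 3,
// which is exactly what the lane checks at the end of the macro verify.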
#define WASM_SIMD_SELECT_TEST(format)                                         \
  WASM_SIMD_COMPILED_AND_LOWERED_TEST(S##format##Select) {                    \
    WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);      \
    byte val1 = 0;                                                            \
    byte val2 = 1;                                                            \
    byte src1 = r.AllocateLocal(kWasmS128);                                   \
    byte src2 = r.AllocateLocal(kWasmS128);                                   \
    byte zero = r.AllocateLocal(kWasmS128);                                   \
    byte mask = r.AllocateLocal(kWasmS128);                                   \
    BUILD(r,                                                                  \
          WASM_SET_LOCAL(src1,                                                \
                         WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val1))),  \
          WASM_SET_LOCAL(src2,                                                \
                         WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val2))),  \
          WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)),       \
          WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE(            \
                                   1, WASM_GET_LOCAL(zero), WASM_I32V(-1))),  \
          WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE(            \
                                   2, WASM_GET_LOCAL(mask), WASM_I32V(-1))),  \
          WASM_SET_LOCAL(                                                     \
              mask,                                                           \
              WASM_SIMD_SELECT(                                               \
                  format,                                                     \
                  WASM_SIMD_BINOP(kExprI##format##Ne, WASM_GET_LOCAL(mask),   \
                                  WASM_GET_LOCAL(zero)),                      \
                  WASM_GET_LOCAL(src1), WASM_GET_LOCAL(src2))),               \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0),                \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 1),                \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 2),                \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 3), WASM_ONE);     \
                                                                              \
    CHECK_EQ(1, r.Call(0x12, 0x34));                                          \
  }

WASM_SIMD_SELECT_TEST(32x4)
WASM_SIMD_SELECT_TEST(16x8)
WASM_SIMD_SELECT_TEST(8x16)

// Test Select by making a mask where the 1st and 2nd lanes are non-zero and
// the rest 0. The mask is not the result of a comparison op.
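// Worked example for the lane checks below: with val1 = 0x12, val2 = 0x34 and
// lanes 1 and 2 of the mask holding 0xF, a bitwise select yields
// (0xF & 0x12) | (~0xF & 0x34) = 0x02 | 0x30 = 0x32 in those lanes, which is
// the 'combined' value passed as the third call argument.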
#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format)                           \
  WASM_SIMD_COMPILED_AND_LOWERED_TEST(S##format##NonCanonicalSelect) {        \
    WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode,          \
                                                     lower_simd);             \
    byte val1 = 0;                                                            \
    byte val2 = 1;                                                            \
    byte combined = 2;                                                        \
    byte src1 = r.AllocateLocal(kWasmS128);                                   \
    byte src2 = r.AllocateLocal(kWasmS128);                                   \
    byte zero = r.AllocateLocal(kWasmS128);                                   \
    byte mask = r.AllocateLocal(kWasmS128);                                   \
    BUILD(r,                                                                  \
          WASM_SET_LOCAL(src1,                                                \
                         WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val1))),  \
          WASM_SET_LOCAL(src2,                                                \
                         WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val2))),  \
          WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)),       \
          WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE(            \
                                   1, WASM_GET_LOCAL(zero), WASM_I32V(0xF))), \
          WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE(            \
                                   2, WASM_GET_LOCAL(mask), WASM_I32V(0xF))), \
          WASM_SET_LOCAL(mask, WASM_SIMD_SELECT(format, WASM_GET_LOCAL(mask), \
                                                WASM_GET_LOCAL(src1),         \
                                                WASM_GET_LOCAL(src2))),       \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0),                \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 1),            \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 2),            \
          WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 3), WASM_ONE);     \
                                                                              \
    CHECK_EQ(1, r.Call(0x12, 0x34, 0x32));                                    \
  }

WASM_SIMD_NON_CANONICAL_SELECT_TEST(32x4)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(16x8)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(8x16)

// Test binary ops with two lane test patterns, all lanes distinct.
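// For kExprS8x16Shuffle the 'expected' array doubles as the shuffle pattern:
// with inputs [0..15] and [16..31] the shuffled bytes equal the pattern
// itself. For ordinary binary ops it is simply the expected lane-wise result.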
template <typename T>
void RunBinaryLaneOpTest(
    WasmExecutionMode execution_mode, LowerSimd lower_simd, WasmOpcode simd_op,
    const std::array<T, kSimd128Size / sizeof(T)>& expected) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  // Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
  T* src0 = r.builder().AddGlobal<T>(kWasmS128);
  T* src1 = r.builder().AddGlobal<T>(kWasmS128);
  static const int kElems = kSimd128Size / sizeof(T);
  for (int i = 0; i < kElems; i++) {
    src0[i] = i;
    src1[i] = kElems + i;
  }
  if (simd_op == kExprS8x16Shuffle) {
    BUILD(r,
          WASM_SET_GLOBAL(0, WASM_SIMD_S8x16_SHUFFLE_OP(simd_op, expected,
                                                        WASM_GET_GLOBAL(0),
                                                        WASM_GET_GLOBAL(1))),
          WASM_ONE);
  } else {
    BUILD(r,
          WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(simd_op, WASM_GET_GLOBAL(0),
                                             WASM_GET_GLOBAL(1))),
          WASM_ONE);
  }

  CHECK_EQ(1, r.Call());
  for (size_t i = 0; i < expected.size(); i++) {
    CHECK_EQ(src0[i], expected[i]);
  }
}
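
// With the test pattern globals above (src0 = [0, 1, 2, 3], src1 = [4, 5, 6,
// 7] for i32x4), horizontal add sums adjacent pairs across both inputs, so
// the expected result is [0+1, 2+3, 4+5, 6+7] = [1, 5, 9, 13].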
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I32x4AddHoriz) {
  RunBinaryLaneOpTest<int32_t>(execution_mode, lower_simd, kExprI32x4AddHoriz,
                               {{1, 5, 9, 13}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(I16x8AddHoriz) {
  RunBinaryLaneOpTest<int16_t>(execution_mode, lower_simd, kExprI16x8AddHoriz,
                               {{1, 5, 9, 13, 17, 21, 25, 29}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(F32x4AddHoriz) {
  RunBinaryLaneOpTest<float>(execution_mode, lower_simd, kExprF32x4AddHoriz,
                             {{1.0f, 5.0f, 9.0f, 13.0f}});
}

// Test shuffle ops.
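// Shuffle lane indices follow the S8x16Shuffle convention used by
// RunBinaryLaneOpTest above: indices 0-15 select bytes from the first input
// and 16-31 from the second. XOR-ing an index with kSimd128Size therefore
// swaps which input it reads, and masking with (kSimd128Size - 1) turns the
// shuffle into a one-input swizzle.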
template <typename T>
void RunShuffleOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
                      WasmOpcode simd_op,
                      const std::array<T, kSimd128Size / sizeof(T)>& shuffle) {
  // Test the original shuffle.
  RunBinaryLaneOpTest<T>(execution_mode, lower_simd, simd_op, shuffle);

  // Test a non-canonical (inputs reversed) version of the shuffle.
  std::array<T, kSimd128Size / sizeof(T)> other_shuffle(shuffle);
  for (size_t i = 0; i < shuffle.size(); ++i) other_shuffle[i] ^= kSimd128Size;
  RunBinaryLaneOpTest<T>(execution_mode, lower_simd, simd_op, other_shuffle);

  // Test the swizzle (one-operand) version of the shuffle.
  std::array<T, kSimd128Size / sizeof(T)> swizzle(shuffle);
  for (size_t i = 0; i < shuffle.size(); ++i) swizzle[i] &= (kSimd128Size - 1);
  RunBinaryLaneOpTest<T>(execution_mode, lower_simd, simd_op, swizzle);

  // Test the non-canonical swizzle (one-operand) version of the shuffle.
  std::array<T, kSimd128Size / sizeof(T)> other_swizzle(shuffle);
  for (size_t i = 0; i < shuffle.size(); ++i) other_swizzle[i] |= kSimd128Size;
  RunBinaryLaneOpTest<T>(execution_mode, lower_simd, simd_op, other_swizzle);
}

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Test some regular shuffles that may have special handling on some targets.
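// For instance, S32x4Dup broadcasts a single 32-bit lane: the byte indices
// {16, 17, 18, 19} repeated four times copy the first word of the second
// input into every lane of the result.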
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x4Dup) {
|
2018-06-09 17:01:36 +00:00
|
|
|
RunShuffleOpTest<int8_t>(
|
2018-05-18 21:47:59 +00:00
|
|
|
execution_mode, lower_simd, kExprS8x16Shuffle,
|
2017-07-20 18:55:32 +00:00
|
|
|
{{16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19}});
|
|
|
|
}
|
|
|
|
|
2018-05-18 21:47:59 +00:00
|
|
|
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x4ZipLeft) {
|
2018-06-09 17:01:36 +00:00
|
|
|
RunShuffleOpTest<int8_t>(
|
2018-05-18 21:47:59 +00:00
|
|
|
execution_mode, lower_simd, kExprS8x16Shuffle,
|
2017-06-13 23:40:51 +00:00
|
|
|
{{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}});
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
}
|
|
|
|
|
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x4ZipRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x4UnzipLeft) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x4UnzipRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x4TransposeLeft) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x4TransposeRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}});
}

// Reverses are only unary.
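// The reverse masks use only indices 0-15, so the second input is ignored.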
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x2Reverse) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}});
}

// Test irregular shuffle.
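// The mask below reuses bytes 16-19 twice, so it matches none of the
// canonical shuffle patterns above.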
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S32x4Irregular) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 1, 2, 3, 16, 17, 18, 19, 16, 17, 18, 19, 20, 21, 22, 23}});
}

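// Dup shuffles broadcast a single lane of one input; S16x8Dup repeats 16-bit
// lane 1 of the second input across the whole vector.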
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x8Dup) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x8ZipLeft) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x8ZipRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x8UnzipLeft) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x8UnzipRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x8TransposeLeft) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x8TransposeRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}});
}

// TODO(simd) 'Reverse' tests should be 2-operand shuffles, not swizzles.
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x4Reverse) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x2Reverse) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S16x8Irregular) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 1, 16, 17, 16, 17, 0, 1, 4, 5, 20, 21, 6, 7, 22, 23}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16Dup) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16ZipLeft) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16ZipRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16UnzipLeft) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16UnzipRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16TransposeLeft) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16TransposeRight) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x8Reverse) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x4Reverse) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x2Reverse) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}});
}

WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16Irregular) {
  RunShuffleOpTest<int8_t>(
      execution_mode, lower_simd, kExprS8x16Shuffle,
      {{0, 16, 0, 16, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}});
}

// Test shuffles that blend the two vectors (elements remain in their lanes.)
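// For example, bias 4 yields {0, 1, 2, 3, 20, 21, ..., 31}: the first four
// bytes come from the first vector and the remaining twelve from the second.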
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16Blend) {
  static const int kLanes = 16;
  std::array<uint8_t, kLanes> expected;
  for (int bias = 1; bias < kLanes; bias++) {
    for (int i = 0; i < bias; i++) expected[i] = i;
    for (int i = bias; i < kLanes; i++) expected[i] = i + kLanes;
    RunShuffleOpTest(execution_mode, lower_simd, kExprS8x16Shuffle, expected);
  }
}

// Test shuffles that concatenate the two vectors.
WASM_SIMD_COMPILED_AND_LOWERED_TEST(S8x16Concat) {
  static const int kLanes = 16;
  std::array<uint8_t, kLanes> expected;
  // n is offset or bias of concatenation.
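  // For example, n = 4 yields {4, 5, ..., 15, 16, 17, 18, 19}: the last twelve
  // bytes of the first vector followed by the first four bytes of the second.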
  for (int n = 1; n < kLanes; ++n) {
    int i = 0;
    // last kLanes - n bytes of first vector.
    for (int j = n; j < kLanes; ++j) {
      expected[i++] = j;
    }
    // first n bytes of second vector
    for (int j = 0; j < n; ++j) {
      expected[i++] = j + kLanes;
    }
    RunShuffleOpTest(execution_mode, lower_simd, kExprS8x16Shuffle, expected);
  }
}

// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
// result. Use relational ops on numeric vectors to create the boolean vector
// test inputs. Test inputs with all true, all false, one true, and one false.
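// With an all-zeros vector, Eq yields all-true lanes (AnyTrue and AllTrue both
// non-zero) and Ne yields all-false lanes (both zero). With exactly one lane
// set to 1, Eq and Ne are each true in some but not all lanes, so AnyTrue is
// non-zero and AllTrue is zero for both comparisons.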
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes) \
  WASM_SIMD_COMPILED_AND_LOWERED_TEST(ReductionTest##lanes) { \
    WasmRunner<int32_t> r(execution_mode, lower_simd); \
    byte zero = r.AllocateLocal(kWasmS128); \
    byte one_one = r.AllocateLocal(kWasmS128); \
    byte reduced = r.AllocateLocal(kWasmI32); \
    BUILD(r, WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
          WASM_SET_LOCAL( \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
                                      WASM_SIMD_BINOP(kExprI##format##Eq, \
                                                      WASM_GET_LOCAL(zero), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO), \
                  WASM_RETURN1(WASM_ZERO)), \
          WASM_SET_LOCAL( \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
                                      WASM_SIMD_BINOP(kExprI##format##Ne, \
                                                      WASM_GET_LOCAL(zero), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO), \
                  WASM_RETURN1(WASM_ZERO)), \
          WASM_SET_LOCAL( \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, \
                                      WASM_SIMD_BINOP(kExprI##format##Eq, \
                                                      WASM_GET_LOCAL(zero), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO), \
                  WASM_RETURN1(WASM_ZERO)), \
          WASM_SET_LOCAL( \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, \
                                      WASM_SIMD_BINOP(kExprI##format##Ne, \
                                                      WASM_GET_LOCAL(zero), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO), \
                  WASM_RETURN1(WASM_ZERO)), \
          WASM_SET_LOCAL(one_one, \
                         WASM_SIMD_I##format##_REPLACE_LANE( \
                             lanes - 1, WASM_GET_LOCAL(zero), WASM_ONE)), \
          WASM_SET_LOCAL( \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
                                      WASM_SIMD_BINOP(kExprI##format##Eq, \
                                                      WASM_GET_LOCAL(one_one), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO), \
                  WASM_RETURN1(WASM_ZERO)), \
          WASM_SET_LOCAL( \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
                                      WASM_SIMD_BINOP(kExprI##format##Ne, \
                                                      WASM_GET_LOCAL(one_one), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO), \
                  WASM_RETURN1(WASM_ZERO)), \
          WASM_SET_LOCAL( \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, \
                                      WASM_SIMD_BINOP(kExprI##format##Eq, \
                                                      WASM_GET_LOCAL(one_one), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO), \
                  WASM_RETURN1(WASM_ZERO)), \
          WASM_SET_LOCAL( \
              reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, \
                                      WASM_SIMD_BINOP(kExprI##format##Ne, \
                                                      WASM_GET_LOCAL(one_one), \
                                                      WASM_GET_LOCAL(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO), \
                  WASM_RETURN1(WASM_ZERO)), \
          WASM_ONE); \
    CHECK_EQ(1, r.Call()); \
  }

WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4)
WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8)
WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16)

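// Extracting an i32 lane from a splatted f32x4 should observe the float's raw
// bit pattern, which is what the reinterpret comparison below checks.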
WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  BUILD(r, WASM_IF_ELSE_I(
               WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
                               0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
                           WASM_I32_REINTERPRET_F32(WASM_F32(30.5))),
               WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}
#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
        // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32

WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
                                       0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
                                   WASM_F32_REINTERPRET_I32(WASM_I32V(15))),
                       WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
  // Choose two floating point values whose sum is normal and exactly
  // representable as a float.
  const int kOne = 0x3F800000;
  const int kTwo = 0x40000000;
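  // kOne and kTwo are the bit patterns of 1.0f and 2.0f, so the expected lane
  // value after the add is 3.0f.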
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(
            WASM_F32_EQ(
                WASM_SIMD_F32x4_EXTRACT_LANE(
                    0, WASM_SIMD_BINOP(kExprF32x4Add,
                                       WASM_SIMD_I32x4_SPLAT(WASM_I32V(kOne)),
                                       WASM_SIMD_I32x4_SPLAT(WASM_I32V(kTwo)))),
                WASM_F32_ADD(WASM_F32_REINTERPRET_I32(WASM_I32V(kOne)),
                             WASM_F32_REINTERPRET_I32(WASM_I32V(kTwo)))),
            WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(
            WASM_I32_EQ(
                WASM_SIMD_I32x4_EXTRACT_LANE(
                    0, WASM_SIMD_BINOP(kExprI32x4Add,
                                       WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25)),
                                       WASM_SIMD_F32x4_SPLAT(WASM_F32(31.5)))),
                WASM_I32_ADD(WASM_I32_REINTERPRET_F32(WASM_F32(21.25)),
                             WASM_I32_REINTERPRET_F32(WASM_F32(31.5)))),
            WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdI32x4Local) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
        WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(0)));
  CHECK_EQ(31, r.Call());
}

WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  r.AllocateLocal(kWasmI32);
  r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_EXTRACT_LANE(
                                 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(76)))),
        WASM_SET_LOCAL(1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0))),
        WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_LOCAL(1)));
  CHECK_EQ(76, r.Call());
}

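// The loop below adds splat(1) to the vector until the counter reaches 5,
// taking {31, 53, 23, 31} to the expected {36, 58, 28, 36}.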
WASM_SIMD_TEST(SimdI32x4For) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  r.AllocateLocal(kWasmI32);
  r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_SET_LOCAL(1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
        WASM_SET_LOCAL(1, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_LOCAL(1),
                                                       WASM_I32V(53))),
        WASM_SET_LOCAL(1, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_LOCAL(1),
                                                       WASM_I32V(23))),
        WASM_SET_LOCAL(0, WASM_I32V(0)),
        WASM_LOOP(
            WASM_SET_LOCAL(
                1, WASM_SIMD_BINOP(kExprI32x4Add, WASM_GET_LOCAL(1),
                                   WASM_SIMD_I32x4_SPLAT(WASM_I32V(1)))),
            WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(5)), WASM_BR(1))),
        WASM_SET_LOCAL(0, WASM_I32V(1)),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(1)),
                            WASM_I32V(36)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_LOCAL(1)),
                            WASM_I32V(58)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GET_LOCAL(1)),
                            WASM_I32V(28)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GET_LOCAL(1)),
                            WASM_I32V(36)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_GET_LOCAL(0));
  CHECK_EQ(1, r.Call());
}

WASM_SIMD_TEST(SimdF32x4For) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  r.AllocateLocal(kWasmI32);
  r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
        WASM_SET_LOCAL(1, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GET_LOCAL(1),
                                                       WASM_F32(19.5))),
        WASM_SET_LOCAL(0, WASM_I32V(0)),
        WASM_LOOP(
            WASM_SET_LOCAL(
                1, WASM_SIMD_BINOP(kExprF32x4Add, WASM_GET_LOCAL(1),
                                   WASM_SIMD_F32x4_SPLAT(WASM_F32(2.0)))),
            WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(3)), WASM_BR(1))),
        WASM_SET_LOCAL(0, WASM_I32V(1)),
        WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(1)),
                            WASM_F32(27.25)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GET_LOCAL(1)),
                            WASM_F32(25.5)),
                WASM_SET_LOCAL(0, WASM_I32V(0))),
        WASM_GET_LOCAL(0));
  CHECK_EQ(1, r.Call());
}

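// Helpers for reading and writing the lanes of a 128-bit global. On big-endian
// targets the lanes are stored in reverse order, so the lane index is
// mirrored.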
template <typename T, int numLanes = 4>
void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
  for (int lane = 0; lane < numLanes; lane++) {
    const T& value = arr[lane];
#if defined(V8_TARGET_BIG_ENDIAN)
    v[numLanes - 1 - lane] = value;
#else
    v[lane] = value;
#endif
  }
}

template <typename T>
const T& GetScalar(T* v, int lane) {
  constexpr int kElems = kSimd128Size / sizeof(T);
#if defined(V8_TARGET_BIG_ENDIAN)
  const int index = kElems - 1 - lane;
#else
  const int index = lane;
#endif
  USE(kElems);
  DCHECK(index >= 0 && index < kElems);
  return v[index];
}

WASM_SIMD_TEST(SimdI32x4GetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
  // Pad the globals with a few unused slots to get a non-zero offset.
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
  SetVectorByLanes(global, {{0, 1, 2, 3}});
  r.AllocateLocal(kWasmI32);
  BUILD(
      r, WASM_SET_LOCAL(1, WASM_I32V(1)),
      WASM_IF(WASM_I32_NE(WASM_I32V(0),
                          WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_GLOBAL(4))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(1),
                          WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_GLOBAL(4))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(2),
                          WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GET_GLOBAL(4))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(3),
                          WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GET_GLOBAL(4))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_GET_LOCAL(1));
  CHECK_EQ(1, r.Call(0));
}

WASM_SIMD_TEST(SimdI32x4SetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
  // Pad the globals with a few unused slots to get a non-zero offset.
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
  BUILD(r, WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
        WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(4),
                                                        WASM_I32V(34))),
        WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_GLOBAL(4),
                                                        WASM_I32V(45))),
        WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(3, WASM_GET_GLOBAL(4),
                                                        WASM_I32V(56))),
        WASM_I32V(1));
  CHECK_EQ(1, r.Call(0));
  CHECK_EQ(GetScalar(global, 0), 23);
  CHECK_EQ(GetScalar(global, 1), 34);
  CHECK_EQ(GetScalar(global, 2), 45);
  CHECK_EQ(GetScalar(global, 3), 56);
}

WASM_SIMD_TEST(SimdF32x4GetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
  float* global = r.builder().AddGlobal<float>(kWasmS128);
  SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
  r.AllocateLocal(kWasmI32);
  BUILD(
      r, WASM_SET_LOCAL(1, WASM_I32V(1)),
      WASM_IF(WASM_F32_NE(WASM_F32(0.0),
                          WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GET_GLOBAL(0))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(1.5),
                          WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_GET_GLOBAL(0))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(2.25),
                          WASM_SIMD_F32x4_EXTRACT_LANE(2, WASM_GET_GLOBAL(0))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(3.5),
                          WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GET_GLOBAL(0))),
              WASM_SET_LOCAL(1, WASM_I32V(0))),
      WASM_GET_LOCAL(1));
  CHECK_EQ(1, r.Call(0));
}

WASM_SIMD_TEST(SimdF32x4SetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
  float* global = r.builder().AddGlobal<float>(kWasmS128);
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(0),
                                                        WASM_F32(45.5))),
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(2, WASM_GET_GLOBAL(0),
                                                        WASM_F32(32.25))),
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GET_GLOBAL(0),
                                                        WASM_F32(65.0))),
        WASM_I32V(1));
  CHECK_EQ(1, r.Call(0));
  CHECK_EQ(GetScalar(global, 0), 13.5f);
  CHECK_EQ(GetScalar(global, 1), 45.5f);
  CHECK_EQ(GetScalar(global, 2), 32.25f);
  CHECK_EQ(GetScalar(global, 3), 65.0f);
}

WASM_SIMD_TEST(SimdLoadStoreLoad) {
  WasmRunner<int32_t> r(execution_mode, lower_simd);
  int32_t* memory =
      r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
  // Load memory, store it, then reload it and extract the first lane. Use a
  // non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
  BUILD(r, WASM_SIMD_STORE_MEM(WASM_I32V(4), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
        WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(4))));

  FOR_INT32_INPUTS(i) {
    int32_t expected = *i;
    r.builder().WriteMemory(&memory[1], expected);
    CHECK_EQ(expected, r.Call());
  }
}

#undef WASM_SIMD_TEST
#undef WASM_SIMD_COMPILED_AND_LOWERED_TEST
#undef WASM_SIMD_COMPILED_TEST
#undef WASM_SIMD_CHECK_LANE
#undef WASM_SIMD_CHECK4
#undef WASM_SIMD_CHECK_SPLAT4
#undef WASM_SIMD_CHECK8
#undef WASM_SIMD_CHECK_SPLAT8
#undef WASM_SIMD_CHECK16
#undef WASM_SIMD_CHECK_SPLAT16
#undef WASM_SIMD_CHECK_F32_LANE
#undef WASM_SIMD_CHECK_F32x4
#undef WASM_SIMD_CHECK_SPLAT_F32x4
#undef WASM_SIMD_CHECK_F32_LANE_ESTIMATE
#undef WASM_SIMD_CHECK_SPLAT_F32x4_ESTIMATE
#undef TO_BYTE
#undef WASM_SIMD_OP
#undef WASM_SIMD_SPLAT
#undef WASM_SIMD_UNOP
#undef WASM_SIMD_BINOP
#undef WASM_SIMD_SHIFT_OP
#undef WASM_SIMD_CONCAT_OP
#undef WASM_SIMD_SELECT
#undef WASM_SIMD_F32x4_SPLAT
#undef WASM_SIMD_F32x4_EXTRACT_LANE
#undef WASM_SIMD_F32x4_REPLACE_LANE
#undef WASM_SIMD_I32x4_SPLAT
#undef WASM_SIMD_I32x4_EXTRACT_LANE
#undef WASM_SIMD_I32x4_REPLACE_LANE
#undef WASM_SIMD_I16x8_SPLAT
#undef WASM_SIMD_I16x8_EXTRACT_LANE
#undef WASM_SIMD_I16x8_REPLACE_LANE
#undef WASM_SIMD_I8x16_SPLAT
#undef WASM_SIMD_I8x16_EXTRACT_LANE
#undef WASM_SIMD_I8x16_REPLACE_LANE
#undef WASM_SIMD_S8x16_SHUFFLE_OP
#undef WASM_SIMD_LOAD_MEM
#undef WASM_SIMD_STORE_MEM
#undef WASM_SIMD_SELECT_TEST
#undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
#undef WASM_SIMD_BOOL_REDUCTION_TEST

}  // namespace test_run_wasm_simd
}  // namespace wasm
}  // namespace internal
}  // namespace v8