2016-08-23 19:59:19 +00:00
|
|
|
// Copyright 2016 the V8 project authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file.
|
|
|
|
|
2020-11-02 07:23:29 +00:00
|
|
|
#include <algorithm>
|
|
|
|
#include <array>
|
|
|
|
#include <cmath>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <cstring>
|
2020-05-18 22:40:02 +00:00
|
|
|
#include <limits>
|
2020-11-02 07:23:29 +00:00
|
|
|
#include <tuple>
|
2019-01-10 11:47:08 +00:00
|
|
|
#include <type_traits>
|
2020-11-02 07:23:29 +00:00
|
|
|
#include <vector>
|
2019-01-10 11:47:08 +00:00
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
#include "src/base/bits.h"
|
2020-11-02 07:23:29 +00:00
|
|
|
#include "src/base/logging.h"
|
|
|
|
#include "src/base/macros.h"
|
|
|
|
#include "src/base/memory.h"
|
2019-01-10 11:47:08 +00:00
|
|
|
#include "src/base/overflowing-math.h"
|
2021-01-19 23:19:03 +00:00
|
|
|
#include "src/base/safe_conversions.h"
|
2020-11-02 07:23:29 +00:00
|
|
|
#include "src/base/utils/random-number-generator.h"
|
2019-05-21 09:30:15 +00:00
|
|
|
#include "src/codegen/assembler-inl.h"
|
2020-11-02 07:23:29 +00:00
|
|
|
#include "src/codegen/cpu-features.h"
|
|
|
|
#include "src/codegen/machine-type.h"
|
2020-10-28 02:43:49 +00:00
|
|
|
#include "src/common/globals.h"
|
2020-11-02 07:23:29 +00:00
|
|
|
#include "src/flags/flags.h"
|
|
|
|
#include "src/utils/utils.h"
|
|
|
|
#include "src/utils/vector.h"
|
|
|
|
#include "src/wasm/compilation-environment.h"
|
|
|
|
#include "src/wasm/value-type.h"
|
|
|
|
#include "src/wasm/wasm-constants.h"
|
2020-10-12 17:09:03 +00:00
|
|
|
#include "src/wasm/wasm-opcodes.h"
|
2016-08-23 19:59:19 +00:00
|
|
|
#include "test/cctest/cctest.h"
|
|
|
|
#include "test/cctest/compiler/value-helper.h"
|
|
|
|
#include "test/cctest/wasm/wasm-run-utils.h"
|
2020-11-02 07:23:29 +00:00
|
|
|
#include "test/common/flag-utils.h"
|
|
|
|
#include "test/common/wasm/flag-utils.h"
|
2017-04-25 11:29:17 +00:00
|
|
|
#include "test/common/wasm/wasm-macro-gen.h"
|
2016-08-23 19:59:19 +00:00
|
|
|
|
2017-09-01 12:57:34 +00:00
|
|
|
namespace v8 {
|
|
|
|
namespace internal {
|
|
|
|
namespace wasm {
|
2017-09-21 03:29:52 +00:00
|
|
|
namespace test_run_wasm_simd {
|
2016-08-23 19:59:19 +00:00
|
|
|
|
[Turbofan] Add native ARM support for basic SIMD 32x4 operations.
- Adds Float32x4 ExtractLane, ReplaceLane, Splat, Add, Sub,
and conversions to Int32x4 and Uint32x4.
- Adds Int32x4 ExtractLane, ReplaceLane, Splat, Add, Sub and
conversions to Float32x4 (int and unsigned int).
- Adds Int32x4 CompareEqual, CompareNotEqual.
- Adds S32x4 Select.
- Adds tests for all new SIMD operations.
LOG=N
BUG=v8:4124
Review-Url: https://codereview.chromium.org/2584863002
Cr-Commit-Position: refs/heads/master@{#41828}
2016-12-19 22:23:03 +00:00
|
|
|
namespace {
|
|
|
|
|
2019-07-18 09:03:02 +00:00
|
|
|
using DoubleUnOp = double (*)(double);
|
2019-07-24 20:01:43 +00:00
|
|
|
using DoubleBinOp = double (*)(double, double);
|
2019-07-16 02:55:31 +00:00
|
|
|
using DoubleCompareOp = int64_t (*)(double, double);
|
2019-03-29 09:40:26 +00:00
|
|
|
using FloatUnOp = float (*)(float);
|
|
|
|
using FloatBinOp = float (*)(float, float);
|
|
|
|
using FloatCompareOp = int (*)(float, float);
|
2019-07-02 20:56:11 +00:00
|
|
|
using Int64UnOp = int64_t (*)(int64_t);
|
2019-07-02 16:15:05 +00:00
|
|
|
using Int64BinOp = int64_t (*)(int64_t, int64_t);
|
2019-07-03 16:11:59 +00:00
|
|
|
using Int64ShiftOp = int64_t (*)(int64_t, int);
|
2019-03-29 09:40:26 +00:00
|
|
|
using Int32UnOp = int32_t (*)(int32_t);
|
|
|
|
using Int32BinOp = int32_t (*)(int32_t, int32_t);
|
|
|
|
using Int32CompareOp = int (*)(int32_t, int32_t);
|
|
|
|
using Int32ShiftOp = int32_t (*)(int32_t, int);
|
|
|
|
using Int16UnOp = int16_t (*)(int16_t);
|
|
|
|
using Int16BinOp = int16_t (*)(int16_t, int16_t);
|
|
|
|
using Int16CompareOp = int (*)(int16_t, int16_t);
|
|
|
|
using Int16ShiftOp = int16_t (*)(int16_t, int);
|
|
|
|
using Int8UnOp = int8_t (*)(int8_t);
|
|
|
|
using Int8BinOp = int8_t (*)(int8_t, int8_t);
|
|
|
|
using Int8CompareOp = int (*)(int8_t, int8_t);
|
|
|
|
using Int8ShiftOp = int8_t (*)(int8_t, int);
|
[Turbofan] Add native ARM support for basic SIMD 32x4 operations.
- Adds Float32x4 ExtractLane, ReplaceLane, Splat, Add, Sub,
and conversions to Int32x4 and Uint32x4.
- Adds Int32x4 ExtractLane, ReplaceLane, Splat, Add, Sub and
conversions to Float32x4 (int and unsigned int).
- Adds Int32x4 CompareEqual, CompareNotEqual.
- Adds S32x4 Select.
- Adds tests for all new SIMD operations.
LOG=N
BUG=v8:4124
Review-Url: https://codereview.chromium.org/2584863002
Cr-Commit-Position: refs/heads/master@{#41828}
2016-12-19 22:23:03 +00:00
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Instantiates one cctest per execution tier (TurboFan, Liftoff, the
// interpreter, and scalar-lowered TurboFan) for a single SIMD test body.
// The body follows the macro invocation and receives `lower_simd` and
// `execution_tier` as parameters.
#define WASM_SIMD_TEST(name)                                           \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                     \
                             TestExecutionTier execution_tier);        \
  TEST(RunWasm_##name##_turbofan) {                                    \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan); \
  }                                                                    \
  TEST(RunWasm_##name##_liftoff) {                                     \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kLiftoff);  \
  }                                                                    \
  TEST(RunWasm_##name##_interpreter) {                                 \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    RunWasm_##name##_Impl(kNoLowerSimd,                                \
                          TestExecutionTier::kInterpreter);            \
  }                                                                    \
  TEST(RunWasm_##name##_simd_lowered) {                                \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    RunWasm_##name##_Impl(kLowerSimd, TestExecutionTier::kTurbofan);   \
  }                                                                    \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                     \
                             TestExecutionTier execution_tier)
|
2020-01-23 21:02:10 +00:00
|
|
|
|
2017-01-26 02:18:00 +00:00
|
|
|
// Generic expected value functions.
// Negation reference for floating-point lanes (integral types must use the
// wraparound helpers from base/overflowing-math.h instead).
template <typename T,
          typename = std::enable_if_t<std::is_floating_point<T>::value>>
T Negate(T value) {
  return -value;
}
|
|
|
|
|
2019-01-10 11:47:08 +00:00
|
|
|
// For signed integral types, use base::AddWithWraparound.
// Floating-point addition reference for expected-value computation.
template <typename T,
          typename = std::enable_if_t<std::is_floating_point<T>::value>>
T Add(T lhs, T rhs) {
  return lhs + rhs;
}
|
|
|
|
|
2019-01-10 11:47:08 +00:00
|
|
|
// For signed integral types, use base::SubWithWraparound.
// Floating-point subtraction reference for expected-value computation.
template <typename T,
          typename = std::enable_if_t<std::is_floating_point<T>::value>>
T Sub(T lhs, T rhs) {
  return lhs - rhs;
}
|
|
|
|
|
2019-01-10 11:47:08 +00:00
|
|
|
// For signed integral types, use base::MulWithWraparound.
// Floating-point multiplication reference for expected-value computation.
template <typename T,
          typename = std::enable_if_t<std::is_floating_point<T>::value>>
T Mul(T lhs, T rhs) {
  return lhs * rhs;
}
|
|
|
|
|
2017-02-07 17:33:37 +00:00
|
|
|
// Signed minimum reference (equivalent to std::min).
template <typename T>
T Minimum(T lhs, T rhs) {
  return rhs < lhs ? rhs : lhs;
}

// Signed maximum reference (equivalent to std::max).
template <typename T>
T Maximum(T lhs, T rhs) {
  return lhs < rhs ? rhs : lhs;
}
|
|
|
|
|
|
|
|
// Minimum after reinterpreting both operands as unsigned; the result keeps
// the original (signed) representation.
template <typename T>
T UnsignedMinimum(T lhs, T rhs) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT ul = static_cast<UnsignedT>(lhs);
  const UnsignedT ur = static_cast<UnsignedT>(rhs);
  return ul <= ur ? lhs : rhs;
}

// Maximum after reinterpreting both operands as unsigned; the result keeps
// the original (signed) representation.
template <typename T>
T UnsignedMaximum(T lhs, T rhs) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT ul = static_cast<UnsignedT>(lhs);
  const UnsignedT ur = static_cast<UnsignedT>(rhs);
  return ul >= ur ? lhs : rhs;
}
|
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
// Equality compare producing an all-ones (-1) / all-zeros lane mask.
// Float lanes produce an int mask; integral lanes produce a mask of their
// own type.
int Equal(float a, float b) { return (a == b) ? -1 : 0; }

template <typename T>
T Equal(T a, T b) {
  return (a == b) ? T(-1) : T(0);
}
|
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
// Inequality compare producing an all-ones (-1) / all-zeros lane mask.
int NotEqual(float a, float b) { return (a != b) ? -1 : 0; }

template <typename T>
T NotEqual(T a, T b) {
  return (a != b) ? T(-1) : T(0);
}
|
2016-12-20 16:49:53 +00:00
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
// Signed less-than compare producing an all-ones (-1) / all-zeros lane mask.
int Less(float a, float b) { return (a < b) ? -1 : 0; }

template <typename T>
T Less(T a, T b) {
  return (a < b) ? T(-1) : T(0);
}
|
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
// Signed less-or-equal compare producing an all-ones (-1) / all-zeros mask.
int LessEqual(float a, float b) { return (a <= b) ? -1 : 0; }

template <typename T>
T LessEqual(T a, T b) {
  return (a <= b) ? T(-1) : T(0);
}
|
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
// Signed greater-than compare producing an all-ones (-1) / all-zeros mask.
int Greater(float a, float b) { return (a > b) ? -1 : 0; }

template <typename T>
T Greater(T a, T b) {
  return (a > b) ? T(-1) : T(0);
}
|
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
// Signed greater-or-equal compare producing an all-ones (-1) / all-zeros
// mask.
int GreaterEqual(float a, float b) { return (a >= b) ? -1 : 0; }

template <typename T>
T GreaterEqual(T a, T b) {
  return (a >= b) ? T(-1) : T(0);
}
|
|
|
|
|
|
|
|
// Unsigned less-than compare: operands are reinterpreted as unsigned before
// comparing; the result is an all-ones (-1) / all-zeros lane mask.
template <typename T>
T UnsignedLess(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const bool less = static_cast<UnsignedT>(a) < static_cast<UnsignedT>(b);
  return less ? T(-1) : T(0);
}

// Unsigned less-or-equal compare, same mask convention as above.
template <typename T>
T UnsignedLessEqual(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const bool le = static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b);
  return le ? T(-1) : T(0);
}
|
|
|
|
|
|
|
|
// Unsigned greater-than compare: operands are reinterpreted as unsigned
// before comparing; the result is an all-ones (-1) / all-zeros lane mask.
template <typename T>
T UnsignedGreater(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const bool greater = static_cast<UnsignedT>(a) > static_cast<UnsignedT>(b);
  return greater ? T(-1) : T(0);
}

// Unsigned greater-or-equal compare, same mask convention as above.
template <typename T>
T UnsignedGreaterEqual(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const bool ge = static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b);
  return ge ? T(-1) : T(0);
}
|
2016-12-20 16:49:53 +00:00
|
|
|
|
2017-02-07 17:33:37 +00:00
|
|
|
// Logical (unsigned) shift left; the shift count is taken modulo the lane
// width in bits, matching wasm SIMD shift semantics.
template <typename T>
T LogicalShiftLeft(T value, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const int lane_bits = static_cast<int>(sizeof(T) * 8);
  return static_cast<UnsignedT>(value) << (shift % lane_bits);
}

// Logical (unsigned) shift right; the shift count is taken modulo the lane
// width in bits, so high bits are filled with zeros.
template <typename T>
T LogicalShiftRight(T value, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const int lane_bits = static_cast<int>(sizeof(T) * 8);
  return static_cast<UnsignedT>(value) >> (shift % lane_bits);
}
|
|
|
|
|
|
|
|
// Define our own ArithmeticShiftRight instead of using the one from utils.h
// because the shift amount needs to be taken modulo lane width, matching
// wasm SIMD shift semantics.
template <typename T>
T ArithmeticShiftRight(T value, int shift) {
  const int lane_bits = static_cast<int>(sizeof(T) * 8);
  return value >> (shift % lane_bits);
}
|
|
|
|
|
2020-02-21 04:14:53 +00:00
|
|
|
// Absolute-value reference, forwarding to the std::abs overload for T.
template <typename T>
T Abs(T value) {
  return std::abs(value);
}
|
|
|
|
|
2019-07-16 02:55:31 +00:00
|
|
|
// only used for F64x2 tests below
// Double-lane compares produce a 64-bit all-ones (-1) / all-zeros mask.
int64_t Equal(double a, double b) { return (a == b) ? -1 : 0; }

int64_t NotEqual(double a, double b) { return (a != b) ? -1 : 0; }

int64_t Greater(double a, double b) { return (a > b) ? -1 : 0; }

int64_t GreaterEqual(double a, double b) { return (a >= b) ? -1 : 0; }

int64_t Less(double a, double b) { return (a < b) ? -1 : 0; }

int64_t LessEqual(double a, double b) { return (a <= b) ? -1 : 0; }
|
2019-07-16 02:55:31 +00:00
|
|
|
|
2020-03-17 02:29:49 +00:00
|
|
|
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
|
[wasm-simd] Implement QFMA and QFMS on x64
Quasi Fused Multiply-Add and Quasi Fused Multiply-Subtract performs, on floats, a + b * c and a - b * c respectively.
When there is only a single rounding, it is a fused operation. Quasi in this case means that the result can either be fused or not fused (two roundings), depending on hardware support.
It is tricky to write the test because we need to calculate the expected value, and there is no easy way to express fused or unfused operation in C++, i.e.
we cannot confirm that float expected = a + b * c will perform a fused or unfused operation (unless we use intrinsics).
Thus in the test we have a list of simple checks, plus interesting values that we know will produce different results depending on whether it was fused or not.
The difference between 32x4 and 64x2 qfma/qfms is the type, and also the values of b and c that will cause an overflow, and thus the intermediate rounding will affect the final result.
The same array can be copy pasted for both types, but with a bit of templating we can avoid that duplication.
Change-Id: I0973a3d28468d25f310b593c72f21bff54d809a7
Bug: v8:9415
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1779325
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63878}
2019-09-18 19:44:10 +00:00
|
|
|
// Only used for qfma and qfms tests below.

// FMOperation holds the params (a, b, c) for a Multiply-Add or
// Multiply-Subtract operation, and the expected result if the operation was
// fused, rounded only once for the entire operation, or unfused, rounded after
// multiply and again after add/subtract.
template <typename T>
struct FMOperation {
  const T a;
  const T b;
  const T c;
  const T fused_result;
  const T unfused_result;
};

// large_n is a number big enough that large_n * large_n overflows T, which
// makes the fused and unfused results observably different.
template <typename T>
constexpr T large_n = T(0);

template <>
constexpr double large_n<double> = 1e200;

template <>
constexpr float large_n<float> = 1e20;

// Fused Multiply-Add performs a + b * c.
template <typename T>
static constexpr FMOperation<T> qfma_array[] = {
    // Plain case: fused and unfused agree.
    {1.0f, 2.0f, 3.0f, 7.0f, 7.0f},
    // fused: a + b * c = -inf + (positive overflow) = -inf
    // unfused: a + b * c = -inf + inf = NaN
    {-std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
     -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
    // fused: a + b * c = inf + (negative overflow) = inf
    // unfused: a + b * c = inf + -inf = NaN
    {std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
     std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
    // NaN input propagates either way.
    {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
     std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
    // -NaN input propagates either way.
    {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
     std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
static constexpr Vector<const FMOperation<T>> qfma_vector() {
|
|
|
|
return ArrayVector(qfma_array<T>);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fused Multiply-Subtract performs a - b * c.
|
|
|
|
template <typename T>
|
|
|
|
static constexpr FMOperation<T> qfms_array[]{
|
|
|
|
{1.0f, 2.0f, 3.0f, -5.0f, -5.0f},
|
|
|
|
// fused: a - b * c = inf - (positive overflow) = inf
|
|
|
|
// unfused: a - b * c = inf - inf = NaN
|
|
|
|
{std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
|
|
|
|
std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
|
|
|
|
// fused: a - b * c = -inf - (negative overflow) = -inf
|
|
|
|
// unfused: a - b * c = -inf - -inf = NaN
|
|
|
|
{-std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
|
|
|
|
-std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
|
|
|
|
// NaN
|
|
|
|
{std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
|
|
|
|
std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
|
|
|
|
// -NaN
|
|
|
|
{-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
|
|
|
|
std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
static constexpr Vector<const FMOperation<T>> qfms_vector() {
|
|
|
|
return ArrayVector(qfms_array<T>);
|
|
|
|
}
|
|
|
|
|
2020-02-21 02:57:29 +00:00
|
|
|
// Fused results only when fma3 feature is enabled, and running on TurboFan or
|
|
|
|
// Liftoff (which can fall back to TurboFan if FMA is not implemented).
|
2020-08-04 11:09:23 +00:00
|
|
|
bool ExpectFused(TestExecutionTier tier) {
|
2019-09-25 19:33:24 +00:00
|
|
|
#ifdef V8_TARGET_ARCH_X64
|
2020-02-21 02:57:29 +00:00
|
|
|
return CpuFeatures::IsSupported(FMA3) &&
|
2020-08-04 11:09:23 +00:00
|
|
|
(tier == TestExecutionTier::kTurbofan ||
|
|
|
|
tier == TestExecutionTier::kLiftoff);
|
2019-09-25 19:33:24 +00:00
|
|
|
#else
|
2020-08-04 11:09:23 +00:00
|
|
|
return (tier == TestExecutionTier::kTurbofan ||
|
|
|
|
tier == TestExecutionTier::kLiftoff);
|
2019-09-25 19:33:24 +00:00
|
|
|
#endif
|
[wasm-simd] Implement QFMA and QFMS on x64
Quasi Fused Multiply-Add and Quasi Fused Multiply-Subtract performs, on floats, a + b * c and a - b * c respectively.
When there is only a single rounding, it is a fused operation. Quasi in this case means that the result can either be fused or not fused (two roundings), depending on hardware support.
It is tricky to write the test because we need to calculate the expected value, and there is no easy way to express fused or unfused operation in C++, i.e.
we cannot confirm that float expected = a + b * c will perform a fused or unfused operation (unless we use intrinsics).
Thus in the test we have a list of simple checks, plus interesting values that we know will produce different results depending on whether it was fused or not.
The difference between 32x4 and 64x2 qfma/qfms is the type, and also the values of b and c that will cause an overflow, and thus the intermediate rounding will affect the final result.
The same array can be copy pasted for both types, but with a bit of templating we can avoid that duplication.
Change-Id: I0973a3d28468d25f310b593c72f21bff54d809a7
Bug: v8:9415
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1779325
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63878}
2019-09-18 19:44:10 +00:00
|
|
|
}
|
2020-03-17 02:29:49 +00:00
|
|
|
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
|
[wasm-simd] Implement QFMA and QFMS on x64
Quasi Fused Multiply-Add and Quasi Fused Multiply-Subtract performs, on floats, a + b * c and a - b * c respectively.
When there is only a single rounding, it is a fused operation. Quasi in this case means that the result can either be fused or not fused (two roundings), depending on hardware support.
It is tricky to write the test because we need to calculate the expected value, and there is no easy way to express fused or unfused operation in C++, i.e.
we cannot confirm that float expected = a + b * c will perform a fused or unfused operation (unless we use intrinsics).
Thus in the test we have a list of simple checks, plus interesting values that we know will produce different results depending on whether it was fused or not.
The difference between 32x4 and 64x2 qfma/qfms is the type, and also the values of b and c that will cause an overflow, and thus the intermediate rounding will affect the final result.
The same array can be copy pasted for both types, but with a bit of templating we can avoid that duplication.
Change-Id: I0973a3d28468d25f310b593c72f21bff54d809a7
Bug: v8:9415
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1779325
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63878}
2019-09-18 19:44:10 +00:00
|
|
|
|
[Turbofan] Add native ARM support for basic SIMD 32x4 operations.
- Adds Float32x4 ExtractLane, ReplaceLane, Splat, Add, Sub,
and conversions to Int32x4 and Uint32x4.
- Adds Int32x4 ExtractLane, ReplaceLane, Splat, Add, Sub and
conversions to Float32x4 (int and unsigned int).
- Adds Int32x4 CompareEqual, CompareNotEqual.
- Adds S32x4 Select.
- Adds tests for all new SIMD operations.
LOG=N
BUG=v8:4124
Review-Url: https://codereview.chromium.org/2584863002
Cr-Commit-Position: refs/heads/master@{#41828}
2016-12-19 22:23:03 +00:00
|
|
|
} // namespace
|
|
|
|
|
2019-10-08 23:43:15 +00:00
|
|
|
// Emits wasm code that returns 0 early if the (sign-extended) extracted lane
// of `value` does not equal `lane_value`.
#define WASM_SIMD_CHECK_LANE_S(TYPE, value, LANE_TYPE, lane_value, lane_index) \
  WASM_IF(WASM_##LANE_TYPE##_NE(WASM_LOCAL_GET(lane_value),                    \
                                WASM_SIMD_##TYPE##_EXTRACT_LANE(               \
                                    lane_index, WASM_LOCAL_GET(value))),       \
          WASM_RETURN1(WASM_ZERO))

// Unsigned Extracts are only available for I8x16, I16x8 types
#define WASM_SIMD_CHECK_LANE_U(TYPE, value, LANE_TYPE, lane_value, lane_index) \
  WASM_IF(WASM_##LANE_TYPE##_NE(WASM_LOCAL_GET(lane_value),                    \
                                WASM_SIMD_##TYPE##_EXTRACT_LANE_U(             \
                                    lane_index, WASM_LOCAL_GET(value))),       \
          WASM_RETURN1(WASM_ZERO))
|
|
|
|
|
2019-01-10 23:22:07 +00:00
|
|
|
// The macro below disables tests lowering for certain nodes where the simd
// lowering doesn't work correctly. Early return here if the CPU does not
// support SIMD as the graph will be implicitly lowered in that case.
#define WASM_SIMD_TEST_NO_LOWERING(name)                               \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                     \
                             TestExecutionTier execution_tier);        \
  TEST(RunWasm_##name##_turbofan) {                                    \
    if (!CpuFeatures::SupportsWasmSimd128()) return;                   \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan); \
  }                                                                    \
  TEST(RunWasm_##name##_liftoff) {                                     \
    if (!CpuFeatures::SupportsWasmSimd128()) return;                   \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kLiftoff);  \
  }                                                                    \
  TEST(RunWasm_##name##_interpreter) {                                 \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    RunWasm_##name##_Impl(kNoLowerSimd,                                \
                          TestExecutionTier::kInterpreter);            \
  }                                                                    \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                     \
                             TestExecutionTier execution_tier)
|
2019-01-10 23:22:07 +00:00
|
|
|
|
2021-03-05 04:03:58 +00:00
|
|
|
// Use this for post-mvp instructions, for which we only implement interpreter
// and TurboFan support. Liftoff support is generally not implemented yet, so
// don't test that, and neither is scalar lowering.
#define WASM_SIMD_TEST_POST_MVP(name)                                  \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                     \
                             TestExecutionTier execution_tier);        \
  TEST(RunWasm_##name##_turbofan) {                                    \
    if (!CpuFeatures::SupportsWasmSimd128()) return;                   \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    FLAG_SCOPE(wasm_simd_post_mvp);                                    \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan); \
  }                                                                    \
  TEST(RunWasm_##name##_interpreter) {                                 \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                     \
    FLAG_SCOPE(wasm_simd_post_mvp);                                    \
    RunWasm_##name##_Impl(kNoLowerSimd,                                \
                          TestExecutionTier::kInterpreter);            \
  }                                                                    \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                     \
                             TestExecutionTier execution_tier)
|
|
|
|
|
2019-03-05 01:44:05 +00:00
|
|
|
// Returns true if the platform can represent the result. ARM flushes
// subnormal (denormal) floats to zero in its SIMD unit, so subnormal inputs
// are excluded there.
template <typename T>
bool PlatformCanRepresent(T x) {
#if V8_TARGET_ARCH_ARM
  return std::fpclassify(x) != FP_SUBNORMAL;
#else
  return true;
#endif
}
|
|
|
|
|
|
|
|
// Returns true for very small and very large numbers. We skip these test
// values for the approximation instructions, which don't work at the
// extremes. Zero and negative zero are never considered extreme.
bool IsExtreme(float x) {
  const float kSmallFloatThreshold = 1.0e-32f;
  const float kLargeFloatThreshold = 1.0e32f;
  const float magnitude = std::fabs(x);
  if (magnitude == 0.0f) return false;  // 0 or -0 are fine.
  return magnitude < kSmallFloatThreshold || magnitude > kLargeFloatThreshold;
}
|
|
|
|
|
2020-10-19 18:58:39 +00:00
|
|
|
#if V8_OS_AIX
// Returns true when the given float op is one that may flip the sign of its
// input (negation or absolute value). Only needed on AIX.
template <typename T>
bool MightReverseSign(T float_op) {
  const bool is_negate = float_op == static_cast<T>(Negate);
  const bool is_abs = float_op == static_cast<T>(std::abs);
  return is_negate || is_abs;
}
#endif
|
|
|
|
|
2019-11-15 12:56:22 +00:00
|
|
|
WASM_SIMD_TEST(S128Globals) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input and output vectors.
  int32_t* input_global = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* output_global = r.builder().AddGlobal<int32_t>(kWasmS128);
  // The wasm function copies global 0 into global 1 and returns 1.
  BUILD(r, WASM_GLOBAL_SET(1, WASM_GLOBAL_GET(0)), WASM_ONE);

  FOR_INT32_INPUTS(x) {
    // Broadcast x into all four lanes of the input global.
    for (int lane = 0; lane < 4; lane++) {
      WriteLittleEndianValue<int32_t>(&input_global[lane], x);
    }
    r.Call();
    // Each lane of the output global must match the input value.
    for (int lane = 0; lane < 4; lane++) {
      int32_t lane_value = ReadLittleEndianValue<int32_t>(&output_global[lane]);
      CHECK_EQ(lane_value, x);
    }
  }
}
|
|
|
|
|
2020-01-29 23:44:10 +00:00
|
|
|
WASM_SIMD_TEST(F32x4Splat) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  float* output_global = r.builder().AddGlobal<float>(kWasmS128);
  byte value_param = 0;
  // Splat the float parameter into all four lanes of the output global.
  BUILD(r,
        WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value_param))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    r.Call(x);
    for (int lane = 0; lane < 4; lane++) {
      float lane_value = ReadLittleEndianValue<float>(&output_global[lane]);
      if (std::isnan(x)) {
        // NaN compares unequal to itself, so check the class instead.
        CHECK(std::isnan(lane_value));
      } else {
        CHECK_EQ(lane_value, x);
      }
    }
  }
}
|
|
|
|
|
|
|
|
WASM_SIMD_TEST(F32x4ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  float* output_global = r.builder().AddGlobal<float>(kWasmS128);
  // Build function to replace each lane with its (FP) index: start from a
  // splat of an arbitrary value, then overwrite lanes 0..3 one at a time.
  byte vec = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(vec, WASM_SIMD_F32x4_SPLAT(WASM_F32(3.14159f))),
        WASM_LOCAL_SET(vec, WASM_SIMD_F32x4_REPLACE_LANE(
                                0, WASM_LOCAL_GET(vec), WASM_F32(0.0f))),
        WASM_LOCAL_SET(vec, WASM_SIMD_F32x4_REPLACE_LANE(
                                1, WASM_LOCAL_GET(vec), WASM_F32(1.0f))),
        WASM_LOCAL_SET(vec, WASM_SIMD_F32x4_REPLACE_LANE(
                                2, WASM_LOCAL_GET(vec), WASM_F32(2.0f))),
        WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(
                               3, WASM_LOCAL_GET(vec), WASM_F32(3.0f))),
        WASM_ONE);

  r.Call();
  // Lane i must now hold the float value i.
  for (int lane = 0; lane < 4; lane++) {
    CHECK_EQ(static_cast<float>(lane),
             ReadLittleEndianValue<float>(&output_global[lane]));
  }
}
|
|
|
|
|
[Turbofan] Add native ARM support for basic SIMD 32x4 operations.
- Adds Float32x4 ExtractLane, ReplaceLane, Splat, Add, Sub,
and conversions to Int32x4 and Uint32x4.
- Adds Int32x4 ExtractLane, ReplaceLane, Splat, Add, Sub and
conversions to Float32x4 (int and unsigned int).
- Adds Int32x4 CompareEqual, CompareNotEqual.
- Adds S32x4 Select.
- Adds tests for all new SIMD operations.
LOG=N
BUG=v8:4124
Review-Url: https://codereview.chromium.org/2584863002
Cr-Commit-Position: refs/heads/master@{#41828}
2016-12-19 22:23:03 +00:00
|
|
|
// Tests both signed and unsigned conversion.
// Splats an i32 input into all four lanes, converts with both
// f32x4.convert_i32x4_s and f32x4.convert_i32x4_u, and checks the lanes of
// each result against the equivalent scalar static_cast.
WASM_SIMD_TEST(F32x4ConvertI32x4) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create two output vectors to hold signed and unsigned results.
  float* g0 = r.builder().AddGlobal<float>(kWasmS128);
  float* g1 = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(
            0, WASM_SIMD_UNOP(kExprF32x4SConvertI32x4, WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(
            1, WASM_SIMD_UNOP(kExprF32x4UConvertI32x4, WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    r.Call(x);
    float expected_signed = static_cast<float>(x);
    // Unsigned conversion reinterprets the same bits as uint32_t first.
    float expected_unsigned = static_cast<float>(static_cast<uint32_t>(x));
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<float>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<float>(&g1[i]));
    }
  }
}
|
|
|
|
|
2019-03-26 23:43:48 +00:00
|
|
|
// Returns true if |actual| is "the same NaN" as |expected|, ignoring the sign
// bit (NaN result signs are non-deterministic across platforms) and accepting
// a version of |expected| whose quiet bit has been set (some implementations
// convert signaling NaNs to quiet NaNs).
bool IsSameNan(float expected, float actual) {
  constexpr uint32_t kSignMask = 0x80000000;
  constexpr uint32_t kQuietBit = 0x00400000;
  uint32_t expected_bits;
  uint32_t actual_bits;
  std::memcpy(&expected_bits, &expected, sizeof(expected_bits));
  std::memcpy(&actual_bits, &actual, sizeof(actual_bits));
  // Sign is non-deterministic: compare with the sign bit cleared.
  expected_bits &= ~kSignMask;
  actual_bits &= ~kSignMask;
  if (expected_bits == actual_bits) return true;
  // Accept a quietened copy of the expected (possibly signaling) NaN.
  return (expected_bits | kQuietBit) == actual_bits;
}
|
|
|
|
|
|
|
|
// Returns true if |actual| has the bit pattern of a canonical NaN: quiet bit
// set and an all-zero payload (sign bit may be either value). Note this is a
// pure bit-pattern test; it is only meaningful when |actual| is known to be a
// NaN.
bool IsCanonical(float actual) {
  uint32_t actual_bits;
  std::memcpy(&actual_bits, &actual, sizeof(actual_bits));
  // Masking to sign + exponent + quiet bit must leave the value unchanged,
  // i.e. every payload bit below the quiet bit is zero.
  constexpr uint32_t kSignExpQuietMask = 0xFFC00000;
  return (actual_bits & kSignExpQuietMask) == actual_bits;
}
|
|
|
|
|
|
|
|
// Verifies a float lane result against the expected value computed from the
// scalar reference op. |x| and |y| are the original operands (pass x twice
// for unops); they are consulted for NaN propagation. When |exact| is false
// (approximation opcodes like RecipApprox) a 1% relative-error tolerance is
// used instead of exact equality. Failure is reported via CHECK macros.
void CheckFloatResult(float x, float y, float expected, float actual,
                      bool exact = true) {
  if (std::isnan(expected)) {
    CHECK(std::isnan(actual));
    // A NaN result may legitimately be either input NaN (propagated), the
    // expected NaN, or the canonical NaN.
    if (std::isnan(x) && IsSameNan(x, actual)) return;
    if (std::isnan(y) && IsSameNan(y, actual)) return;
    if (IsSameNan(expected, actual)) return;
    if (IsCanonical(actual)) return;
    // This is expected to assert; it's useful for debugging.
    CHECK_EQ(bit_cast<uint32_t>(expected), bit_cast<uint32_t>(actual));
  } else {
    if (exact) {
      CHECK_EQ(expected, actual);
      // The sign of 0's must match.
      CHECK_EQ(std::signbit(expected), std::signbit(actual));
      return;
    }
    // Otherwise, perform an approximate equality test. First check for
    // equality to handle +/-Infinity where approximate equality doesn't work.
    if (expected == actual) return;

    // 1% error allows all platforms to pass easily.
    constexpr float kApproximationError = 0.01f;
    float abs_error = std::abs(expected) * kApproximationError,
          min = expected - abs_error, max = expected + abs_error;
    CHECK_LE(min, actual);
    CHECK_GE(max, actual);
  }
}
|
|
|
|
|
|
|
|
// Test some values not included in the float inputs from value_helper. These
// tests are useful for opcodes that are synthesized during code gen, like Min
// and Max on ia32 and x64.
// Entries are IEEE-754 binary32 bit patterns, read via bit_cast<float> in the
// FOR_FLOAT32_NAN_INPUTS loops below.
static constexpr uint32_t nan_test_array[] = {
    // Bit patterns of quiet NaNs and signaling NaNs, with or without
    // additional payload.
    0x7FC00000, 0xFFC00000, 0x7FFFFFFF, 0xFFFFFFFF, 0x7F876543, 0xFF876543,
    // NaN with top payload bit unset.
    0x7FA00000,
    // Both Infinities.
    0x7F800000, 0xFF800000,
    // Some "normal" numbers, 1 and -1.
    0x3F800000, 0xBF800000};
|
|
|
|
|
|
|
|
// Iterates |i| over the indices of nan_test_array above.
#define FOR_FLOAT32_NAN_INPUTS(i) \
  for (size_t i = 0; i < arraysize(nan_test_array); ++i)
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Shared driver for f32x4 unary-op tests: splats each test value into all
// four lanes, applies |opcode|, and checks every lane against the scalar
// |expected_op|. Runs over both the standard float inputs and the extra
// NaN/infinity patterns in nan_test_array. |exact| selects exact vs
// approximate (1%) comparison in CheckFloatResult.
void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, FloatUnOp expected_op,
                      bool exact = true) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  // Global to hold output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    // Extreme values have larger errors so skip them for approximation tests.
    if (!exact && IsExtreme(x)) continue;
    float expected = expected_op(x);
#if V8_OS_AIX
    // AIX-only workaround applied to the scalar reference result, skipped for
    // ops that may flip the sign (e.g. Neg) — see MightReverseSign.
    if (!MightReverseSign<FloatUnOp>(expected_op))
      expected = FpOpWorkaround<float>(x, expected);
#endif
    if (!PlatformCanRepresent(expected)) continue;
    r.Call(x);
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x, x, expected, actual, exact);
    }
  }

  // Repeat with NaN/infinity bit patterns not covered by FOR_FLOAT32_INPUTS.
  FOR_FLOAT32_NAN_INPUTS(i) {
    float x = bit_cast<float>(nan_test_array[i]);
    if (!PlatformCanRepresent(x)) continue;
    // Extreme values have larger errors so skip them for approximation tests.
    if (!exact && IsExtreme(x)) continue;
    float expected = expected_op(x);
    if (!PlatformCanRepresent(expected)) continue;
    r.Call(x);
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x, x, expected, actual, exact);
    }
  }
}
|
|
|
|
|
2018-05-23 23:24:10 +00:00
|
|
|
// f32x4.abs, checked lane-wise against std::abs.
WASM_SIMD_TEST(F32x4Abs) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Abs, std::abs);
}
|
2019-09-25 21:58:07 +00:00
|
|
|
|
2018-05-23 23:24:10 +00:00
|
|
|
// f32x4.neg, checked lane-wise against the Negate reference op.
WASM_SIMD_TEST(F32x4Neg) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Neg, Negate);
}
|
2017-03-01 11:13:00 +00:00
|
|
|
|
2019-09-11 16:33:44 +00:00
|
|
|
// f32x4.sqrt, checked lane-wise against std::sqrt.
WASM_SIMD_TEST(F32x4Sqrt) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Sqrt, std::sqrt);
}
|
|
|
|
|
2021-03-05 04:03:58 +00:00
|
|
|
// f32x4.recip_approx (post-MVP). The result is only an approximation, so the
// comparison is inexact (1% tolerance, extreme inputs skipped).
WASM_SIMD_TEST_POST_MVP(F32x4RecipApprox) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
                   base::Recip, false /* !exact */);
}
|
|
|
|
|
2021-03-05 04:03:58 +00:00
|
|
|
// f32x4.recip_sqrt_approx (post-MVP). Approximate result, so inexact compare.
WASM_SIMD_TEST_POST_MVP(F32x4RecipSqrtApprox) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
                   base::RecipSqrt, false /* !exact */);
}
|
|
|
|
|
2020-10-08 20:10:44 +00:00
|
|
|
// f32x4.ceil, checked lane-wise (exact) against ceilf.
WASM_SIMD_TEST(F32x4Ceil) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Ceil, ceilf, true);
}
|
|
|
|
|
2020-10-08 20:10:44 +00:00
|
|
|
// f32x4.floor, checked lane-wise (exact) against floorf.
WASM_SIMD_TEST(F32x4Floor) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Floor, floorf, true);
}
|
|
|
|
|
2020-10-08 20:10:44 +00:00
|
|
|
// f32x4.trunc, checked lane-wise (exact) against truncf.
WASM_SIMD_TEST(F32x4Trunc) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Trunc, truncf, true);
}
|
|
|
|
|
2020-10-08 20:10:44 +00:00
|
|
|
// f32x4.nearest, checked lane-wise (exact) against nearbyintf (assumes the
// default round-to-nearest-even FP environment — TODO confirm for all bots).
WASM_SIMD_TEST(F32x4NearestInt) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4NearestInt, nearbyintf,
                   true);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Shared driver for f32x4 binary-op tests: splats each (x, y) pair into two
// vectors, applies |opcode|, and checks every lane against the scalar
// |expected_op|. Exercises the full cross product of standard float inputs
// and, separately, the NaN/infinity patterns in nan_test_array.
void RunF32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, FloatBinOp expected_op) {
  WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
  // Global to hold output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
        WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
                                           WASM_LOCAL_GET(temp2))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_INPUTS(y) {
      if (!PlatformCanRepresent(y)) continue;
      float expected = expected_op(x, y);
      if (!PlatformCanRepresent(expected)) continue;
      r.Call(x, y);
      for (int i = 0; i < 4; i++) {
        float actual = ReadLittleEndianValue<float>(&g[i]);
        CheckFloatResult(x, y, expected, actual, true /* exact */);
      }
    }
  }

  // Repeat for all pairs of NaN/infinity bit patterns.
  FOR_FLOAT32_NAN_INPUTS(i) {
    float x = bit_cast<float>(nan_test_array[i]);
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_NAN_INPUTS(j) {
      float y = bit_cast<float>(nan_test_array[j]);
      if (!PlatformCanRepresent(y)) continue;
      float expected = expected_op(x, y);
      if (!PlatformCanRepresent(expected)) continue;
      r.Call(x, y);
      for (int i = 0; i < 4; i++) {
        float actual = ReadLittleEndianValue<float>(&g[i]);
        CheckFloatResult(x, y, expected, actual, true /* exact */);
      }
    }
  }
}
|
|
|
|
|
2019-03-26 23:43:48 +00:00
|
|
|
#undef FOR_FLOAT32_NAN_INPUTS
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// f32x4.add, checked lane-wise against the Add reference op.
WASM_SIMD_TEST(F32x4Add) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Add, Add);
}
|
2018-05-23 22:21:05 +00:00
|
|
|
// f32x4.sub, checked lane-wise against the Sub reference op.
WASM_SIMD_TEST(F32x4Sub) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Sub, Sub);
}
|
2018-05-23 22:21:05 +00:00
|
|
|
// f32x4.mul, checked lane-wise against the Mul reference op.
WASM_SIMD_TEST(F32x4Mul) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Mul, Mul);
}
|
2019-08-23 16:47:41 +00:00
|
|
|
// f32x4.div, checked lane-wise against base::Divide.
WASM_SIMD_TEST(F32x4Div) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Div, base::Divide);
}
|
2019-07-29 23:17:15 +00:00
|
|
|
// f32x4.min, checked against JSMin (NaN-propagating, -0 < +0 semantics).
WASM_SIMD_TEST(F32x4Min) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Min, JSMin);
}
|
2019-07-29 23:17:15 +00:00
|
|
|
// f32x4.max, checked against JSMax (NaN-propagating, -0 < +0 semantics).
WASM_SIMD_TEST(F32x4Max) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
}
|
2016-12-20 16:49:53 +00:00
|
|
|
|
2020-10-08 23:41:02 +00:00
|
|
|
// f32x4.pmin (pseudo-min): matches C++ std::min semantics (y < x ? y : x),
// which differ from f32x4.min for NaNs and signed zeros. Checked against the
// Minimum reference op.
WASM_SIMD_TEST(F32x4Pmin) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmin, Minimum);
}
|
|
|
|
|
2020-10-08 23:41:02 +00:00
|
|
|
// f32x4.pmax (pseudo-max): matches C++ std::max semantics (x < y ? y : x).
// Checked against the Maximum reference op.
WASM_SIMD_TEST(F32x4Pmax) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmax, Maximum);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Shared driver for f32x4 comparison tests: splats each (x, y) pair, applies
// the comparison |opcode|, and checks every lane of the resulting i32x4 mask
// against the scalar |expected_op| (which returns the all-ones/all-zeros lane
// value).
void RunF32x4CompareOpTest(TestExecutionTier execution_tier,
                           LowerSimd lower_simd, WasmOpcode opcode,
                           FloatCompareOp expected_op) {
  WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
  // Set up global to hold mask output.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test values, perform compare op, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
        WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
                                           WASM_LOCAL_GET(temp2))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_INPUTS(y) {
      if (!PlatformCanRepresent(y)) continue;
      float diff = x - y;  // Model comparison as subtraction.
      if (!PlatformCanRepresent(diff)) continue;
      r.Call(x, y);
      int32_t expected = expected_op(x, y);
      for (int i = 0; i < 4; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
      }
    }
  }
}
|
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// f32x4.eq, mask checked lane-wise against the Equal reference op.
WASM_SIMD_TEST(F32x4Eq) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Eq, Equal);
}
|
2017-02-27 23:45:03 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// f32x4.ne, mask checked lane-wise against the NotEqual reference op.
WASM_SIMD_TEST(F32x4Ne) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ne, NotEqual);
}
|
2017-03-03 02:31:44 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// f32x4.gt, mask checked lane-wise against the Greater reference op.
WASM_SIMD_TEST(F32x4Gt) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Gt, Greater);
}
|
2017-03-08 00:01:36 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// f32x4.ge, mask checked lane-wise against the GreaterEqual reference op.
WASM_SIMD_TEST(F32x4Ge) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ge, GreaterEqual);
}
|
2017-03-08 00:01:36 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// f32x4.lt, mask checked lane-wise against the Less reference op.
WASM_SIMD_TEST(F32x4Lt) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Lt, Less);
}
|
2017-03-08 00:01:36 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// f32x4.le, mask checked lane-wise against the LessEqual reference op.
WASM_SIMD_TEST(F32x4Le) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
|
[Turbofan] Add native ARM support for basic SIMD 32x4 operations.
- Adds Float32x4 ExtractLane, ReplaceLane, Splat, Add, Sub,
and conversions to Int32x4 and Uint32x4.
- Adds Int32x4 ExtractLane, ReplaceLane, Splat, Add, Sub and
conversions to Float32x4 (int and unsigned int).
- Adds Int32x4 CompareEqual, CompareNotEqual.
- Adds S32x4 Select.
- Adds tests for all new SIMD operations.
LOG=N
BUG=v8:4124
Review-Url: https://codereview.chromium.org/2584863002
Cr-Commit-Position: refs/heads/master@{#41828}
2016-12-19 22:23:03 +00:00
|
|
|
|
2020-03-17 02:29:49 +00:00
|
|
|
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
|
2021-03-05 04:03:58 +00:00
|
|
|
// f32x4.qfma (quasi fused multiply-add, post-MVP): computes a + b * c per
// lane; the result may be fused (one rounding) or unfused (two roundings)
// depending on hardware, so the expected value is chosen per tier via
// ExpectFused.
WASM_SIMD_TEST_POST_MVP(F32x4Qfma) {
  WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
  // Set up global to hold output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat the three operands, perform qfma, and write the result.
  byte value1 = 0, value2 = 1, value3 = 2;
  BUILD(r,
        WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
                               WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
                               WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
                               WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
        WASM_ONE);

  // qfma_vector supplies (a, b, c) triples with both fused and unfused
  // expected results.
  for (FMOperation<float> x : qfma_vector<float>()) {
    r.Call(x.a, x.b, x.c);
    float expected =
        ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
    }
  }
}
|
|
|
|
|
2021-03-05 04:03:58 +00:00
|
|
|
// f32x4.qfms (quasi fused multiply-subtract, post-MVP): computes a - b * c
// per lane; fused vs unfused result selected per tier via ExpectFused.
WASM_SIMD_TEST_POST_MVP(F32x4Qfms) {
  WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
  // Set up global to hold output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat the three operands, perform qfms, and write the result.
  byte value1 = 0, value2 = 1, value3 = 2;
  BUILD(r,
        WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMS(
                               WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
                               WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
                               WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
        WASM_ONE);

  // qfms_vector supplies (a, b, c) triples with both fused and unfused
  // expected results.
  for (FMOperation<float> x : qfms_vector<float>()) {
    r.Call(x.a, x.b, x.c);
    float expected =
        ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
    }
  }
}
|
2020-03-17 02:29:49 +00:00
|
|
|
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
|
[wasm-simd] Implement QFMA and QFMS on x64
Quasi Fused Multiply-Add and Quasi Fused Multiply-Subtract performs, on floats, a + b * c and a - b * c respectively.
When there is only a single rounding, it is a fused operation. Quasi in this case means that the result can either be fused or not fused (two roundings), depending on hardware support.
It is tricky to write the test because we need to calculate the expected value, and there is no easy way to express fused or unfused operation in C++, i.e.
we cannot confirm that float expected = a + b * c will perform a fused or unfused operation (unless we use intrinsics).
Thus in the test we have a list of simple checks, plus interesting values that we know will produce different results depending on whether it was fused or not.
The difference between 32x4 and 64x2 qfma/qfms is the type, and also the values of b and c that will cause an overflow, and thus the intermediate rounding will affect the final result.
The same array can be copy pasted for both types, but with a bit of templating we can avoid that duplication.
Change-Id: I0973a3d28468d25f310b593c72f21bff54d809a7
Bug: v8:9415
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1779325
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63878}
2019-09-18 19:44:10 +00:00
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// i64x2.splat: splats each int64 input into both lanes and verifies them.
WASM_SIMD_TEST(I64x2Splat) {
  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(param1))),
        WASM_ONE);

  FOR_INT64_INPUTS(x) {
    r.Call(x);
    int64_t expected = x;
    for (int i = 0; i < 2; i++) {
      int64_t actual = ReadLittleEndianValue<int64_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// i64x2.extract_lane: extracts lane 0 of a splat of 0xFFFFFFFFFF, re-splats
// the extracted value, extracts lane 1, and checks the value round-trips.
// Exercises both lane indices and an i64 value wider than 32 bits.
WASM_SIMD_TEST(I64x2ExtractLane) {
  WasmRunner<int64_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmI64);   // local 0: extracted scalar
  r.AllocateLocal(kWasmS128);  // local 1: re-splatted vector
  BUILD(
      r,
      WASM_LOCAL_SET(0, WASM_SIMD_I64x2_EXTRACT_LANE(
                            0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(0xFFFFFFFFFF)))),
      WASM_LOCAL_SET(1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
      WASM_SIMD_I64x2_EXTRACT_LANE(1, WASM_LOCAL_GET(1)));
  CHECK_EQ(0xFFFFFFFFFF, r.Call());
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// i64x2.replace_lane: starts from a splat of -1, replaces lane i with i for
// both lanes, and verifies the stored global.
WASM_SIMD_TEST(I64x2ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
                                  0, WASM_LOCAL_GET(temp1), WASM_I64V(0))),
        // The last replace writes straight into the global.
        WASM_GLOBAL_SET(0, WASM_SIMD_I64x2_REPLACE_LANE(
                               1, WASM_LOCAL_GET(temp1), WASM_I64V(1))),
        WASM_ONE);

  r.Call();
  for (int64_t i = 0; i < 2; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int64_t>(&g[i]));
  }
}
|
2019-07-25 23:46:34 +00:00
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunI64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-07-26 16:44:20 +00:00
|
|
|
WasmOpcode opcode, Int64UnOp expected_op) {
|
|
|
|
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
|
|
|
|
// Global to hold output.
|
|
|
|
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
|
|
|
|
// Build fn to splat test value, perform unop, and write the result.
|
|
|
|
byte value = 0;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
2019-07-26 16:44:20 +00:00
|
|
|
|
|
|
|
FOR_INT64_INPUTS(x) {
|
|
|
|
r.Call(x);
|
|
|
|
int64_t expected = expected_op(x);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// Tests i64x2.neg: lane-wise two's-complement negation with wraparound.
WASM_SIMD_TEST(I64x2Neg) {
  RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
                   base::NegateWithWraparound);
}
|
|
|
|
|
2021-02-10 23:26:30 +00:00
|
|
|
// Tests i64x2.abs: lane-wise absolute value, with std::abs as reference.
WASM_SIMD_TEST_NO_LOWERING(I64x2Abs) {
  RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Abs, std::abs);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-07-25 23:46:34 +00:00
|
|
|
WasmOpcode opcode, Int64ShiftOp expected_op) {
|
2019-08-29 23:55:57 +00:00
|
|
|
// Intentionally shift by 64, should be no-op.
|
|
|
|
for (int shift = 1; shift <= 64; shift++) {
|
2019-07-25 23:46:34 +00:00
|
|
|
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
|
2020-01-29 19:13:57 +00:00
|
|
|
int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
|
|
|
|
int64_t* g_imm = r.builder().AddGlobal<int64_t>(kWasmS128);
|
|
|
|
int64_t* g_mem = r.builder().AddGlobal<int64_t>(kWasmS128);
|
2019-07-25 23:46:34 +00:00
|
|
|
byte value = 0;
|
2020-01-29 19:13:57 +00:00
|
|
|
byte simd = r.AllocateLocal(kWasmS128);
|
|
|
|
// Shift using an immediate, and shift using a value loaded from memory.
|
2020-05-05 18:04:33 +00:00
|
|
|
BUILD(
|
2020-12-17 16:55:33 +00:00
|
|
|
r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
|
2020-01-29 19:13:57 +00:00
|
|
|
WASM_I32V(shift))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
|
2020-12-17 16:55:33 +00:00
|
|
|
opcode, WASM_LOCAL_GET(simd),
|
2020-01-29 19:13:57 +00:00
|
|
|
WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
|
|
|
|
WASM_ONE);
|
2019-07-25 23:46:34 +00:00
|
|
|
|
2020-01-29 19:13:57 +00:00
|
|
|
r.builder().WriteMemory(&memory[0], shift);
|
2019-07-25 23:46:34 +00:00
|
|
|
FOR_INT64_INPUTS(x) {
|
|
|
|
r.Call(x);
|
|
|
|
int64_t expected = expected_op(x, shift);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
2020-01-29 19:13:57 +00:00
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_imm[i]));
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_mem[i]));
|
2019-07-25 23:46:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// Tests i64x2.shl: lane-wise logical shift left.
WASM_SIMD_TEST(I64x2Shl) {
  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
                      LogicalShiftLeft);
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// Tests i64x2.shr_s: lane-wise arithmetic (sign-extending) shift right.
WASM_SIMD_TEST(I64x2ShrS) {
  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrS,
                      ArithmeticShiftRight);
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// Tests i64x2.shr_u: lane-wise logical (zero-extending) shift right.
WASM_SIMD_TEST(I64x2ShrU) {
  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrU,
                      LogicalShiftRight);
}
|
2019-07-29 16:58:53 +00:00
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-07-29 16:58:53 +00:00
|
|
|
WasmOpcode opcode, Int64BinOp expected_op) {
|
|
|
|
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
|
|
|
|
// Global to hold output.
|
|
|
|
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
|
|
|
|
// Build fn to splat test values, perform binop, and write the result.
|
|
|
|
byte value1 = 0, value2 = 1;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
|
|
|
byte temp2 = r.AllocateLocal(kWasmS128);
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value1))),
|
|
|
|
WASM_LOCAL_SET(temp2, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value2))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
|
2020-12-17 16:55:33 +00:00
|
|
|
WASM_LOCAL_GET(temp2))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
2019-07-29 16:58:53 +00:00
|
|
|
|
|
|
|
FOR_INT64_INPUTS(x) {
|
|
|
|
FOR_INT64_INPUTS(y) {
|
|
|
|
r.Call(x, y);
|
|
|
|
int64_t expected = expected_op(x, y);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// Tests i64x2.add: lane-wise addition with wraparound.
WASM_SIMD_TEST(I64x2Add) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
                    base::AddWithWraparound);
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// Tests i64x2.sub: lane-wise subtraction with wraparound.
WASM_SIMD_TEST(I64x2Sub) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Sub,
                    base::SubWithWraparound);
}
|
|
|
|
|
|
|
|
// Tests i64x2.eq: lane-wise equality producing an all-ones/all-zeros mask.
WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
}
|
2019-07-25 21:57:55 +00:00
|
|
|
|
2021-02-01 21:40:17 +00:00
|
|
|
// Tests i64x2.ne: lane-wise inequality producing a mask.
WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
}
|
|
|
|
|
2021-02-09 21:56:43 +00:00
|
|
|
// Tests i64x2.lt_s: lane-wise signed less-than producing a mask.
WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
}
|
|
|
|
|
|
|
|
// Tests i64x2.le_s: lane-wise signed less-or-equal producing a mask.
WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
}
|
|
|
|
|
|
|
|
// Tests i64x2.gt_s: lane-wise signed greater-than producing a mask.
WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
}
|
|
|
|
|
|
|
|
// Tests i64x2.ge_s: lane-wise signed greater-or-equal producing a mask.
WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Splats a double into both lanes of an f64x2 vector. NaN inputs only
// require a NaN result (sign/payload may differ); everything else must
// compare equal.
WASM_SIMD_TEST(F64x2Splat) {
  WasmRunner<int32_t, double> r(execution_tier, lower_simd);
  // Global that receives the splatted vector.
  double* output = r.builder().AddGlobal<double>(kWasmS128);
  byte arg = 0;
  BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(arg))),
        WASM_ONE);

  FOR_FLOAT64_INPUTS(x) {
    r.Call(x);
    for (int lane = 0; lane < 2; lane++) {
      const double got = ReadLittleEndianValue<double>(&output[lane]);
      if (std::isnan(x)) {
        CHECK(std::isnan(got));
      } else {
        CHECK_EQ(got, x);
      }
    }
  }
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Round-trips a double through f64x2 lanes: splat the input, extract lane 0,
// re-splat the scalar, extract lane 1. The value must survive; NaN inputs
// only need to stay NaN.
WASM_SIMD_TEST(F64x2ExtractLane) {
  WasmRunner<double, double> r(execution_tier, lower_simd);
  byte input = 0;
  byte scalar = r.AllocateLocal(kWasmF64);
  byte vec = r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_LOCAL_SET(scalar,
                       WASM_SIMD_F64x2_EXTRACT_LANE(
                           0, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(input)))),
        WASM_LOCAL_SET(vec, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(scalar))),
        WASM_SIMD_F64x2_EXTRACT_LANE(1, WASM_LOCAL_GET(vec)));
  FOR_FLOAT64_INPUTS(x) {
    const double got = r.Call(x);
    if (std::isnan(x)) {
      CHECK(std::isnan(got));
    } else {
      CHECK_EQ(got, x);
    }
  }
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Replaces one lane at a time of an all-1e100 f64x2 vector, storing each
// result into its own global so that both the replaced lane and the
// untouched lane of each result can be checked.
WASM_SIMD_TEST(F64x2ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // One output global per replace operation.
  double* out0 = r.builder().AddGlobal<double>(kWasmS128);
  double* out1 = r.builder().AddGlobal<double>(kWasmS128);
  byte vec = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(vec, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e100))),
        // Replace lane 0.
        WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_REPLACE_LANE(
                               0, WASM_LOCAL_GET(vec), WASM_F64(0.0f))),
        // Replace lane 1.
        WASM_GLOBAL_SET(1, WASM_SIMD_F64x2_REPLACE_LANE(
                               1, WASM_LOCAL_GET(vec), WASM_F64(1.0f))),
        WASM_ONE);

  r.Call();
  CHECK_EQ(0., ReadLittleEndianValue<double>(&out0[0]));
  CHECK_EQ(1e100, ReadLittleEndianValue<double>(&out0[1]));
  CHECK_EQ(1e100, ReadLittleEndianValue<double>(&out1[0]));
  CHECK_EQ(1., ReadLittleEndianValue<double>(&out1[1]));
}
|
2019-07-16 04:40:33 +00:00
|
|
|
|
2020-09-22 23:58:40 +00:00
|
|
|
// Checks that the bit pattern of an i64x2 lane survives an f64x2 extract:
// extracting lane 0 as f64 must bit-equal reinterpreting the i64 constant.
WASM_SIMD_TEST(F64x2ExtractLaneWithI64x2) {
  WasmRunner<int64_t> r(execution_tier, lower_simd);
  BUILD(r, WASM_IF_ELSE_L(
               WASM_F64_EQ(WASM_SIMD_F64x2_EXTRACT_LANE(
                               0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(1e15))),
                           WASM_F64_REINTERPRET_I64(WASM_I64V(1e15))),
               WASM_I64V(1), WASM_I64V(0)));
  CHECK_EQ(1, r.Call());
}
|
|
|
|
|
2020-09-22 23:58:40 +00:00
|
|
|
// Checks that the bit pattern of an f64x2 lane survives an i64x2 extract:
// extracting lane 0 as i64 must bit-equal reinterpreting the f64 constant.
WASM_SIMD_TEST(I64x2ExtractWithF64x2) {
  WasmRunner<int64_t> r(execution_tier, lower_simd);
  BUILD(r, WASM_IF_ELSE_L(
               WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
                               0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
                           WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
               WASM_I64V(1), WASM_I64V(0)));
  CHECK_EQ(1, r.Call());
}
|
|
|
|
|
2019-07-31 20:13:36 +00:00
|
|
|
// Returns true for nonzero finite-magnitude values outside [1e-298, 1e298].
// Such inputs are skipped by approximate comparisons because relative error
// grows near the edges of the double range. Zero (either sign) and NaN are
// not extreme.
bool IsExtreme(double x) {
  constexpr double kSmallFloatThreshold = 1.0e-298;
  constexpr double kLargeFloatThreshold = 1.0e298;
  const double magnitude = std::fabs(x);
  if (magnitude == 0.0) return false;  // 0 or -0 are fine.
  return magnitude < kSmallFloatThreshold || magnitude > kLargeFloatThreshold;
}
|
|
|
|
|
2019-07-18 09:03:02 +00:00
|
|
|
// Returns true when |actual| carries the "same" NaN as |expected|: identical
// bit patterns ignoring the sign bit, optionally with the quiet bit set.
bool IsSameNan(double expected, double actual) {
  uint64_t expected_bits;
  uint64_t actual_bits;
  std::memcpy(&expected_bits, &expected, sizeof(expected_bits));
  std::memcpy(&actual_bits, &actual, sizeof(actual_bits));
  // Sign is non-deterministic.
  expected_bits &= ~uint64_t{0x8000000000000000};
  actual_bits &= ~uint64_t{0x8000000000000000};
  // Some implementations convert signaling NaNs to quiet NaNs.
  return (expected_bits == actual_bits) ||
         ((expected_bits | 0x0008000000000000) == actual_bits);
}
|
|
|
|
|
|
|
|
// Returns true when |actual| has the bit pattern of a canonical NaN: quiet
// bit set and an all-zero payload (sign bit may be either value). Only
// meaningful when the caller already knows |actual| is a NaN.
bool IsCanonical(double actual) {
  uint64_t actual_bits;
  std::memcpy(&actual_bits, &actual, sizeof(actual_bits));
  // Canonical NaN has quiet bit and no payload.
  return (actual_bits & 0xFFF8000000000000) == actual_bits;
}
|
|
|
|
|
|
|
|
void CheckDoubleResult(double x, double y, double expected, double actual,
|
|
|
|
bool exact = true) {
|
|
|
|
if (std::isnan(expected)) {
|
|
|
|
CHECK(std::isnan(actual));
|
|
|
|
if (std::isnan(x) && IsSameNan(x, actual)) return;
|
|
|
|
if (std::isnan(y) && IsSameNan(y, actual)) return;
|
|
|
|
if (IsSameNan(expected, actual)) return;
|
|
|
|
if (IsCanonical(actual)) return;
|
|
|
|
// This is expected to assert; it's useful for debugging.
|
|
|
|
CHECK_EQ(bit_cast<uint64_t>(expected), bit_cast<uint64_t>(actual));
|
|
|
|
} else {
|
|
|
|
if (exact) {
|
|
|
|
CHECK_EQ(expected, actual);
|
|
|
|
// The sign of 0's must match.
|
|
|
|
CHECK_EQ(std::signbit(expected), std::signbit(actual));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// Otherwise, perform an approximate equality test. First check for
|
|
|
|
// equality to handle +/-Infinity where approximate equality doesn't work.
|
|
|
|
if (expected == actual) return;
|
|
|
|
|
|
|
|
// 1% error allows all platforms to pass easily.
|
|
|
|
constexpr double kApproximationError = 0.01f;
|
|
|
|
double abs_error = std::abs(expected) * kApproximationError,
|
|
|
|
min = expected - abs_error, max = expected + abs_error;
|
|
|
|
CHECK_LE(min, actual);
|
|
|
|
CHECK_GE(max, actual);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test some values not included in the double inputs from value_helper. These
// tests are useful for opcodes that are synthesized during code gen, like Min
// and Max on ia32 and x64.
static constexpr uint64_t double_nan_test_array[] = {
    // quiet NaNs, + and -
    0x7FF8000000000001, 0xFFF8000000000001,
    // with payload
    0x7FF8000000000011, 0xFFF8000000000011,
    // signaling NaNs, + and -
    0x7FF0000000000001, 0xFFF0000000000001,
    // with payload
    0x7FF0000000000011, 0xFFF0000000000011,
    // Both Infinities.
    0x7FF0000000000000, 0xFFF0000000000000,
    // Some "normal" numbers, 1 and -1.
    0x3FF0000000000000, 0xBFF0000000000000};

// Iterates |i| over the indices of double_nan_test_array.
#define FOR_FLOAT64_NAN_INPUTS(i) \
  for (size_t i = 0; i < arraysize(double_nan_test_array); ++i)
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-07-18 09:03:02 +00:00
|
|
|
WasmOpcode opcode, DoubleUnOp expected_op,
|
|
|
|
bool exact = true) {
|
|
|
|
WasmRunner<int32_t, double> r(execution_tier, lower_simd);
|
|
|
|
// Global to hold output.
|
|
|
|
double* g = r.builder().AddGlobal<double>(kWasmS128);
|
|
|
|
// Build fn to splat test value, perform unop, and write the result.
|
|
|
|
byte value = 0;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
2019-07-18 09:03:02 +00:00
|
|
|
|
|
|
|
FOR_FLOAT64_INPUTS(x) {
|
|
|
|
if (!PlatformCanRepresent(x)) continue;
|
|
|
|
// Extreme values have larger errors so skip them for approximation tests.
|
|
|
|
if (!exact && IsExtreme(x)) continue;
|
|
|
|
double expected = expected_op(x);
|
2020-10-14 14:29:43 +00:00
|
|
|
#if V8_OS_AIX
|
2020-10-19 18:58:39 +00:00
|
|
|
if (!MightReverseSign<DoubleUnOp>(expected_op))
|
|
|
|
expected = FpOpWorkaround<double>(x, expected);
|
2020-10-14 14:29:43 +00:00
|
|
|
#endif
|
2019-07-18 09:03:02 +00:00
|
|
|
if (!PlatformCanRepresent(expected)) continue;
|
|
|
|
r.Call(x);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
double actual = ReadLittleEndianValue<double>(&g[i]);
|
2019-07-19 09:18:49 +00:00
|
|
|
CheckDoubleResult(x, x, expected, actual, exact);
|
2019-07-18 09:03:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
FOR_FLOAT64_NAN_INPUTS(i) {
|
|
|
|
double x = bit_cast<double>(double_nan_test_array[i]);
|
|
|
|
if (!PlatformCanRepresent(x)) continue;
|
|
|
|
// Extreme values have larger errors so skip them for approximation tests.
|
|
|
|
if (!exact && IsExtreme(x)) continue;
|
|
|
|
double expected = expected_op(x);
|
|
|
|
if (!PlatformCanRepresent(expected)) continue;
|
|
|
|
r.Call(x);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
double actual = ReadLittleEndianValue<double>(&g[i]);
|
|
|
|
CheckDoubleResult(x, x, expected, actual, exact);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.abs against std::abs, exact comparison.
WASM_SIMD_TEST(F64x2Abs) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Abs, std::abs);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.neg: lane-wise sign flip.
WASM_SIMD_TEST(F64x2Neg) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Neg, Negate);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.sqrt against std::sqrt, exact comparison.
WASM_SIMD_TEST(F64x2Sqrt) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Sqrt, std::sqrt);
}
|
|
|
|
|
2020-10-08 20:10:44 +00:00
|
|
|
// Tests f64x2.ceil against ceil, exact comparison.
WASM_SIMD_TEST(F64x2Ceil) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Ceil, ceil, true);
}
|
2020-06-25 20:08:55 +00:00
|
|
|
|
2020-10-08 20:10:44 +00:00
|
|
|
// Tests f64x2.floor against floor, exact comparison.
WASM_SIMD_TEST(F64x2Floor) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Floor, floor, true);
}
|
2020-06-29 17:58:15 +00:00
|
|
|
|
2020-10-08 20:10:44 +00:00
|
|
|
// Tests f64x2.trunc against trunc, exact comparison.
WASM_SIMD_TEST(F64x2Trunc) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Trunc, trunc, true);
}
|
2020-06-29 20:46:22 +00:00
|
|
|
|
2020-10-08 20:10:44 +00:00
|
|
|
// Tests f64x2.nearest (round to nearest, ties to even) against nearbyint.
WASM_SIMD_TEST(F64x2NearestInt) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2NearestInt, nearbyint,
                   true);
}
|
|
|
|
|
2021-01-19 23:19:03 +00:00
|
|
|
template <typename SrcType>
|
|
|
|
void RunF64x2ConvertLowI32x4Test(TestExecutionTier execution_tier,
|
|
|
|
LowerSimd lower_simd, WasmOpcode opcode) {
|
|
|
|
WasmRunner<int32_t, SrcType> r(execution_tier, lower_simd);
|
|
|
|
double* g = r.builder().template AddGlobal<double>(kWasmS128);
|
|
|
|
// TODO(zhin): set top lanes to 0 to assert conversion happens on low lanes.
|
|
|
|
BUILD(
|
|
|
|
r,
|
|
|
|
WASM_GLOBAL_SET(
|
|
|
|
0, WASM_SIMD_UNOP(opcode, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)))),
|
|
|
|
WASM_ONE);
|
|
|
|
|
|
|
|
for (SrcType x : compiler::ValueHelper::GetVector<SrcType>()) {
|
|
|
|
r.Call(x);
|
|
|
|
double expected = static_cast<double>(x);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
double actual = ReadLittleEndianValue<double>(&g[i]);
|
|
|
|
CheckDoubleResult(x, x, expected, actual, true);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests f64x2.convert_low_i32x4_s: signed i32 -> f64 conversion.
WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertLowI32x4S) {
  RunF64x2ConvertLowI32x4Test<int32_t>(execution_tier, lower_simd,
                                       kExprF64x2ConvertLowI32x4S);
}
|
|
|
|
|
|
|
|
// Tests f64x2.convert_low_i32x4_u: unsigned i32 -> f64 conversion.
WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertLowI32x4U) {
  RunF64x2ConvertLowI32x4Test<uint32_t>(execution_tier, lower_simd,
                                        kExprF64x2ConvertLowI32x4U);
}
|
|
|
|
|
|
|
|
template <typename SrcType>
|
|
|
|
void RunI32x4TruncSatF64x2Test(TestExecutionTier execution_tier,
|
|
|
|
LowerSimd lower_simd, WasmOpcode opcode) {
|
|
|
|
WasmRunner<int32_t, double> r(execution_tier, lower_simd);
|
|
|
|
SrcType* g = r.builder().AddGlobal<SrcType>(kWasmS128);
|
|
|
|
BUILD(
|
|
|
|
r,
|
|
|
|
WASM_GLOBAL_SET(
|
|
|
|
0, WASM_SIMD_UNOP(opcode, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(0)))),
|
|
|
|
WASM_ONE);
|
|
|
|
|
|
|
|
FOR_FLOAT64_INPUTS(x) {
|
|
|
|
r.Call(x);
|
|
|
|
SrcType expected = base::saturated_cast<SrcType>(x);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
SrcType actual = ReadLittleEndianValue<SrcType>(&g[i]);
|
|
|
|
CHECK_EQ(expected, actual);
|
|
|
|
}
|
|
|
|
// Top lanes are zero-ed.
|
|
|
|
for (int i = 2; i < 4; i++) {
|
|
|
|
CHECK_EQ(0, ReadLittleEndianValue<SrcType>(&g[i]));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests i32x4.trunc_sat_f64x2_s_zero: signed saturating truncation.
WASM_SIMD_TEST_NO_LOWERING(I32x4TruncSatF64x2SZero) {
  RunI32x4TruncSatF64x2Test<int32_t>(execution_tier, lower_simd,
                                     kExprI32x4TruncSatF64x2SZero);
}
|
|
|
|
|
|
|
|
// Tests i32x4.trunc_sat_f64x2_u_zero: unsigned saturating truncation.
WASM_SIMD_TEST_NO_LOWERING(I32x4TruncSatF64x2UZero) {
  RunI32x4TruncSatF64x2Test<uint32_t>(execution_tier, lower_simd,
                                      kExprI32x4TruncSatF64x2UZero);
}
|
|
|
|
|
|
|
|
// f32x4.demote_f64x2_zero: demotes the two doubles into the low two float
// lanes; the two high lanes must come out as zero.
WASM_SIMD_TEST_NO_LOWERING(F32x4DemoteF64x2Zero) {
  WasmRunner<int32_t, double> r(execution_tier, lower_simd);
  float* output = r.builder().AddGlobal<float>(kWasmS128);
  BUILD(r,
        WASM_GLOBAL_SET(
            0, WASM_SIMD_UNOP(kExprF32x4DemoteF64x2Zero,
                              WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(0)))),
        WASM_ONE);

  FOR_FLOAT64_INPUTS(x) {
    r.Call(x);
    const float expected = DoubleToFloat32(x);
    for (int lane = 0; lane < 2; lane++) {
      CheckFloatResult(x, x, expected,
                       ReadLittleEndianValue<float>(&output[lane]), true);
    }
    // High lanes must be zeroed by the demote.
    for (int lane = 2; lane < 4; lane++) {
      CheckFloatResult(x, x, 0, ReadLittleEndianValue<float>(&output[lane]),
                       true);
    }
  }
}
|
|
|
|
|
|
|
|
// f64x2.promote_low_f32x4: promotes the low two float lanes to doubles; both
// result lanes must equal the widened input.
WASM_SIMD_TEST_NO_LOWERING(F64x2PromoteLowF32x4) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  double* output = r.builder().AddGlobal<double>(kWasmS128);
  BUILD(r,
        WASM_GLOBAL_SET(
            0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
                              WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    r.Call(x);
    const double expected = static_cast<double>(x);
    for (int lane = 0; lane < 2; lane++) {
      CheckDoubleResult(x, x, expected,
                        ReadLittleEndianValue<double>(&output[lane]), true);
    }
  }
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-07-24 20:01:43 +00:00
|
|
|
WasmOpcode opcode, DoubleBinOp expected_op) {
|
|
|
|
WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
|
|
|
|
// Global to hold output.
|
|
|
|
double* g = r.builder().AddGlobal<double>(kWasmS128);
|
|
|
|
// Build fn to splat test value, perform binop, and write the result.
|
|
|
|
byte value1 = 0, value2 = 1;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
|
|
|
byte temp2 = r.AllocateLocal(kWasmS128);
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
|
|
|
|
WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
|
2020-12-17 16:55:33 +00:00
|
|
|
WASM_LOCAL_GET(temp2))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
2019-07-24 20:01:43 +00:00
|
|
|
|
|
|
|
FOR_FLOAT64_INPUTS(x) {
|
|
|
|
if (!PlatformCanRepresent(x)) continue;
|
|
|
|
FOR_FLOAT64_INPUTS(y) {
|
|
|
|
if (!PlatformCanRepresent(x)) continue;
|
|
|
|
double expected = expected_op(x, y);
|
|
|
|
if (!PlatformCanRepresent(expected)) continue;
|
|
|
|
r.Call(x, y);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
double actual = ReadLittleEndianValue<double>(&g[i]);
|
|
|
|
CheckDoubleResult(x, y, expected, actual, true /* exact */);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
FOR_FLOAT64_NAN_INPUTS(i) {
|
|
|
|
double x = bit_cast<double>(double_nan_test_array[i]);
|
|
|
|
if (!PlatformCanRepresent(x)) continue;
|
|
|
|
FOR_FLOAT64_NAN_INPUTS(j) {
|
|
|
|
double y = bit_cast<double>(double_nan_test_array[j]);
|
|
|
|
double expected = expected_op(x, y);
|
|
|
|
if (!PlatformCanRepresent(expected)) continue;
|
|
|
|
r.Call(x, y);
|
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
double actual = ReadLittleEndianValue<double>(&g[i]);
|
|
|
|
CheckDoubleResult(x, y, expected, actual, true /* exact */);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-14 22:55:49 +00:00
|
|
|
#undef FOR_FLOAT64_NAN_INPUTS
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.add: lane-wise IEEE addition.
WASM_SIMD_TEST(F64x2Add) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Add, Add);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.sub: lane-wise IEEE subtraction.
WASM_SIMD_TEST(F64x2Sub) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Sub, Sub);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.mul: lane-wise IEEE multiplication.
WASM_SIMD_TEST(F64x2Mul) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Mul, Mul);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.div: lane-wise IEEE division.
WASM_SIMD_TEST(F64x2Div) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Div, base::Divide);
}
|
2019-08-14 17:46:10 +00:00
|
|
|
|
2020-10-08 23:41:02 +00:00
|
|
|
// Tests f64x2.pmin: pseudo-minimum with std::min semantics, which differs
// from f64x2.min in NaN/-0 handling (see WebAssembly/simd#122).
WASM_SIMD_TEST(F64x2Pmin) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmin, Minimum);
}
|
|
|
|
|
2020-10-08 23:41:02 +00:00
|
|
|
// Tests f64x2.pmax: pseudo-maximum with std::max semantics, which differs
// from f64x2.max in NaN/-0 handling (see WebAssembly/simd#122).
WASM_SIMD_TEST(F64x2Pmax) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmax, Maximum);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Shared driver for f64x2 comparisons. The two lanes are set up to compare
// differently (lhs = <x, y> vs rhs = <y, y>) so per-lane mask results are
// genuinely exercised; each i64 lane of the mask global is checked against
// |expected_op| on the corresponding scalar pair.
void RunF64x2CompareOpTest(TestExecutionTier execution_tier,
                           LowerSimd lower_simd, WasmOpcode opcode,
                           DoubleCompareOp expected_op) {
  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
  // Global receiving the comparison mask.
  int64_t* mask = r.builder().AddGlobal<int64_t>(kWasmS128);
  byte lhs_val = 0, rhs_val = 1;
  byte lhs = r.AllocateLocal(kWasmS128);
  byte rhs = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(lhs, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(lhs_val))),
        WASM_LOCAL_SET(lhs,
                       WASM_SIMD_F64x2_REPLACE_LANE(1, WASM_LOCAL_GET(lhs),
                                                    WASM_LOCAL_GET(rhs_val))),
        WASM_LOCAL_SET(rhs, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(rhs_val))),
        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(lhs),
                                           WASM_LOCAL_GET(rhs))),
        WASM_ONE);

  FOR_FLOAT64_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT64_INPUTS(y) {
      if (!PlatformCanRepresent(y)) continue;
      double diff = x - y;  // Model comparison as subtraction.
      if (!PlatformCanRepresent(diff)) continue;
      r.Call(x, y);
      CHECK_EQ(expected_op(x, y), ReadLittleEndianValue<int64_t>(&mask[0]));
      CHECK_EQ(expected_op(y, y), ReadLittleEndianValue<int64_t>(&mask[1]));
    }
  }
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.eq: lane-wise IEEE equality producing an i64 mask per lane.
WASM_SIMD_TEST(F64x2Eq) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.ne: lane-wise IEEE inequality producing an i64 mask per lane.
WASM_SIMD_TEST(F64x2Ne) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.gt: lane-wise greater-than producing an i64 mask per lane.
WASM_SIMD_TEST(F64x2Gt) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.ge: lane-wise greater-or-equal producing an i64 mask per lane.
WASM_SIMD_TEST(F64x2Ge) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.lt: lane-wise less-than producing an i64 mask per lane.
WASM_SIMD_TEST(F64x2Lt) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.le: lane-wise less-or-equal producing an i64 mask per lane.
WASM_SIMD_TEST(F64x2Le) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
}
|
2019-08-13 16:17:38 +00:00
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.min against JavaScript-style min (NaN-propagating, -0 < +0).
WASM_SIMD_TEST(F64x2Min) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Min, JSMin);
}
|
|
|
|
|
2020-09-08 21:55:21 +00:00
|
|
|
// Tests f64x2.max against JavaScript-style max (NaN-propagating, -0 < +0).
WASM_SIMD_TEST(F64x2Max) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Max, JSMax);
}
|
|
|
|
|
2020-09-10 04:02:36 +00:00
|
|
|
// Tests i64x2.mul: lane-wise multiplication with wraparound.
WASM_SIMD_TEST(I64x2Mul) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Mul,
                    base::MulWithWraparound);
}
|
|
|
|
|
2020-03-17 02:29:49 +00:00
|
|
|
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
|
2021-03-05 04:03:58 +00:00
|
|
|
// Tests f64x2 quasi fused multiply-add (QFMA). Per lane this computes
// a + b * c, where the result may be fused (single rounding) or unfused (two
// roundings) depending on hardware support; ExpectFused() selects which
// expected value applies for this tier/platform.
WASM_SIMD_TEST_POST_MVP(F64x2Qfma) {
  WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
  // Set up global to hold the per-lane results.
  double* g = r.builder().AddGlobal<double>(kWasmS128);
  // Build fn to splat the three operands, perform qfma, and write the result.
  byte value1 = 0, value2 = 1, value3 = 2;
  BUILD(r,
        WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMA(
                               WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
                               WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
                               WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
        WASM_ONE);

  // qfma_vector contains inputs whose fused and unfused results differ, so
  // the comparison below distinguishes the two behaviors.
  for (FMOperation<double> x : qfma_vector<double>()) {
    r.Call(x.a, x.b, x.c);
    double expected =
        ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
    for (int i = 0; i < 2; i++) {
      double actual = ReadLittleEndianValue<double>(&g[i]);
      CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
    }
  }
}
|
|
|
|
|
2021-03-05 04:03:58 +00:00
|
|
|
// Tests f64x2 quasi fused multiply-subtract (QFMS). Per lane this computes
// a - b * c, with fused (single rounding) or unfused (two roundings) results
// depending on hardware support; ExpectFused() selects the expected value.
WASM_SIMD_TEST_POST_MVP(F64x2Qfms) {
  WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
  // Set up global to hold the per-lane results.
  double* g = r.builder().AddGlobal<double>(kWasmS128);
  // Build fn to splat the three operands, perform qfms, and write the result.
  byte value1 = 0, value2 = 1, value3 = 2;
  BUILD(r,
        WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMS(
                               WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
                               WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
                               WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
        WASM_ONE);

  // qfms_vector contains inputs whose fused and unfused results differ, so
  // the comparison below distinguishes the two behaviors.
  for (FMOperation<double> x : qfms_vector<double>()) {
    r.Call(x.a, x.b, x.c);
    double expected =
        ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
    for (int i = 0; i < 2; i++) {
      double actual = ReadLittleEndianValue<double>(&g[i]);
      CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
    }
  }
}
|
2020-03-17 02:29:49 +00:00
|
|
|
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
|
2019-10-16 20:21:00 +00:00
|
|
|
|
2020-01-29 23:44:10 +00:00
|
|
|
// Tests i32x4.splat: the scalar input must be replicated into all four
// 32-bit lanes of the result vector.
WASM_SIMD_TEST(I32x4Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(param1))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    r.Call(x);
    int32_t expected = x;
    for (int i = 0; i < 4; i++) {
      int32_t actual = ReadLittleEndianValue<int32_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Tests i32x4.replace_lane: starting from a splat of -1, each of the four
// lanes is replaced with its own index; the output lanes must read back 0..3.
WASM_SIMD_TEST(I32x4ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(-1))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
        // The last replace writes directly to the output global.
        WASM_GLOBAL_SET(0, WASM_SIMD_I32x4_REPLACE_LANE(
                               3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
        WASM_ONE);

  r.Call();
  for (int32_t i = 0; i < 4; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int32_t>(&g[i]));
  }
}
|
|
|
|
|
2017-05-31 13:31:52 +00:00
|
|
|
// Tests i16x8.splat: the scalar input is replicated into all eight 16-bit
// lanes. Also checks that inputs wider than 16 bits are truncated.
WASM_SIMD_TEST(I16x8Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(param1))),
        WASM_ONE);

  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int16_t expected = x;
    for (int i = 0; i < 8; i++) {
      int16_t actual = ReadLittleEndianValue<int16_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }

  // Test values that do not fit in an int16; the splat must truncate them to
  // the low 16 bits.
  FOR_INT32_INPUTS(x) {
    r.Call(x);
    int16_t expected = truncate_to_int16(x);
    for (int i = 0; i < 8; i++) {
      int16_t actual = ReadLittleEndianValue<int16_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Tests i16x8.replace_lane: starting from a splat of -1, each of the eight
// lanes is replaced with its own index; the output lanes must read back 0..7.
WASM_SIMD_TEST(I16x8ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_I32V(-1))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  4, WASM_LOCAL_GET(temp1), WASM_I32V(4))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  5, WASM_LOCAL_GET(temp1), WASM_I32V(5))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  6, WASM_LOCAL_GET(temp1), WASM_I32V(6))),
        // The last replace writes directly to the output global.
        WASM_GLOBAL_SET(0, WASM_SIMD_I16x8_REPLACE_LANE(
                               7, WASM_LOCAL_GET(temp1), WASM_I32V(7))),
        WASM_ONE);

  r.Call();
  for (int16_t i = 0; i < 8; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int16_t>(&g[i]));
  }
}
|
|
|
|
|
2020-06-18 23:38:01 +00:00
|
|
|
// Tests i8x16.bitmask: gathers the sign bit of each of the 16 lanes into a
// scalar mask (lane i -> bit i). Lane 0 is forced to 0 and lane 1 to -1 so
// both a set and a clear mask bit are covered on every call.
WASM_SIMD_TEST(I8x16BitMask) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = r.AllocateLocal(kWasmS128);

  BUILD(r, WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(0))),
        WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_REPLACE_LANE(
                                   0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
        WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_REPLACE_LANE(
                                   1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
        WASM_SIMD_UNOP(kExprI8x16BitMask, WASM_LOCAL_GET(value1)));

  FOR_INT8_INPUTS(x) {
    int32_t actual = r.Call(x);
    // Lane 0 is always 0 (positive), lane 1 is always -1. So a negative
    // splat yields 0xFFFE (all bits but bit 0) and a non-negative splat
    // yields 0x0002 (only bit 1).
    int32_t expected = std::signbit(static_cast<double>(x)) ? 0xFFFE : 0x0002;
    CHECK_EQ(actual, expected);
  }
}
|
|
|
|
|
2020-06-18 23:38:01 +00:00
|
|
|
// Tests i16x8.bitmask: gathers the sign bit of each of the 8 lanes into a
// scalar mask (lane i -> bit i). Lane 0 is forced to 0 and lane 1 to -1 so
// both a set and a clear mask bit are covered on every call.
WASM_SIMD_TEST(I16x8BitMask) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = r.AllocateLocal(kWasmS128);

  BUILD(r, WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(0))),
        WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_REPLACE_LANE(
                                   0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
        WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_REPLACE_LANE(
                                   1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
        WASM_SIMD_UNOP(kExprI16x8BitMask, WASM_LOCAL_GET(value1)));

  FOR_INT16_INPUTS(x) {
    int32_t actual = r.Call(x);
    // Lane 0 is always 0 (positive), lane 1 is always -1. So a negative
    // splat yields 0xFE and a non-negative splat yields only bit 1.
    int32_t expected = std::signbit(static_cast<double>(x)) ? 0xFE : 2;
    CHECK_EQ(actual, expected);
  }
}
|
|
|
|
|
2020-06-18 23:38:01 +00:00
|
|
|
// Tests i32x4.bitmask: gathers the sign bit of each of the 4 lanes into a
// scalar mask (lane i -> bit i). Lane 0 is forced to 0 and lane 1 to -1 so
// both a set and a clear mask bit are covered on every call.
WASM_SIMD_TEST(I32x4BitMask) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = r.AllocateLocal(kWasmS128);

  BUILD(r, WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0))),
        WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_REPLACE_LANE(
                                   0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
        WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_REPLACE_LANE(
                                   1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
        WASM_SIMD_UNOP(kExprI32x4BitMask, WASM_LOCAL_GET(value1)));

  FOR_INT32_INPUTS(x) {
    int32_t actual = r.Call(x);
    // Lane 0 is always 0 (positive), lane 1 is always -1. So a negative
    // splat yields 0xE (bits 1..3) and a non-negative splat yields only
    // bit 1.
    int32_t expected = std::signbit(static_cast<double>(x)) ? 0xE : 2;
    CHECK_EQ(actual, expected);
  }
}
|
|
|
|
|
2020-10-12 18:11:52 +00:00
|
|
|
// Tests i64x2.bitmask: gathers the sign bit of each of the 2 lanes into a
// scalar mask (lane i -> bit i). Lane 0 is forced to 0, so only lane 1 (the
// splatted input) determines the result.
WASM_SIMD_TEST_NO_LOWERING(I64x2BitMask) {
  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
  byte value1 = r.AllocateLocal(kWasmS128);

  BUILD(r, WASM_LOCAL_SET(value1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
        WASM_LOCAL_SET(value1, WASM_SIMD_I64x2_REPLACE_LANE(
                                   0, WASM_LOCAL_GET(value1), WASM_I64V_1(0))),
        WASM_SIMD_UNOP(kExprI64x2BitMask, WASM_LOCAL_GET(value1)));

  for (int64_t x : compiler::ValueHelper::GetVector<int64_t>()) {
    int32_t actual = r.Call(x);
    // Lane 0 is always 0 (positive).
    int32_t expected = std::signbit(static_cast<double>(x)) ? 0x2 : 0x0;
    CHECK_EQ(actual, expected);
  }
}
|
|
|
|
|
2017-05-31 13:31:52 +00:00
|
|
|
// Tests i8x16.splat: the scalar input is replicated into all sixteen 8-bit
// lanes. Also checks that inputs wider than 8 bits are truncated.
WASM_SIMD_TEST(I8x16Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(param1))),
        WASM_ONE);

  FOR_INT8_INPUTS(x) {
    r.Call(x);
    int8_t expected = x;
    for (int i = 0; i < 16; i++) {
      int8_t actual = ReadLittleEndianValue<int8_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }

  // Test values that do not fit in an int8; the splat must truncate them to
  // the low 8 bits.
  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int8_t expected = truncate_to_int8(x);
    for (int i = 0; i < 16; i++) {
      int8_t actual = ReadLittleEndianValue<int8_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Tests i8x16.replace_lane: starting from a splat of -1, each of the sixteen
// lanes is replaced with its own index; the output lanes must read back
// 0..15.
WASM_SIMD_TEST(I8x16ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_I32V(-1))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  4, WASM_LOCAL_GET(temp1), WASM_I32V(4))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  5, WASM_LOCAL_GET(temp1), WASM_I32V(5))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  6, WASM_LOCAL_GET(temp1), WASM_I32V(6))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  7, WASM_LOCAL_GET(temp1), WASM_I32V(7))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  8, WASM_LOCAL_GET(temp1), WASM_I32V(8))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  9, WASM_LOCAL_GET(temp1), WASM_I32V(9))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  10, WASM_LOCAL_GET(temp1), WASM_I32V(10))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  11, WASM_LOCAL_GET(temp1), WASM_I32V(11))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  12, WASM_LOCAL_GET(temp1), WASM_I32V(12))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  13, WASM_LOCAL_GET(temp1), WASM_I32V(13))),
        WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  14, WASM_LOCAL_GET(temp1), WASM_I32V(14))),
        // The last replace writes directly to the output global.
        WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_REPLACE_LANE(
                               15, WASM_LOCAL_GET(temp1), WASM_I32V(15))),
        WASM_ONE);

  r.Call();
  for (int8_t i = 0; i < 16; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int8_t>(&g[i]));
  }
}
|
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
// Use doubles to ensure exact conversion.
|
[Turbofan] Add native ARM support for basic SIMD 32x4 operations.
- Adds Float32x4 ExtractLane, ReplaceLane, Splat, Add, Sub,
and conversions to Int32x4 and Uint32x4.
- Adds Int32x4 ExtractLane, ReplaceLane, Splat, Add, Sub and
conversions to Float32x4 (int and unsigned int).
- Adds Int32x4 CompareEqual, CompareNotEqual.
- Adds S32x4 Select.
- Adds tests for all new SIMD operations.
LOG=N
BUG=v8:4124
Review-Url: https://codereview.chromium.org/2584863002
Cr-Commit-Position: refs/heads/master@{#41828}
2016-12-19 22:23:03 +00:00
|
|
|
// Converts |val| to a 32-bit integer with saturating semantics, mirroring
// the Wasm SIMD float->int conversion instructions:
// - NaN converts to 0.
// - Values below the target type's minimum saturate to the minimum
//   (0 for the unsigned conversion).
// - Values above the target type's maximum saturate to the maximum.
// The unsigned result is returned through the int32_t return type with its
// bit pattern unchanged (e.g. UINT32_MAX comes back as -1).
int32_t ConvertToInt(double val, bool unsigned_integer) {
  if (std::isnan(val)) return 0;
  if (unsigned_integer) {
    if (val < 0) return 0;
    // UINT32_MAX (4294967295) is exactly representable as a double, so this
    // comparison is exact.
    if (val > std::numeric_limits<uint32_t>::max()) {
      return std::numeric_limits<uint32_t>::max();
    }
    return static_cast<uint32_t>(val);
  } else {
    // INT32_MIN and INT32_MAX are exactly representable as doubles, so these
    // comparisons are exact.
    if (val < std::numeric_limits<int32_t>::min()) {
      return std::numeric_limits<int32_t>::min();
    }
    if (val > std::numeric_limits<int32_t>::max()) {
      return std::numeric_limits<int32_t>::max();
    }
    return static_cast<int32_t>(val);
  }
}
|
|
|
|
|
|
|
|
// Tests both signed and unsigned conversion.
|
2018-06-20 06:03:44 +00:00
|
|
|
// Tests both signed and unsigned conversion from f32x4 to i32x4
// (i32x4.trunc_sat_f32x4_s / _u), checking the saturating behavior computed
// by ConvertToInt above.
WASM_SIMD_TEST(I32x4ConvertF32x4) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  // Create two output vectors to hold signed and unsigned results.
  int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(
            0, WASM_SIMD_UNOP(kExprI32x4SConvertF32x4, WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(
            1, WASM_SIMD_UNOP(kExprI32x4UConvertF32x4, WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    r.Call(x);
    int32_t expected_signed = ConvertToInt(x, false);
    int32_t expected_unsigned = ConvertToInt(x, true);
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g1[i]));
    }
  }
}
|
2017-01-26 02:18:00 +00:00
|
|
|
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
// Tests both signed and unsigned conversion from I16x8 (unpacking).
|
2018-06-20 06:03:44 +00:00
|
|
|
// Tests both signed and unsigned conversion from I16x8 (unpacking): the
// High/Low opcodes widen one half of the i16x8 register to a full i32x4
// register with sign or zero extension. Since the input is a splat, the High
// and Low variants must produce identical results.
WASM_SIMD_TEST(I32x4ConvertI16x8) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create four output vectors to hold signed and unsigned results.
  int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g2 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g3 = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8High,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8Low,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8High,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8Low,
                                          WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_INT16_INPUTS(x) {
    r.Call(x);
    // Signed widening sign-extends; unsigned widening zero-extends.
    int32_t expected_signed = static_cast<int32_t>(x);
    int32_t expected_unsigned = static_cast<int32_t>(static_cast<uint16_t>(x));
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g1[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g2[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g3[i]));
    }
  }
}
|
|
|
|
|
2020-10-06 20:45:45 +00:00
|
|
|
// Tests both signed and unsigned conversion from I32x4 (unpacking).
|
|
|
|
// Tests both signed and unsigned conversion from I32x4 (unpacking): the
// High/Low opcodes widen one half of the i32x4 register to a full i64x2
// register with sign or zero extension. Since the input is a splat, the High
// and Low variants must produce identical results.
WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create four output vectors to hold signed and unsigned results.
  int64_t* g0 = r.builder().AddGlobal<int64_t>(kWasmS128);
  int64_t* g1 = r.builder().AddGlobal<int64_t>(kWasmS128);
  uint64_t* g2 = r.builder().AddGlobal<uint64_t>(kWasmS128);
  uint64_t* g3 = r.builder().AddGlobal<uint64_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4High,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4Low,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4High,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4Low,
                                          WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    r.Call(x);
    // Signed widening sign-extends; unsigned widening zero-extends.
    int64_t expected_signed = static_cast<int64_t>(x);
    uint64_t expected_unsigned =
        static_cast<uint64_t>(static_cast<uint32_t>(x));
    for (int i = 0; i < 2; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int64_t>(&g0[i]));
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int64_t>(&g1[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<uint64_t>(&g2[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<uint64_t>(&g3[i]));
    }
  }
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-03-08 00:43:11 +00:00
|
|
|
WasmOpcode opcode, Int32UnOp expected_op) {
|
|
|
|
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
|
|
|
|
// Global to hold output.
|
|
|
|
int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
|
|
|
|
// Build fn to splat test value, perform unop, and write the result.
|
|
|
|
byte value = 0;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
2017-01-26 02:18:00 +00:00
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
FOR_INT32_INPUTS(x) {
|
|
|
|
r.Call(x);
|
|
|
|
int32_t expected = expected_op(x);
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
|
|
|
|
}
|
|
|
|
}
|
2017-01-26 02:18:00 +00:00
|
|
|
}
|
|
|
|
|
2018-05-23 23:24:10 +00:00
|
|
|
WASM_SIMD_TEST(I32x4Neg) {
  RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg,
                   base::NegateWithWraparound);
}

WASM_SIMD_TEST(I32x4Abs) {
  RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Abs, std::abs);
}

// s128.not is lane-agnostic, so exercising it through the i32x4 helper is
// sufficient.
WASM_SIMD_TEST(S128Not) {
  RunI32x4UnOpTest(execution_tier, lower_simd, kExprS128Not,
                   [](int32_t x) { return ~x; });
}
|
2016-12-06 01:12:15 +00:00
|
|
|
|
2020-11-02 09:16:52 +00:00
|
|
|
template <typename Narrow, typename Wide>
|
|
|
|
void RunExtAddPairwiseTest(TestExecutionTier execution_tier,
|
|
|
|
LowerSimd lower_simd, WasmOpcode ext_add_pairwise,
|
|
|
|
WasmOpcode splat) {
|
|
|
|
constexpr int num_lanes = kSimd128Size / sizeof(Wide);
|
|
|
|
WasmRunner<int32_t, Narrow> r(execution_tier, lower_simd);
|
|
|
|
Wide* g = r.builder().template AddGlobal<Wide>(kWasmS128);
|
|
|
|
|
|
|
|
// TODO(v8:11086) We splat the same value, so pairwise adding ends up adding
|
|
|
|
// the same value to itself, consider a more complicated test, like having 2
|
|
|
|
// vectors, and shuffling them.
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_GET(0), WASM_SIMD_OP(splat),
|
2020-11-02 09:16:52 +00:00
|
|
|
WASM_SIMD_OP(ext_add_pairwise), kExprGlobalSet, 0, WASM_ONE);
|
|
|
|
|
|
|
|
for (Narrow x : compiler::ValueHelper::GetVector<Narrow>()) {
|
|
|
|
r.Call(x);
|
|
|
|
Wide expected = AddLong<Wide>(x, x);
|
|
|
|
for (int i = 0; i < num_lanes; i++) {
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<Wide>(&g[i]));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8S) {
  RunExtAddPairwiseTest<int16_t, int32_t>(execution_tier, lower_simd,
                                          kExprI32x4ExtAddPairwiseI16x8S,
                                          kExprI16x8Splat);
}

WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8U) {
  RunExtAddPairwiseTest<uint16_t, uint32_t>(execution_tier, lower_simd,
                                            kExprI32x4ExtAddPairwiseI16x8U,
                                            kExprI16x8Splat);
}

WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16S) {
  RunExtAddPairwiseTest<int8_t, int16_t>(execution_tier, lower_simd,
                                         kExprI16x8ExtAddPairwiseI8x16S,
                                         kExprI8x16Splat);
}

WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16U) {
  RunExtAddPairwiseTest<uint8_t, uint16_t>(execution_tier, lower_simd,
                                           kExprI16x8ExtAddPairwiseI8x16U,
                                           kExprI8x16Splat);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-03-08 00:43:11 +00:00
|
|
|
WasmOpcode opcode, Int32BinOp expected_op) {
|
|
|
|
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
|
|
|
|
// Global to hold output.
|
|
|
|
int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
|
|
|
|
// Build fn to splat test values, perform binop, and write the result.
|
|
|
|
byte value1 = 0, value2 = 1;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
|
|
|
byte temp2 = r.AllocateLocal(kWasmS128);
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value1))),
|
|
|
|
WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
|
2020-12-17 16:55:33 +00:00
|
|
|
WASM_LOCAL_GET(temp2))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
2016-12-06 01:12:15 +00:00
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
FOR_INT32_INPUTS(x) {
|
|
|
|
FOR_INT32_INPUTS(y) {
|
|
|
|
r.Call(x, y);
|
|
|
|
int32_t expected = expected_op(x, y);
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
|
|
|
|
}
|
|
|
|
}
|
2016-12-06 01:12:15 +00:00
|
|
|
}
|
2016-08-23 19:59:19 +00:00
|
|
|
}
|
[Turbofan] Add native ARM support for basic SIMD 32x4 operations.
- Adds Float32x4 ExtractLane, ReplaceLane, Splat, Add, Sub,
and conversions to Int32x4 and Uint32x4.
- Adds Int32x4 ExtractLane, ReplaceLane, Splat, Add, Sub and
conversions to Float32x4 (int and unsigned int).
- Adds Int32x4 CompareEqual, CompareNotEqual.
- Adds S32x4 Select.
- Adds tests for all new SIMD operations.
LOG=N
BUG=v8:4124
Review-Url: https://codereview.chromium.org/2584863002
Cr-Commit-Position: refs/heads/master@{#41828}
2016-12-19 22:23:03 +00:00
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
WASM_SIMD_TEST(I32x4Add) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Add,
                    base::AddWithWraparound);
}

WASM_SIMD_TEST(I32x4Sub) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Sub,
                    base::SubWithWraparound);
}

WASM_SIMD_TEST(I32x4Mul) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Mul,
                    base::MulWithWraparound);
}

WASM_SIMD_TEST(I32x4MinS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinS, Minimum);
}

WASM_SIMD_TEST(I32x4MaxS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxS, Maximum);
}

WASM_SIMD_TEST(I32x4MinU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinU,
                    UnsignedMinimum);
}
WASM_SIMD_TEST(I32x4MaxU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxU,
                    UnsignedMaximum);
}

// The s128 bitwise ops are lane-agnostic; testing them through the i32x4
// helper is as good as any other shape.
WASM_SIMD_TEST(S128And) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128And,
                    [](int32_t x, int32_t y) { return x & y; });
}

WASM_SIMD_TEST(S128Or) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Or,
                    [](int32_t x, int32_t y) { return x | y; });
}

WASM_SIMD_TEST(S128Xor) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Xor,
                    [](int32_t x, int32_t y) { return x ^ y; });
}

// Bitwise operation, doesn't really matter what simd type we test it with.
WASM_SIMD_TEST(S128AndNot) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128AndNot,
                    [](int32_t x, int32_t y) { return x & ~y; });
}
|
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i32x4 comparisons: the scalar reference helpers return all-ones/all-zeros
// lane masks, matching the wasm semantics.
WASM_SIMD_TEST(I32x4Eq) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Eq, Equal);
}

WASM_SIMD_TEST(I32x4Ne) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Ne, NotEqual);
}

WASM_SIMD_TEST(I32x4LtS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LtS, Less);
}

WASM_SIMD_TEST(I32x4LeS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LeS, LessEqual);
}

WASM_SIMD_TEST(I32x4GtS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GtS, Greater);
}

WASM_SIMD_TEST(I32x4GeS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GeS, GreaterEqual);
}

WASM_SIMD_TEST(I32x4LtU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LtU, UnsignedLess);
}

WASM_SIMD_TEST(I32x4LeU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LeU,
                    UnsignedLessEqual);
}

WASM_SIMD_TEST(I32x4GtU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GtU, UnsignedGreater);
}

WASM_SIMD_TEST(I32x4GeU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GeU,
                    UnsignedGreaterEqual);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-03-08 00:43:11 +00:00
|
|
|
WasmOpcode opcode, Int32ShiftOp expected_op) {
|
2019-08-29 23:55:57 +00:00
|
|
|
// Intentionally shift by 32, should be no-op.
|
|
|
|
for (int shift = 1; shift <= 32; shift++) {
|
2019-03-08 00:43:11 +00:00
|
|
|
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
|
2020-01-29 19:13:57 +00:00
|
|
|
int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
|
|
|
|
int32_t* g_imm = r.builder().AddGlobal<int32_t>(kWasmS128);
|
|
|
|
int32_t* g_mem = r.builder().AddGlobal<int32_t>(kWasmS128);
|
2019-03-08 00:43:11 +00:00
|
|
|
byte value = 0;
|
2020-01-29 19:13:57 +00:00
|
|
|
byte simd = r.AllocateLocal(kWasmS128);
|
|
|
|
// Shift using an immediate, and shift using a value loaded from memory.
|
2020-05-05 18:04:33 +00:00
|
|
|
BUILD(
|
2020-12-17 16:55:33 +00:00
|
|
|
r, WASM_LOCAL_SET(simd, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
|
2020-01-29 19:13:57 +00:00
|
|
|
WASM_I32V(shift))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
|
2020-12-17 16:55:33 +00:00
|
|
|
opcode, WASM_LOCAL_GET(simd),
|
2020-01-29 19:13:57 +00:00
|
|
|
WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
|
|
|
|
WASM_ONE);
|
2019-03-08 00:43:11 +00:00
|
|
|
|
2020-01-29 19:13:57 +00:00
|
|
|
r.builder().WriteMemory(&memory[0], shift);
|
2019-03-08 00:43:11 +00:00
|
|
|
FOR_INT32_INPUTS(x) {
|
|
|
|
r.Call(x);
|
2019-07-30 18:50:02 +00:00
|
|
|
int32_t expected = expected_op(x, shift);
|
2019-03-08 00:43:11 +00:00
|
|
|
for (int i = 0; i < 4; i++) {
|
2020-01-29 19:13:57 +00:00
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_imm[i]));
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_mem[i]));
|
2019-03-08 00:43:11 +00:00
|
|
|
}
|
|
|
|
}
|
2018-07-03 17:14:07 +00:00
|
|
|
}
|
2017-02-07 17:33:37 +00:00
|
|
|
}
|
|
|
|
|
2020-04-03 23:14:14 +00:00
|
|
|
WASM_SIMD_TEST(I32x4Shl) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
                      LogicalShiftLeft);
}

WASM_SIMD_TEST(I32x4ShrS) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrS,
                      ArithmeticShiftRight);
}

WASM_SIMD_TEST(I32x4ShrU) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrU,
                      LogicalShiftRight);
}
|
2017-03-03 02:04:07 +00:00
|
|
|
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
// Tests both signed and unsigned conversion from I8x16 (unpacking).
|
2018-06-20 06:03:44 +00:00
|
|
|
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Four output vectors: signed high/low, then unsigned high/low.
  int16_t* g0 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g1 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g2 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g3 = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Function body: splat the test value, run all four conversions, and store
  // each result in its own global.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16High,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16Low,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16High,
                                          WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16Low,
                                          WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_INT8_INPUTS(x) {
    r.Call(x);
    // Signed unpack sign-extends; unsigned unpack zero-extends.
    int16_t expected_signed = static_cast<int16_t>(x);
    int16_t expected_unsigned = static_cast<int16_t>(static_cast<uint8_t>(x));
    for (int i = 0; i < 8; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g1[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g2[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g3[i]));
    }
  }
}
|
|
|
|
|
|
|
|
// Tests both signed and unsigned conversion from I32x4 (packing).
|
|
|
|
// Tests both signed and unsigned conversion from I32x4 (packing).
WASM_SIMD_TEST(I16x8ConvertI32x4) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Two output vectors: signed-saturated and unsigned-saturated results.
  int16_t* g0 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g1 = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Function body: splat the test value, pack it both ways (each pack takes
  // two operands, so the splatted vector is passed twice), store results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(
            0, WASM_SIMD_BINOP(kExprI16x8SConvertI32x4, WASM_LOCAL_GET(temp1),
                               WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(
            1, WASM_SIMD_BINOP(kExprI16x8UConvertI32x4, WASM_LOCAL_GET(temp1),
                               WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_INT32_INPUTS(x) {
    r.Call(x);
    // Packing saturates to the narrow type's range.
    int16_t expected_signed = base::saturated_cast<int16_t>(x);
    int16_t expected_unsigned = base::saturated_cast<uint16_t>(x);
    for (int i = 0; i < 8; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g1[i]));
    }
  }
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunI16x8UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-03-08 00:43:11 +00:00
|
|
|
WasmOpcode opcode, Int16UnOp expected_op) {
|
|
|
|
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
|
|
|
|
// Global to hold output.
|
|
|
|
int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
|
|
|
|
// Build fn to splat test value, perform unop, and write the result.
|
|
|
|
byte value = 0;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
2017-01-26 02:18:00 +00:00
|
|
|
|
2019-03-08 00:43:11 +00:00
|
|
|
FOR_INT16_INPUTS(x) {
|
|
|
|
r.Call(x);
|
|
|
|
int16_t expected = expected_op(x);
|
|
|
|
for (int i = 0; i < 8; i++) {
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
|
|
|
|
}
|
|
|
|
}
|
2017-01-26 02:18:00 +00:00
|
|
|
}
|
|
|
|
|
2018-05-23 23:24:10 +00:00
|
|
|
WASM_SIMD_TEST(I16x8Neg) {
  RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg,
                   base::NegateWithWraparound);
}

WASM_SIMD_TEST(I16x8Abs) {
  RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Abs, Abs);
}
|
|
|
|
|
2019-12-09 10:32:55 +00:00
|
|
|
template <typename T = int16_t, typename OpType = T (*)(T, T)>
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunI16x8BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2019-12-09 10:32:55 +00:00
|
|
|
WasmOpcode opcode, OpType expected_op) {
|
|
|
|
WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
|
2019-03-08 00:43:11 +00:00
|
|
|
// Global to hold output.
|
2019-12-09 10:32:55 +00:00
|
|
|
T* g = r.builder().template AddGlobal<T>(kWasmS128);
|
2019-03-08 00:43:11 +00:00
|
|
|
// Build fn to splat test values, perform binop, and write the result.
|
|
|
|
byte value1 = 0, value2 = 1;
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
|
|
|
byte temp2 = r.AllocateLocal(kWasmS128);
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
|
|
|
|
WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
|
2020-12-17 16:56:08 +00:00
|
|
|
WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
|
2020-12-17 16:55:33 +00:00
|
|
|
WASM_LOCAL_GET(temp2))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
|
2019-12-09 10:32:55 +00:00
|
|
|
for (T x : compiler::ValueHelper::GetVector<T>()) {
|
|
|
|
for (T y : compiler::ValueHelper::GetVector<T>()) {
|
2019-03-08 00:43:11 +00:00
|
|
|
r.Call(x, y);
|
2019-12-09 10:32:55 +00:00
|
|
|
T expected = expected_op(x, y);
|
2019-03-08 00:43:11 +00:00
|
|
|
for (int i = 0; i < 8; i++) {
|
2019-12-09 10:32:55 +00:00
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
|
2019-03-08 00:43:11 +00:00
|
|
|
}
|
2018-06-20 06:03:44 +00:00
|
|
|
}
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
WASM_SIMD_TEST(I16x8Add) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add,
                    base::AddWithWraparound);
}

WASM_SIMD_TEST(I16x8AddSatS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSatS,
                    SaturateAdd<int16_t>);
}

WASM_SIMD_TEST(I16x8Sub) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Sub,
                    base::SubWithWraparound);
}

WASM_SIMD_TEST(I16x8SubSatS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSatS,
                    SaturateSub<int16_t>);
}

WASM_SIMD_TEST(I16x8Mul) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Mul,
                    base::MulWithWraparound);
}

WASM_SIMD_TEST(I16x8MinS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinS, Minimum);
}

WASM_SIMD_TEST(I16x8MaxS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxS, Maximum);
}

WASM_SIMD_TEST(I16x8AddSatU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSatU,
                    SaturateAdd<uint16_t>);
}

WASM_SIMD_TEST(I16x8SubSatU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSatU,
                    SaturateSub<uint16_t>);
}

WASM_SIMD_TEST(I16x8MinU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinU,
                    UnsignedMinimum);
}

WASM_SIMD_TEST(I16x8MaxU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxU,
                    UnsignedMaximum);
}

WASM_SIMD_TEST(I16x8Eq) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Eq, Equal);
}

WASM_SIMD_TEST(I16x8Ne) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Ne, NotEqual);
}

WASM_SIMD_TEST(I16x8LtS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LtS, Less);
}

WASM_SIMD_TEST(I16x8LeS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LeS, LessEqual);
}

WASM_SIMD_TEST(I16x8GtS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GtS, Greater);
}

WASM_SIMD_TEST(I16x8GeS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GeS, GreaterEqual);
}

WASM_SIMD_TEST(I16x8GtU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GtU, UnsignedGreater);
}

WASM_SIMD_TEST(I16x8GeU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GeU,
                    UnsignedGreaterEqual);
}

WASM_SIMD_TEST(I16x8LtU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LtU, UnsignedLess);
}

WASM_SIMD_TEST(I16x8LeU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LeU,
                    UnsignedLessEqual);
}

WASM_SIMD_TEST(I16x8RoundingAverageU) {
  RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd,
                              kExprI16x8RoundingAverageU,
                              base::RoundingAverageUnsigned);
}

WASM_SIMD_TEST_NO_LOWERING(I16x8Q15MulRSatS) {
  RunI16x8BinOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8Q15MulRSatS,
                             SaturateRoundingQMul<int16_t>);
}
|
2020-10-16 19:54:50 +00:00
|
|
|
|
|
|
|
namespace {
// Selects which half of the narrow input an extended-multiply opcode reads:
// the low lanes or the high lanes.
enum class MulHalf { kLow, kHigh };

// Helper to run ext mul tests. It will splat 2 input values into 2 v128, call
// the mul op on these operands, and set the result into a global.
// It will zero the top or bottom half of one of the operands, this will catch
// mistakes if we are multiplying the incorrect halves.
// S is the narrow (source) lane type, T the wide (result) lane type.
template <typename S, typename T, typename OpType = T (*)(S, S)>
void RunExtMulTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                   WasmOpcode opcode, OpType expected_op, WasmOpcode splat,
                   MulHalf half) {
  WasmRunner<int32_t, S, S> r(execution_tier, lower_simd);
  // Zero the 64-bit half that the opcode should NOT read: i64x2 lane 1 (the
  // high half) when testing a "low" op, lane 0 when testing a "high" op.
  int lane_to_zero = half == MulHalf::kLow ? 1 : 0;
  T* g = r.builder().template AddGlobal<T>(kWasmS128);

  BUILD(r,
        WASM_GLOBAL_SET(
            0, WASM_SIMD_BINOP(
                   opcode,
                   WASM_SIMD_I64x2_REPLACE_LANE(
                       lane_to_zero, WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(0)),
                       WASM_I64V_1(0)),
                   WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(1)))),
        WASM_ONE);

  constexpr int lanes = kSimd128Size / sizeof(T);
  for (S x : compiler::ValueHelper::GetVector<S>()) {
    for (S y : compiler::ValueHelper::GetVector<S>()) {
      r.Call(x, y);
      T expected = expected_op(x, y);
      // Every wide lane of the result should hold expected_op(x, y), since
      // both inputs were splatted.
      for (int i = 0; i < lanes; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
      }
    }
  }
}
}  // namespace
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i16x8.extmul_low_i8x16_s: signed widening multiply of the low i8x16 lanes.
WASM_SIMD_TEST(I16x8ExtMulLowI8x16S) {
  RunExtMulTest<int8_t, int16_t>(execution_tier, lower_simd,
                                 kExprI16x8ExtMulLowI8x16S, MultiplyLong,
                                 kExprI8x16Splat, MulHalf::kLow);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i16x8.extmul_high_i8x16_s: signed widening multiply of the high i8x16 lanes.
WASM_SIMD_TEST(I16x8ExtMulHighI8x16S) {
  RunExtMulTest<int8_t, int16_t>(execution_tier, lower_simd,
                                 kExprI16x8ExtMulHighI8x16S, MultiplyLong,
                                 kExprI8x16Splat, MulHalf::kHigh);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i16x8.extmul_low_i8x16_u: unsigned widening multiply of the low i8x16 lanes.
WASM_SIMD_TEST(I16x8ExtMulLowI8x16U) {
  RunExtMulTest<uint8_t, uint16_t>(execution_tier, lower_simd,
                                   kExprI16x8ExtMulLowI8x16U, MultiplyLong,
                                   kExprI8x16Splat, MulHalf::kLow);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i16x8.extmul_high_i8x16_u: unsigned widening multiply of the high i8x16
// lanes.
WASM_SIMD_TEST(I16x8ExtMulHighI8x16U) {
  RunExtMulTest<uint8_t, uint16_t>(execution_tier, lower_simd,
                                   kExprI16x8ExtMulHighI8x16U, MultiplyLong,
                                   kExprI8x16Splat, MulHalf::kHigh);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i32x4.extmul_low_i16x8_s: signed widening multiply of the low i16x8 lanes.
WASM_SIMD_TEST(I32x4ExtMulLowI16x8S) {
  RunExtMulTest<int16_t, int32_t>(execution_tier, lower_simd,
                                  kExprI32x4ExtMulLowI16x8S, MultiplyLong,
                                  kExprI16x8Splat, MulHalf::kLow);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i32x4.extmul_high_i16x8_s: signed widening multiply of the high i16x8 lanes.
WASM_SIMD_TEST(I32x4ExtMulHighI16x8S) {
  RunExtMulTest<int16_t, int32_t>(execution_tier, lower_simd,
                                  kExprI32x4ExtMulHighI16x8S, MultiplyLong,
                                  kExprI16x8Splat, MulHalf::kHigh);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i32x4.extmul_low_i16x8_u: unsigned widening multiply of the low i16x8 lanes.
WASM_SIMD_TEST(I32x4ExtMulLowI16x8U) {
  RunExtMulTest<uint16_t, uint32_t>(execution_tier, lower_simd,
                                    kExprI32x4ExtMulLowI16x8U, MultiplyLong,
                                    kExprI16x8Splat, MulHalf::kLow);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i32x4.extmul_high_i16x8_u: unsigned widening multiply of the high i16x8
// lanes.
WASM_SIMD_TEST(I32x4ExtMulHighI16x8U) {
  RunExtMulTest<uint16_t, uint32_t>(execution_tier, lower_simd,
                                    kExprI32x4ExtMulHighI16x8U, MultiplyLong,
                                    kExprI16x8Splat, MulHalf::kHigh);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i64x2.extmul_low_i32x4_s: signed widening multiply of the low i32x4 lanes.
WASM_SIMD_TEST(I64x2ExtMulLowI32x4S) {
  RunExtMulTest<int32_t, int64_t>(execution_tier, lower_simd,
                                  kExprI64x2ExtMulLowI32x4S, MultiplyLong,
                                  kExprI32x4Splat, MulHalf::kLow);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i64x2.extmul_high_i32x4_s: signed widening multiply of the high i32x4 lanes.
WASM_SIMD_TEST(I64x2ExtMulHighI32x4S) {
  RunExtMulTest<int32_t, int64_t>(execution_tier, lower_simd,
                                  kExprI64x2ExtMulHighI32x4S, MultiplyLong,
                                  kExprI32x4Splat, MulHalf::kHigh);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i64x2.extmul_low_i32x4_u: unsigned widening multiply of the low i32x4 lanes.
WASM_SIMD_TEST(I64x2ExtMulLowI32x4U) {
  RunExtMulTest<uint32_t, uint64_t>(execution_tier, lower_simd,
                                    kExprI64x2ExtMulLowI32x4U, MultiplyLong,
                                    kExprI32x4Splat, MulHalf::kLow);
}
|
|
|
|
|
2021-01-05 09:09:33 +00:00
|
|
|
// i64x2.extmul_high_i32x4_u: unsigned widening multiply of the high i32x4
// lanes.
WASM_SIMD_TEST(I64x2ExtMulHighI32x4U) {
  RunExtMulTest<uint32_t, uint64_t>(execution_tier, lower_simd,
                                    kExprI64x2ExtMulHighI32x4U, MultiplyLong,
                                    kExprI32x4Splat, MulHalf::kHigh);
}
|
2020-10-01 20:08:54 +00:00
|
|
|
|
2020-10-16 18:27:45 +00:00
|
|
|
// i32x4.dot_i16x8_s: multiplies adjacent signed i16 lane pairs and adds the
// products into i32 lanes. Both operands are splats here, so every i32 lane
// should equal x * y + x * y.
WASM_SIMD_TEST(I32x4DotI16x8S) {
  WasmRunner<int32_t, int16_t, int16_t> r(execution_tier, lower_simd);
  // Global to hold the i32x4 result.
  int32_t* g = r.builder().template AddGlobal<int32_t>(kWasmS128);
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
        WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
        WASM_GLOBAL_SET(
            0, WASM_SIMD_BINOP(kExprI32x4DotI16x8S, WASM_LOCAL_GET(temp1),
                               WASM_LOCAL_GET(temp2))),
        WASM_ONE);

  for (int16_t x : compiler::ValueHelper::GetVector<int16_t>()) {
    for (int16_t y : compiler::ValueHelper::GetVector<int16_t>()) {
      r.Call(x, y);
      // x * y * 2 can overflow (0x8000), the behavior is to wraparound.
      int32_t expected = base::MulWithWraparound(x * y, 2);
      for (int i = 0; i < 4; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
      }
    }
  }
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Runs an i16x8 shift opcode for every shift amount 1..16, comparing each of
// the 8 lanes against the scalar expected_op. Two code paths are exercised per
// shift amount: the shift count as an immediate, and the shift count loaded
// from linear memory (a non-constant operand).
void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                         WasmOpcode opcode, Int16ShiftOp expected_op) {
  // Intentionally shift by 16, should be no-op.
  for (int shift = 1; shift <= 16; shift++) {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
    // Global 0 holds the immediate-shift result, global 1 the memory-shift
    // result.
    int16_t* g_imm = r.builder().AddGlobal<int16_t>(kWasmS128);
    int16_t* g_mem = r.builder().AddGlobal<int16_t>(kWasmS128);
    byte value = 0;
    byte simd = r.AllocateLocal(kWasmS128);
    // Shift using an immediate, and shift using a value loaded from memory.
    BUILD(
        r, WASM_LOCAL_SET(simd, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
                                              WASM_I32V(shift))),
        WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
                               opcode, WASM_LOCAL_GET(simd),
                               WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
        WASM_ONE);

    r.builder().WriteMemory(&memory[0], shift);
    FOR_INT16_INPUTS(x) {
      r.Call(x);
      int16_t expected = expected_op(x, shift);
      for (int i = 0; i < 8; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_imm[i]));
        CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_mem[i]));
      }
    }
  }
}
|
|
|
|
|
2020-04-03 23:14:14 +00:00
|
|
|
// i16x8.shl: lanewise logical shift left.
WASM_SIMD_TEST(I16x8Shl) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
                      LogicalShiftLeft);
}
|
|
|
|
|
2020-04-03 23:14:14 +00:00
|
|
|
// i16x8.shr_s: lanewise arithmetic (sign-propagating) shift right.
WASM_SIMD_TEST(I16x8ShrS) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrS,
                      ArithmeticShiftRight);
}
|
|
|
|
|
2020-04-03 23:14:14 +00:00
|
|
|
// i16x8.shr_u: lanewise logical (zero-filling) shift right.
WASM_SIMD_TEST(I16x8ShrU) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrU,
                      LogicalShiftRight);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Runs an i8x16 unary opcode over all int8 inputs, comparing each of the 16
// result lanes against the scalar expected_op.
void RunI8x16UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, Int8UnOp expected_op) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_INT8_INPUTS(x) {
    r.Call(x);
    int8_t expected = expected_op(x);
    // The input was splatted, so all 16 lanes must match.
    for (int i = 0; i < 16; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
    }
  }
}
|
|
|
|
|
2018-05-23 23:24:10 +00:00
|
|
|
// i8x16.neg: lanewise wrapping negation (INT8_MIN negates to itself).
WASM_SIMD_TEST(I8x16Neg) {
  RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg,
                   base::NegateWithWraparound);
}
|
2017-01-26 02:18:00 +00:00
|
|
|
|
2020-03-03 06:56:03 +00:00
|
|
|
// i8x16.abs: lanewise absolute value.
WASM_SIMD_TEST(I8x16Abs) {
  RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Abs, Abs);
}
|
|
|
|
|
2020-10-09 19:57:22 +00:00
|
|
|
// i8x16.popcnt: lanewise population count, checked against
// base::bits::CountPopulation (the count always fits in an i8 lane: max 8).
WASM_SIMD_TEST_NO_LOWERING(I8x16Popcnt) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(
            0, WASM_SIMD_UNOP(kExprI8x16Popcnt, WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_UINT8_INPUTS(x) {
    r.Call(x);
    unsigned expected = base::bits::CountPopulation(x);
    for (int i = 0; i < 16; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
    }
  }
}
|
|
|
|
|
[WASM SIMD] Implement packing and unpacking integer conversions.
- Adds WASM opcodes I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, which unpack half of
an I16x8 register into a whole I32x4 register, with signed or unsigned
extension. Having separate Low/High opcodes works around the difficulty
of having multiple output registers, which would be necessary if we unpacked
the entire I16x8 register.
- Adds WASM opcodes I16x8SConvertI8x16Low, I16x8SConvertI8x16High,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, similarly to above.
- Adds WASM opcodes I16x8SConvertI32x4, I16x8UConvertI32x4,
I8x16SConvert16x8, I8x16UConvertI16x8, which pack two source registers
into a single destination register with signed or unsigned saturation. These
could have been separated into half operations, but this is simpler to
implement with SSE, AVX, and is acceptable on ARM. It also avoids adding
operations that only modify half of their destination register.
- Implements these opcodes for ARM.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2800523002
Cr-Commit-Position: refs/heads/master@{#44541}
2017-04-10 23:07:30 +00:00
|
|
|
// Tests both signed and unsigned conversion from I16x8 (packing).
// i8x16.narrow_i16x8_s / i8x16.narrow_i16x8_u pack two i16x8 vectors into one
// i8x16 vector with signed/unsigned saturation; expected lane values come from
// base::saturated_cast.
WASM_SIMD_TEST(I8x16ConvertI16x8) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create output vectors to hold signed and unsigned results.
  int8_t* g_s = r.builder().AddGlobal<int8_t>(kWasmS128);
  uint8_t* g_u = r.builder().AddGlobal<uint8_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(
            0, WASM_SIMD_BINOP(kExprI8x16SConvertI16x8, WASM_LOCAL_GET(temp1),
                               WASM_LOCAL_GET(temp1))),
        WASM_GLOBAL_SET(
            1, WASM_SIMD_BINOP(kExprI8x16UConvertI16x8, WASM_LOCAL_GET(temp1),
                               WASM_LOCAL_GET(temp1))),
        WASM_ONE);

  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int8_t expected_signed = base::saturated_cast<int8_t>(x);
    uint8_t expected_unsigned = base::saturated_cast<uint8_t>(x);
    // Both halves came from the same splatted input, so all 16 lanes match.
    for (int i = 0; i < 16; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int8_t>(&g_s[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<uint8_t>(&g_u[i]));
    }
  }
}
|
|
|
|
|
2019-12-09 10:32:55 +00:00
|
|
|
// Runs an i8x16 binary opcode over all pairs from ValueHelper's input vector,
// comparing each of the 16 result lanes against the scalar expected_op.
// T selects the lane interpretation (int8_t by default, uint8_t for the
// unsigned ops).
template <typename T = int8_t, typename OpType = T (*)(T, T)>
void RunI8x16BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, OpType expected_op) {
  WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
  // Global to hold output.
  T* g = r.builder().template AddGlobal<T>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
        WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
                                           WASM_LOCAL_GET(temp2))),
        WASM_ONE);

  for (T x : compiler::ValueHelper::GetVector<T>()) {
    for (T y : compiler::ValueHelper::GetVector<T>()) {
      r.Call(x, y);
      T expected = expected_op(x, y);
      // Both inputs were splatted, so all 16 lanes must match.
      for (int i = 0; i < 16; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
      }
    }
  }
}
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// i8x16.add: lanewise wrapping addition.
WASM_SIMD_TEST(I8x16Add) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add,
                    base::AddWithWraparound);
}
|
2017-01-26 02:18:00 +00:00
|
|
|
|
2020-10-09 20:59:22 +00:00
|
|
|
// i8x16.add_sat_s: lanewise signed saturating addition.
WASM_SIMD_TEST(I8x16AddSatS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSatS,
                    SaturateAdd<int8_t>);
}
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// i8x16.sub: lanewise wrapping subtraction.
WASM_SIMD_TEST(I8x16Sub) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Sub,
                    base::SubWithWraparound);
}
|
2017-01-26 02:18:00 +00:00
|
|
|
|
2020-10-09 20:59:22 +00:00
|
|
|
// i8x16.sub_sat_s: lanewise signed saturating subtraction.
WASM_SIMD_TEST(I8x16SubSatS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSatS,
                    SaturateSub<int8_t>);
}
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// i8x16.min_s: lanewise signed minimum.
WASM_SIMD_TEST(I8x16MinS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinS, Minimum);
}
|
2017-02-07 17:33:37 +00:00
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// i8x16.max_s: lanewise signed maximum.
WASM_SIMD_TEST(I8x16MaxS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxS, Maximum);
}
|
2017-02-07 17:33:37 +00:00
|
|
|
|
2020-10-09 20:59:22 +00:00
|
|
|
// i8x16.add_sat_u: lanewise unsigned saturating addition.
WASM_SIMD_TEST(I8x16AddSatU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSatU,
                    SaturateAdd<uint8_t>);
}
|
|
|
|
|
2020-10-09 20:59:22 +00:00
|
|
|
// i8x16.sub_sat_u: lanewise unsigned saturating subtraction.
WASM_SIMD_TEST(I8x16SubSatU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSatU,
                    SaturateSub<uint8_t>);
}
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// i8x16.min_u: lanewise unsigned minimum.
WASM_SIMD_TEST(I8x16MinU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinU,
                    UnsignedMinimum);
}
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// i8x16.max_u: lanewise unsigned maximum.
WASM_SIMD_TEST(I8x16MaxU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxU,
                    UnsignedMaximum);
}
|
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.eq: lanewise equality comparison.
WASM_SIMD_TEST(I8x16Eq) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Eq, Equal);
}
|
2017-02-21 16:45:30 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.ne: lanewise inequality comparison.
WASM_SIMD_TEST(I8x16Ne) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Ne, NotEqual);
}
|
[wasm] Implement first set of SIMD I8x16 ops
- I8x16Splat, I8x16ExtractLane, I8x16ReplaceLane
- Binops: I8x16Add, I8x16AddSaturateS, I8x16Sub, I8x16SubSaturateS, I8x16MinS,
I8x16MaxS, I8x16AddSaturateU, I8x16SubSaturateU, I8x16MinU, I8x16MaxU
- Compare ops: I8x16Eq, I8x16Ne
BUG=v8:6020
R=bbudge@chromium.org, bmeurer@chromium.org
Review-Url: https://codereview.chromium.org/2829483002
Cr-Commit-Position: refs/heads/master@{#44706}
2017-04-18 23:23:12 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.gt_s: lanewise signed greater-than.
WASM_SIMD_TEST(I8x16GtS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GtS, Greater);
}
|
2017-02-21 16:45:30 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.ge_s: lanewise signed greater-than-or-equal.
WASM_SIMD_TEST(I8x16GeS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GeS, GreaterEqual);
}
|
2017-02-21 16:45:30 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.lt_s: lanewise signed less-than.
WASM_SIMD_TEST(I8x16LtS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LtS, Less);
}
|
2017-02-21 16:45:30 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.le_s: lanewise signed less-than-or-equal.
WASM_SIMD_TEST(I8x16LeS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LeS, LessEqual);
}
|
2017-02-21 16:45:30 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.gt_u: lanewise unsigned greater-than.
WASM_SIMD_TEST(I8x16GtU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GtU, UnsignedGreater);
}
|
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.ge_u: lanewise unsigned greater-than-or-equal.
WASM_SIMD_TEST(I8x16GeU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GeU,
                    UnsignedGreaterEqual);
}
|
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.lt_u: lanewise unsigned less-than.
WASM_SIMD_TEST(I8x16LtU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LtU, UnsignedLess);
}
|
2017-01-26 02:18:00 +00:00
|
|
|
|
2018-05-25 21:50:54 +00:00
|
|
|
// i8x16.le_u: lanewise unsigned less-than-or-equal.
WASM_SIMD_TEST(I8x16LeU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LeU,
                    UnsignedLessEqual);
}
|
2017-02-07 17:33:37 +00:00
|
|
|
|
2020-02-06 21:00:57 +00:00
|
|
|
// i8x16.avgr_u: lanewise unsigned rounding average, checked against
// base::RoundingAverageUnsigned. Instantiated with uint8_t so inputs and
// expected values use unsigned lane semantics.
WASM_SIMD_TEST(I8x16RoundingAverageU) {
  RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd,
                             kExprI8x16RoundingAverageU,
                             base::RoundingAverageUnsigned);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Runs an i8x16 shift opcode for every shift amount 1..8, comparing each of
// the 16 lanes against the scalar expected_op. Two code paths are exercised
// per shift amount: the shift count as an immediate, and the shift count
// loaded from linear memory (a non-constant operand).
void RunI8x16ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                         WasmOpcode opcode, Int8ShiftOp expected_op) {
  // Intentionally shift by 8, should be no-op.
  for (int shift = 1; shift <= 8; shift++) {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
    // Global 0 holds the immediate-shift result, global 1 the memory-shift
    // result.
    int8_t* g_imm = r.builder().AddGlobal<int8_t>(kWasmS128);
    int8_t* g_mem = r.builder().AddGlobal<int8_t>(kWasmS128);
    byte value = 0;
    byte simd = r.AllocateLocal(kWasmS128);
    // Shift using an immediate, and shift using a value loaded from memory.
    BUILD(
        r, WASM_LOCAL_SET(simd, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
        WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
                                              WASM_I32V(shift))),
        WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
                               opcode, WASM_LOCAL_GET(simd),
                               WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
        WASM_ONE);

    r.builder().WriteMemory(&memory[0], shift);
    FOR_INT8_INPUTS(x) {
      r.Call(x);
      int8_t expected = expected_op(x, shift);
      for (int i = 0; i < 16; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_imm[i]));
        CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_mem[i]));
      }
    }
  }
}
|
|
|
|
|
2020-04-03 23:14:14 +00:00
|
|
|
// i8x16.shl: lanewise logical shift left.
WASM_SIMD_TEST(I8x16Shl) {
  RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16Shl,
                      LogicalShiftLeft);
}
|
|
|
|
|
2020-04-03 23:14:14 +00:00
|
|
|
// i8x16.shr_s: lanewise arithmetic (sign-propagating) shift right.
WASM_SIMD_TEST(I8x16ShrS) {
  RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrS,
                      ArithmeticShiftRight);
}
|
|
|
|
|
2020-04-03 23:14:14 +00:00
|
|
|
// i8x16.shr_u: lanewise logical (zero-filling) shift right.
WASM_SIMD_TEST(I8x16ShrU) {
  RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrU,
                      LogicalShiftRight);
}
|
2017-02-13 20:24:43 +00:00
|
|
|
|
2017-06-08 20:54:32 +00:00
|
|
|
// Test Select by making a mask where lanes 1 and 2 are true and the rest
// false, and comparing for non-equality with zero to convert to a boolean
// vector. Select must pick src1 (val1) for the true lanes and src2 (val2) for
// the false ones.
#define WASM_SIMD_SELECT_TEST(format)                                        \
  WASM_SIMD_TEST(S##format##Select) {                                        \
    WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);     \
    byte val1 = 0;                                                           \
    byte val2 = 1;                                                           \
    byte src1 = r.AllocateLocal(kWasmS128);                                  \
    byte src2 = r.AllocateLocal(kWasmS128);                                  \
    byte zero = r.AllocateLocal(kWasmS128);                                  \
    byte mask = r.AllocateLocal(kWasmS128);                                  \
    BUILD(r,                                                                 \
          WASM_LOCAL_SET(src1,                                               \
                         WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val1))), \
          WASM_LOCAL_SET(src2,                                               \
                         WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val2))), \
          WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)),      \
          WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE(           \
                                   1, WASM_LOCAL_GET(zero), WASM_I32V(-1))), \
          WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE(           \
                                   2, WASM_LOCAL_GET(mask), WASM_I32V(-1))), \
          WASM_LOCAL_SET(                                                    \
              mask,                                                          \
              WASM_SIMD_SELECT(                                              \
                  format, WASM_LOCAL_GET(src1), WASM_LOCAL_GET(src2),        \
                  WASM_SIMD_BINOP(kExprI##format##Ne, WASM_LOCAL_GET(mask),  \
                                  WASM_LOCAL_GET(zero)))),                   \
          WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 0),             \
          WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val1, 1),             \
          WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val1, 2),             \
          WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 3), WASM_ONE);  \
                                                                             \
    CHECK_EQ(1, r.Call(0x12, 0x34));                                         \
  }

WASM_SIMD_SELECT_TEST(32x4)
WASM_SIMD_SELECT_TEST(16x8)
WASM_SIMD_SELECT_TEST(8x16)
|
2017-06-08 20:54:32 +00:00
|
|
|
|
|
|
|
// Test Select by making a mask where lanes 1 and 2 are non-zero (0xF) and the
// rest 0. The mask is not the result of a comparison op, i.e. not all-ones or
// all-zeros per lane, so the select behaves bitwise: the affected lanes hold a
// blend of val1 and val2 bits ("combined").
#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format)                           \
  WASM_SIMD_TEST(S##format##NonCanonicalSelect) {                             \
    WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier,          \
                                                     lower_simd);             \
    byte val1 = 0;                                                            \
    byte val2 = 1;                                                            \
    byte combined = 2;                                                        \
    byte src1 = r.AllocateLocal(kWasmS128);                                   \
    byte src2 = r.AllocateLocal(kWasmS128);                                   \
    byte zero = r.AllocateLocal(kWasmS128);                                   \
    byte mask = r.AllocateLocal(kWasmS128);                                   \
    BUILD(r,                                                                  \
          WASM_LOCAL_SET(src1,                                                \
                         WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val1))),  \
          WASM_LOCAL_SET(src2,                                                \
                         WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val2))),  \
          WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)),       \
          WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE(            \
                                   1, WASM_LOCAL_GET(zero), WASM_I32V(0xF))), \
          WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE(            \
                                   2, WASM_LOCAL_GET(mask), WASM_I32V(0xF))), \
          WASM_LOCAL_SET(mask, WASM_SIMD_SELECT(format, WASM_LOCAL_GET(src1), \
                                                WASM_LOCAL_GET(src2),         \
                                                WASM_LOCAL_GET(mask))),       \
          WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 0),              \
          WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, combined, 1),          \
          WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, combined, 2),          \
          WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 3), WASM_ONE);   \
                                                                              \
    CHECK_EQ(1, r.Call(0x12, 0x34, 0x32));                                    \
  }

WASM_SIMD_NON_CANONICAL_SELECT_TEST(32x4)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(16x8)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(8x16)
|
2017-03-02 19:50:33 +00:00
|
|
|
|
2017-04-24 18:53:16 +00:00
|
|
|
// Test binary ops with two lane test patterns, all lanes distinct.
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
// Runs a binary SIMD op (or an i8x16.shuffle, whose immediates come from
// `expected`) over two globals holding consecutive-integer lane patterns, and
// checks that the result written back to global 0 matches `expected` per lane.
//
// T selects the lane type (int8_t/int16_t/int32_t...); `expected` holds one
// value per lane of a 128-bit vector.
template <typename T>
void RunBinaryLaneOpTest(
    TestExecutionTier execution_tier, LowerSimd lower_simd, WasmOpcode simd_op,
    const std::array<T, kSimd128Size / sizeof(T)>& expected) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
  T* src0 = r.builder().AddGlobal<T>(kWasmS128);
  T* src1 = r.builder().AddGlobal<T>(kWasmS128);
  static const int kElems = kSimd128Size / sizeof(T);
  for (int i = 0; i < kElems; i++) {
    WriteLittleEndianValue<T>(&src0[i], i);
    WriteLittleEndianValue<T>(&src1[i], kElems + i);
  }
  if (simd_op == kExprI8x16Shuffle) {
    // Shuffles carry their 16 lane indices as immediates, so they need a
    // dedicated encoding macro; `expected` doubles as the immediate bytes.
    BUILD(r,
          WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_SHUFFLE_OP(simd_op, expected,
                                                        WASM_GLOBAL_GET(0),
                                                        WASM_GLOBAL_GET(1))),
          WASM_ONE);
  } else {
    BUILD(r,
          WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(simd_op, WASM_GLOBAL_GET(0),
                                             WASM_GLOBAL_GET(1))),
          WASM_ONE);
  }

  CHECK_EQ(1, r.Call());
  // Global 0 now holds the op's result; compare it lane by lane.
  for (size_t i = 0; i < expected.size(); i++) {
    CHECK_EQ(ReadLittleEndianValue<T>(&src0[i]), expected[i]);
  }
}
|
|
|
|
|
2018-06-09 17:01:36 +00:00
|
|
|
// Test shuffle ops.
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunShuffleOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2018-06-09 17:01:36 +00:00
|
|
|
WasmOpcode simd_op,
|
2018-07-13 18:24:42 +00:00
|
|
|
const std::array<int8_t, kSimd128Size>& shuffle) {
|
2018-06-09 17:01:36 +00:00
|
|
|
// Test the original shuffle.
|
2018-08-21 15:01:31 +00:00
|
|
|
RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op, shuffle);
|
2018-06-09 17:01:36 +00:00
|
|
|
|
|
|
|
// Test a non-canonical (inputs reversed) version of the shuffle.
|
2018-07-13 18:24:42 +00:00
|
|
|
std::array<int8_t, kSimd128Size> other_shuffle(shuffle);
|
2018-06-09 17:01:36 +00:00
|
|
|
for (size_t i = 0; i < shuffle.size(); ++i) other_shuffle[i] ^= kSimd128Size;
|
2018-08-21 15:01:31 +00:00
|
|
|
RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op,
|
2018-07-13 18:24:42 +00:00
|
|
|
other_shuffle);
|
2018-06-09 17:01:36 +00:00
|
|
|
|
|
|
|
// Test the swizzle (one-operand) version of the shuffle.
|
2018-07-13 18:24:42 +00:00
|
|
|
std::array<int8_t, kSimd128Size> swizzle(shuffle);
|
2018-06-09 17:01:36 +00:00
|
|
|
for (size_t i = 0; i < shuffle.size(); ++i) swizzle[i] &= (kSimd128Size - 1);
|
2018-08-21 15:01:31 +00:00
|
|
|
RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op, swizzle);
|
2018-06-09 17:01:36 +00:00
|
|
|
|
|
|
|
// Test the non-canonical swizzle (one-operand) version of the shuffle.
|
2018-07-13 18:24:42 +00:00
|
|
|
std::array<int8_t, kSimd128Size> other_swizzle(shuffle);
|
2018-06-09 17:01:36 +00:00
|
|
|
for (size_t i = 0; i < shuffle.size(); ++i) other_swizzle[i] |= kSimd128Size;
|
2018-08-21 15:01:31 +00:00
|
|
|
RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op,
|
2018-07-13 18:24:42 +00:00
|
|
|
other_swizzle);
|
2018-06-09 17:01:36 +00:00
|
|
|
}
|
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
// X-macro list of all named test shuffles. Expanded below to build both the
// ShuffleKey enum and one WASM_SIMD_TEST per shuffle.
#define SHUFFLE_LIST(V)  \
  V(S128Identity)        \
  V(S32x4Dup)            \
  V(S32x4ZipLeft)        \
  V(S32x4ZipRight)       \
  V(S32x4UnzipLeft)      \
  V(S32x4UnzipRight)     \
  V(S32x4TransposeLeft)  \
  V(S32x4TransposeRight) \
  V(S32x2Reverse)        \
  V(S32x4Irregular)      \
  V(S32x4Rotate)         \
  V(S16x8Dup)            \
  V(S16x8ZipLeft)        \
  V(S16x8ZipRight)       \
  V(S16x8UnzipLeft)      \
  V(S16x8UnzipRight)     \
  V(S16x8TransposeLeft)  \
  V(S16x8TransposeRight) \
  V(S16x4Reverse)        \
  V(S16x2Reverse)        \
  V(S16x8Irregular)      \
  V(S8x16Dup)            \
  V(S8x16ZipLeft)        \
  V(S8x16ZipRight)       \
  V(S8x16UnzipLeft)      \
  V(S8x16UnzipRight)     \
  V(S8x16TransposeLeft)  \
  V(S8x16TransposeRight) \
  V(S8x8Reverse)         \
  V(S8x4Reverse)         \
  V(S8x2Reverse)         \
  V(S8x16Irregular)
|
|
|
|
|
|
|
|
// One enum key per named test shuffle; kNumShuffleKeys is the count and is
// used to pick a shuffle at random in GetRandomTestShuffle.
enum ShuffleKey {
#define SHUFFLE_ENUM_VALUE(Name) k##Name,
  SHUFFLE_LIST(SHUFFLE_ENUM_VALUE)
#undef SHUFFLE_ENUM_VALUE
  kNumShuffleKeys
};
|
|
|
|
|
|
|
|
// A Shuffle is the 16 byte-lane indices of an i8x16.shuffle immediate.
using Shuffle = std::array<int8_t, kSimd128Size>;
using ShuffleMap = std::map<ShuffleKey, const Shuffle>;
|
|
|
|
|
|
|
|
// Lane indices for each named shuffle. Indices 0-15 pick bytes from the first
// input vector and 16-31 from the second; entries marked "swizzle only" read
// from a single input.
ShuffleMap test_shuffles = {
    {kS128Identity,
     {{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}}},
    {kS32x4Dup,
     {{16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19}}},
    {kS32x4ZipLeft, {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}}},
    {kS32x4ZipRight,
     {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}}},
    {kS32x4UnzipLeft,
     {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}}},
    {kS32x4UnzipRight,
     {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}}},
    {kS32x4TransposeLeft,
     {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}}},
    {kS32x4TransposeRight,
     {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}}},
    {kS32x2Reverse,  // swizzle only
     {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}}},
    {kS32x4Irregular,
     {{0, 1, 2, 3, 16, 17, 18, 19, 16, 17, 18, 19, 20, 21, 22, 23}}},
    {kS32x4Rotate, {{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3}}},
    {kS16x8Dup,
     {{18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19}}},
    {kS16x8ZipLeft, {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}}},
    {kS16x8ZipRight,
     {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}}},
    {kS16x8UnzipLeft,
     {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}}},
    {kS16x8UnzipRight,
     {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}}},
    {kS16x8TransposeLeft,
     {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}}},
    {kS16x8TransposeRight,
     {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}}},
    {kS16x4Reverse,  // swizzle only
     {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}}},
    {kS16x2Reverse,  // swizzle only
     {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}}},
    {kS16x8Irregular,
     {{0, 1, 16, 17, 16, 17, 0, 1, 4, 5, 20, 21, 6, 7, 22, 23}}},
    {kS8x16Dup,
     {{19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19}}},
    {kS8x16ZipLeft, {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}}},
    {kS8x16ZipRight,
     {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}}},
    {kS8x16UnzipLeft,
     {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}}},
    {kS8x16UnzipRight,
     {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}}},
    {kS8x16TransposeLeft,
     {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}}},
    {kS8x16TransposeRight,
     {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}}},
    {kS8x8Reverse,  // swizzle only
     {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}}},
    {kS8x4Reverse,  // swizzle only
     {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}}},
    {kS8x2Reverse,  // swizzle only
     {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}}},
    {kS8x16Irregular,
     {{0, 16, 0, 16, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}}},
};
|
|
|
|
|
|
|
|
// Instantiates one WASM_SIMD_TEST per SHUFFLE_LIST entry, looking up the
// shuffle's lane indices in test_shuffles by its enum key.
#define SHUFFLE_TEST(Name)                                          \
  WASM_SIMD_TEST(Name) {                                            \
    ShuffleMap::const_iterator it = test_shuffles.find(k##Name);    \
    DCHECK_NE(it, test_shuffles.end());                             \
    RunShuffleOpTest(execution_tier, lower_simd, kExprI8x16Shuffle, \
                     it->second);                                   \
  }
SHUFFLE_LIST(SHUFFLE_TEST)
#undef SHUFFLE_TEST
#undef SHUFFLE_LIST
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
// Test shuffles that blend the two vectors (elements remain in their lanes.)
|
|
|
|
WASM_SIMD_TEST(S8x16Blend) {
|
|
|
|
std::array<int8_t, kSimd128Size> expected;
|
|
|
|
for (int bias = 1; bias < kSimd128Size; bias++) {
|
|
|
|
for (int i = 0; i < bias; i++) expected[i] = i;
|
|
|
|
for (int i = bias; i < kSimd128Size; i++) expected[i] = i + kSimd128Size;
|
2020-09-25 18:08:04 +00:00
|
|
|
RunShuffleOpTest(execution_tier, lower_simd, kExprI8x16Shuffle, expected);
|
2018-07-13 18:24:42 +00:00
|
|
|
}
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
}
|
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
// Test shuffles that concatenate the two vectors.
|
|
|
|
WASM_SIMD_TEST(S8x16Concat) {
|
|
|
|
std::array<int8_t, kSimd128Size> expected;
|
|
|
|
// n is offset or bias of concatenation.
|
|
|
|
for (int n = 1; n < kSimd128Size; ++n) {
|
|
|
|
int i = 0;
|
|
|
|
// last kLanes - n bytes of first vector.
|
|
|
|
for (int j = n; j < kSimd128Size; ++j) {
|
|
|
|
expected[i++] = j;
|
|
|
|
}
|
|
|
|
// first n bytes of second vector
|
|
|
|
for (int j = 0; j < n; ++j) {
|
|
|
|
expected[i++] = j + kSimd128Size;
|
|
|
|
}
|
2020-09-25 18:08:04 +00:00
|
|
|
RunShuffleOpTest(execution_tier, lower_simd, kExprI8x16Shuffle, expected);
|
2018-07-13 18:24:42 +00:00
|
|
|
}
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
}
|
|
|
|
|
[wasm-simd][x64] Pattern match on shufps-style shuffles
When a 8x16 shuffle matches a 32x4 shuffle (every group of 4 indices are
consecutive), and the first 2 indices are in the range [0-3], and the
other 2 indices are in the range [4-7], then we can match it to a
shufps. E.g. [0,2,4,6], [1,3,5,7]. These shuffles are commonly used to
extract odd/even floats.
Change-Id: I031fe44f71a13bbc72115c22b02a5eaaf29d3794
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2596579
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71860}
2020-12-22 02:33:58 +00:00
|
|
|
// Reverse engineer each of the 256 shufps immediates into an 8x16 shuffle and
// run it, so the backend's shufps pattern matching gets full coverage.
WASM_SIMD_TEST(ShuffleShufps) {
  std::array<int8_t, kSimd128Size> expected;
  for (int imm = 0; imm < 256; imm++) {
    for (int lane = 0; lane < 4; lane++) {
      // Each 2-bit field of the immediate selects a 32-bit lane. The low two
      // fields read from the first input; the high two read from the second,
      // so their byte indices are offset by 16.
      const int selected = (imm >> (2 * lane)) & 3;
      const int base = selected * 4 + (lane < 2 ? 0 : 16);
      for (int b = 0; b < 4; b++) {
        expected[lane * 4 + b] = static_cast<int8_t>(base + b);
      }
    }
    RunShuffleOpTest(execution_tier, lower_simd, kExprI8x16Shuffle, expected);
  }
}
|
|
|
|
|
2019-10-09 17:25:33 +00:00
|
|
|
// One swizzle test case: an input vector, a vector of per-lane byte indices,
// and the expected result of swizzling the input by those indices.
struct SwizzleTestArgs {
  const Shuffle input;
  const Shuffle indices;
  const Shuffle expected;
};
|
|
|
|
|
|
|
|
// Swizzle cases over a reversed-byte input; the last case shows that indices
// outside [0, 15] (here >= 16 or negative) produce 0 in the expected output.
static constexpr SwizzleTestArgs swizzle_test_args[] = {
    {{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
     {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
     {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}},
    {{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
     {15, 0, 14, 1, 13, 2, 12, 3, 11, 4, 10, 5, 9, 6, 8, 7},
     {0, 15, 1, 14, 2, 13, 3, 12, 4, 11, 5, 10, 6, 9, 7, 8}},
    {{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
     {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     {15, 13, 11, 9, 7, 5, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0}},
    // all indices are out of range
    {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
     {16, 17, 18, 19, 20, 124, 125, 126, 127, -1, -2, -3, -4, -5, -6, -7},
     {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}};
|
|
|
|
|
|
|
|
// Vector view over the static swizzle test cases above.
static constexpr Vector<const SwizzleTestArgs> swizzle_test_vector =
    ArrayVector(swizzle_test_args);
|
|
|
|
|
2020-09-25 18:08:04 +00:00
|
|
|
// Tests kExprI8x16Swizzle: global 1 supplies the data vector, global 2 the
// per-lane indices, and the module writes the swizzled result to global 0.
WASM_SIMD_TEST(I8x16Swizzle) {
  // RunBinaryLaneOpTest set up the two globals to be consecutive integers,
  // [0-15] and [16-31]. Using [0-15] as the indices will not sufficiently test
  // swizzle since the expected result is a no-op, using [16-31] will result in
  // all 0s, so this test drives the one compiled module with explicit index
  // patterns instead.
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  static const int kElems = kSimd128Size / sizeof(uint8_t);
  uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
  uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
  uint8_t* src1 = r.builder().AddGlobal<uint8_t>(kWasmS128);
  BUILD(
      r,
      WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
                                         WASM_GLOBAL_GET(2))),
      WASM_ONE);

  // Iterate by const reference: the loop previously copied the 48-byte
  // SwizzleTestArgs struct on every iteration (performance-for-range-copy).
  for (const SwizzleTestArgs& si : swizzle_test_vector) {
    for (int i = 0; i < kElems; i++) {
      WriteLittleEndianValue<uint8_t>(&src0[i], si.input[i]);
      WriteLittleEndianValue<uint8_t>(&src1[i], si.indices[i]);
    }

    CHECK_EQ(1, r.Call());

    for (int i = 0; i < kElems; i++) {
      CHECK_EQ(ReadLittleEndianValue<uint8_t>(&dst[i]), si.expected[i]);
    }
  }
}
|
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
// Combine 3 shuffles a, b, and c by applying both a and b and then applying c
|
|
|
|
// to those two results.
|
|
|
|
Shuffle Combine(const Shuffle& a, const Shuffle& b, const Shuffle& c) {
|
|
|
|
Shuffle result;
|
|
|
|
for (int i = 0; i < kSimd128Size; ++i) {
|
|
|
|
result[i] = c[i] < kSimd128Size ? a[c[i]] : b[c[i] - kSimd128Size];
|
|
|
|
}
|
|
|
|
return result;
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
}
|
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
// Picks one of the named test shuffles uniformly at random.
const Shuffle& GetRandomTestShuffle(v8::base::RandomNumberGenerator* rng) {
  const ShuffleKey key = static_cast<ShuffleKey>(rng->NextInt(kNumShuffleKeys));
  return test_shuffles[key];
}
|
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
// Test shuffles that are random combinations of 3 test shuffles. Completely
|
|
|
|
// random shuffles almost always generate the slow general shuffle code, so
|
|
|
|
// don't exercise as many code paths.
|
2020-09-25 18:08:04 +00:00
|
|
|
WASM_SIMD_TEST(I8x16ShuffleFuzz) {
|
2018-07-13 18:24:42 +00:00
|
|
|
v8::base::RandomNumberGenerator* rng = CcTest::random_number_generator();
|
|
|
|
static const int kTests = 100;
|
|
|
|
for (int i = 0; i < kTests; ++i) {
|
|
|
|
auto shuffle = Combine(GetRandomTestShuffle(rng), GetRandomTestShuffle(rng),
|
|
|
|
GetRandomTestShuffle(rng));
|
2020-09-25 18:08:04 +00:00
|
|
|
RunShuffleOpTest(execution_tier, lower_simd, kExprI8x16Shuffle, shuffle);
|
2018-07-13 18:24:42 +00:00
|
|
|
}
|
2017-05-04 16:50:51 +00:00
|
|
|
}
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
void AppendShuffle(const Shuffle& shuffle, std::vector<byte>* buffer) {
|
2020-09-25 18:08:04 +00:00
|
|
|
byte opcode[] = {WASM_SIMD_OP(kExprI8x16Shuffle)};
|
2018-07-13 18:24:42 +00:00
|
|
|
for (size_t i = 0; i < arraysize(opcode); ++i) buffer->push_back(opcode[i]);
|
|
|
|
for (size_t i = 0; i < kSimd128Size; ++i) buffer->push_back((shuffle[i]));
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
}
|
|
|
|
|
2019-09-10 01:19:59 +00:00
|
|
|
// Emits wasm bytecode that evaluates `shuffles`, stored in array/heap order as
// a complete binary tree of shuffle operations, and stores the root's result
// into global 0. Assumes shuffles.size() is odd so the tree is complete — the
// caller (S8x16MultiShuffleFuzz) generates 2k + 3 shuffles.
void BuildShuffle(const std::vector<Shuffle>& shuffles,
                  std::vector<byte>* buffer) {
  // Perform the leaf shuffles on globals 0 and 1.
  size_t row_index = (shuffles.size() - 1) / 2;  // first leaf in heap order
  for (size_t i = row_index; i < shuffles.size(); ++i) {
    // Each leaf pushes the two global test patterns and shuffles them.
    byte operands[] = {WASM_GLOBAL_GET(0), WASM_GLOBAL_GET(1)};
    for (size_t j = 0; j < arraysize(operands); ++j)
      buffer->push_back(operands[j]);
    AppendShuffle(shuffles[i], buffer);
  }
  // Now perform inner shuffles in the correct order on operands on the stack.
  // Each interior shuffle consumes the two results its children left on the
  // stack; rows are emitted bottom-up until the root (index 0) is done.
  do {
    for (size_t i = row_index / 2; i < row_index; ++i) {
      AppendShuffle(shuffles[i], buffer);
    }
    row_index /= 2;
  } while (row_index != 0);
  // Store the root result into global 0 and return 1.
  byte epilog[] = {kExprGlobalSet, static_cast<byte>(0), WASM_ONE};
  for (size_t j = 0; j < arraysize(epilog); ++j) buffer->push_back(epilog[j]);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunWasmCode(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2018-07-13 18:24:42 +00:00
|
|
|
const std::vector<byte>& code,
|
|
|
|
std::array<int8_t, kSimd128Size>* result) {
|
2018-08-21 15:01:31 +00:00
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
2018-07-13 18:24:42 +00:00
|
|
|
// Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
|
|
|
|
int8_t* src0 = r.builder().AddGlobal<int8_t>(kWasmS128);
|
|
|
|
int8_t* src1 = r.builder().AddGlobal<int8_t>(kWasmS128);
|
|
|
|
for (int i = 0; i < kSimd128Size; ++i) {
|
2018-08-10 11:20:40 +00:00
|
|
|
WriteLittleEndianValue<int8_t>(&src0[i], i);
|
|
|
|
WriteLittleEndianValue<int8_t>(&src1[i], kSimd128Size + i);
|
2018-07-13 18:24:42 +00:00
|
|
|
}
|
|
|
|
r.Build(code.data(), code.data() + code.size());
|
|
|
|
CHECK_EQ(1, r.Call());
|
|
|
|
for (size_t i = 0; i < kSimd128Size; i++) {
|
2018-08-10 11:20:40 +00:00
|
|
|
(*result)[i] = ReadLittleEndianValue<int8_t>(&src0[i]);
|
2018-05-31 16:53:09 +00:00
|
|
|
}
|
|
|
|
}
|
2017-06-13 23:40:51 +00:00
|
|
|
|
2018-07-13 18:24:42 +00:00
|
|
|
// Test multiple shuffles executed in sequence.
|
2020-07-29 19:00:34 +00:00
|
|
|
WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
|
|
|
|
// Don't compare interpreter results with itself.
|
2020-08-04 11:09:23 +00:00
|
|
|
if (execution_tier == TestExecutionTier::kInterpreter) {
|
2020-07-29 19:00:34 +00:00
|
|
|
return;
|
|
|
|
}
|
2018-07-13 18:24:42 +00:00
|
|
|
v8::base::RandomNumberGenerator* rng = CcTest::random_number_generator();
|
|
|
|
static const int kShuffles = 100;
|
|
|
|
for (int i = 0; i < kShuffles; ++i) {
|
|
|
|
// Create an odd number in [3..23] of random test shuffles so we can build
|
|
|
|
// a complete binary tree (stored as a heap) of shuffle operations. The leaf
|
|
|
|
// shuffles operate on the test pattern inputs, while the interior shuffles
|
|
|
|
// operate on the results of the two child shuffles.
|
|
|
|
int num_shuffles = rng->NextInt(10) * 2 + 3;
|
|
|
|
std::vector<Shuffle> shuffles;
|
|
|
|
for (int j = 0; j < num_shuffles; ++j) {
|
|
|
|
shuffles.push_back(GetRandomTestShuffle(rng));
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
}
|
2018-07-13 18:24:42 +00:00
|
|
|
// Generate the code for the shuffle expression.
|
|
|
|
std::vector<byte> buffer;
|
|
|
|
BuildShuffle(shuffles, &buffer);
|
|
|
|
|
|
|
|
// Run the code using the interpreter to get the expected result.
|
|
|
|
std::array<int8_t, kSimd128Size> expected;
|
2020-08-04 11:09:23 +00:00
|
|
|
RunWasmCode(TestExecutionTier::kInterpreter, kNoLowerSimd, buffer,
|
|
|
|
&expected);
|
2018-07-13 18:24:42 +00:00
|
|
|
// Run the SIMD or scalar lowered compiled code and compare results.
|
|
|
|
std::array<int8_t, kSimd128Size> result;
|
2018-08-21 15:01:31 +00:00
|
|
|
RunWasmCode(execution_tier, lower_simd, buffer, &result);
|
2018-07-13 18:24:42 +00:00
|
|
|
for (size_t i = 0; i < kSimd128Size; ++i) {
|
|
|
|
CHECK_EQ(result[i], expected[i]);
|
[WASM SIMD] Implement primitive shuffles.
- Adds unary Reverse shuffles (swizzles): S32x2Reverse, S16x4Reverse,
S16x2Reverse, S8x8Reverse, S8x4Reverse, S8x2Reverse. Reversals are
done within the sub-vectors that prefix the opcode name, e.g. S8x2
reverses the 8 consecutive pairs in an S8x16 vector.
- Adds binary Zip (interleave) left and right half-shuffles to return a
single vector: S32x4ZipLeft, S32x4ZipRightS16x8ZipLeft, S16x8ZipRight,
S8x16ZipLeft, S8x16ZipRight.
- Adds binary Unzip (de-interleave) left and right half shuffles to return
a single vector: S32x4UnzipLeft, S32x4UnzipRight, S16x8UnzipLeft,
S16x8UnzipRight, S8x16UnzipLeft, S8x16UnzipRight.
- Adds binary Transpose left and right half shuffles to return
a single vector: S32x4TransposeLeft, S32x4TransposeRight,
S16x8TransposeLeft, S16xTransposeRight, S8x16TransposeLeft,
S8x16TransposeRight.
- Adds binary Concat (concatenate) byte shuffle: S8x16Concat #bytes to
paste two vectors together.
LOG=N
BUG=v8:6020
Review-Url: https://codereview.chromium.org/2801183002
Cr-Commit-Position: refs/heads/master@{#44734}
2017-04-19 22:18:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-05-04 16:50:51 +00:00
|
|
|
|
2017-03-02 19:50:33 +00:00
|
|
|
// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
// result. Use relational ops on numeric vectors to create the boolean vector
// test inputs. Test inputs with all true, all false, one true, and one false.
//
// Each generated test builds one wasm function that chains eight checks:
// AnyTrue/AllTrue over Eq/Ne comparisons of an all-zeros vector with itself
// (all-true and all-false boolean inputs), then the same four reductions
// against a vector whose last lane was replaced with 1 (one-true / one-false
// inputs). Any check with an unexpected result returns 0 early; reaching the
// end returns 1.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type)                 \
  WASM_SIMD_TEST(ReductionTest##lanes) {                                       \
    WasmRunner<int32_t> r(execution_tier, lower_simd);                         \
    if (lanes == 2 && lower_simd == kLowerSimd) return;                        \
    byte zero = r.AllocateLocal(kWasmS128);                                    \
    byte one_one = r.AllocateLocal(kWasmS128);                                 \
    byte reduced = r.AllocateLocal(kWasmI32);                                  \
    BUILD(r, WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(int_type(0))),   \
          WASM_LOCAL_SET(                                                      \
              reduced, WASM_SIMD_UNOP(kExprV128AnyTrue,                        \
                                      WASM_SIMD_BINOP(kExprI##format##Eq,      \
                                                      WASM_LOCAL_GET(zero),    \
                                                      WASM_LOCAL_GET(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_LOCAL_SET(                                                      \
              reduced, WASM_SIMD_UNOP(kExprV128AnyTrue,                        \
                                      WASM_SIMD_BINOP(kExprI##format##Ne,      \
                                                      WASM_LOCAL_GET(zero),    \
                                                      WASM_LOCAL_GET(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_LOCAL_SET(                                                      \
              reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue,                 \
                                      WASM_SIMD_BINOP(kExprI##format##Eq,      \
                                                      WASM_LOCAL_GET(zero),    \
                                                      WASM_LOCAL_GET(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_LOCAL_SET(                                                      \
              reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue,                 \
                                      WASM_SIMD_BINOP(kExprI##format##Ne,      \
                                                      WASM_LOCAL_GET(zero),    \
                                                      WASM_LOCAL_GET(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_LOCAL_SET(one_one,                                              \
                         WASM_SIMD_I##format##_REPLACE_LANE(                   \
                             lanes - 1, WASM_LOCAL_GET(zero), int_type(1))),   \
          WASM_LOCAL_SET(                                                      \
              reduced, WASM_SIMD_UNOP(kExprV128AnyTrue,                        \
                                      WASM_SIMD_BINOP(kExprI##format##Eq,      \
                                                      WASM_LOCAL_GET(one_one), \
                                                      WASM_LOCAL_GET(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_LOCAL_SET(                                                      \
              reduced, WASM_SIMD_UNOP(kExprV128AnyTrue,                        \
                                      WASM_SIMD_BINOP(kExprI##format##Ne,      \
                                                      WASM_LOCAL_GET(one_one), \
                                                      WASM_LOCAL_GET(zero)))), \
          WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_LOCAL_SET(                                                      \
              reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue,                 \
                                      WASM_SIMD_BINOP(kExprI##format##Eq,      \
                                                      WASM_LOCAL_GET(one_one), \
                                                      WASM_LOCAL_GET(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_LOCAL_SET(                                                      \
              reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue,                 \
                                      WASM_SIMD_BINOP(kExprI##format##Ne,      \
                                                      WASM_LOCAL_GET(one_one), \
                                                      WASM_LOCAL_GET(zero)))), \
          WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO),             \
                  WASM_RETURN1(WASM_ZERO)),                                    \
          WASM_ONE);                                                           \
    CHECK_EQ(1, r.Call());                                                     \
  }

// Instantiate the reduction test for each integer lane shape. The i64x2
// variant needs 64-bit constants; the rest use 32-bit constants.
WASM_SIMD_BOOL_REDUCTION_TEST(64x2, 2, WASM_I64V)
WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4, WASM_I32V)
WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8, WASM_I32V)
WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16, WASM_I32V)
|
2017-03-02 19:50:33 +00:00
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Extracting an i32x4 lane from an f32x4-splatted vector must yield the raw
// bit pattern of the float, matching a scalar i32 reinterpret of the same
// value.
WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  BUILD(r, WASM_IF_ELSE_I(
               WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
                               0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
                           WASM_I32_REINTERPRET_F32(WASM_F32(30.5))),
               WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Extracting an f32x4 lane from an i32x4-splatted vector must yield the raw
// bit pattern of the integer, matching a scalar f32 reinterpret of the same
// value.
WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
                                       0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
                                   WASM_F32_REINTERPRET_I32(WASM_I32V(15))),
                       WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}
|
|
|
|
|
2019-07-02 20:43:34 +00:00
|
|
|
// Round-trips a float through extract -> local -> splat -> extract and checks
// the value survives unchanged.
WASM_SIMD_TEST(SimdF32x4ExtractLane) {
  WasmRunner<float> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmF32);   // local 0: scalar holding the extracted lane
  r.AllocateLocal(kWasmS128);  // local 1: vector re-splatted from local 0
  BUILD(r,
        WASM_LOCAL_SET(0, WASM_SIMD_F32x4_EXTRACT_LANE(
                              0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5)))),
        WASM_LOCAL_SET(1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0))),
        WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1)));
  CHECK_EQ(30.5, r.Call());
}
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// f32x4.add applied to i32x4-splatted operands must treat the lanes as raw
// float bits: the result lane equals the scalar sum of the two reinterpreted
// values.
WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
  // Choose two floating point values whose sum is normal and exactly
  // representable as a float.
  const int kOne = 0x3F800000;  // bit pattern of 1.0f
  const int kTwo = 0x40000000;  // bit pattern of 2.0f
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(
            WASM_F32_EQ(
                WASM_SIMD_F32x4_EXTRACT_LANE(
                    0, WASM_SIMD_BINOP(kExprF32x4Add,
                                       WASM_SIMD_I32x4_SPLAT(WASM_I32V(kOne)),
                                       WASM_SIMD_I32x4_SPLAT(WASM_I32V(kTwo)))),
                WASM_F32_ADD(WASM_F32_REINTERPRET_I32(WASM_I32V(kOne)),
                             WASM_F32_REINTERPRET_I32(WASM_I32V(kTwo)))),
            WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}
|
|
|
|
|
2018-05-23 22:21:05 +00:00
|
|
|
// i32x4.add applied to f32x4-splatted operands must treat the lanes as raw
// integer bits: the result lane equals the scalar sum of the two
// reinterpreted values.
WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  BUILD(r,
        WASM_IF_ELSE_I(
            WASM_I32_EQ(
                WASM_SIMD_I32x4_EXTRACT_LANE(
                    0, WASM_SIMD_BINOP(kExprI32x4Add,
                                       WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25)),
                                       WASM_SIMD_F32x4_SPLAT(WASM_F32(31.5)))),
                WASM_I32_ADD(WASM_I32_REINTERPRET_F32(WASM_F32(21.25)),
                             WASM_I32_REINTERPRET_F32(WASM_F32(31.5)))),
            WASM_I32V(1), WASM_I32V(0)));
  CHECK_EQ(1, r.Call());
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Stores a splatted vector into an s128 local and reads lane 0 back.
WASM_SIMD_TEST(SimdI32x4Local) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),

        WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(0)));
  CHECK_EQ(31, r.Call());
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Round-trips an i32 through splat -> extract -> local -> splat -> extract.
WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmI32);   // local 0: extracted scalar
  r.AllocateLocal(kWasmS128);  // local 1: vector re-splatted from local 0
  BUILD(r,
        WASM_LOCAL_SET(0, WASM_SIMD_I32x4_EXTRACT_LANE(
                              0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(76)))),
        WASM_LOCAL_SET(1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0))),
        WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1)));
  CHECK_EQ(76, r.Call());
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Exercises a vector live across a loop: starts with [31, 53, 23, 31], adds 1
// to every lane five times, then verifies the lanes are [36, 58, 28, 36].
// Local 0 is the loop counter / pass-fail flag; local 1 is the vector.
WASM_SIMD_TEST(SimdI32x4For) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmI32);
  r.AllocateLocal(kWasmS128);
  BUILD(r,

        WASM_LOCAL_SET(1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
        WASM_LOCAL_SET(1, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_LOCAL_GET(1),
                                                       WASM_I32V(53))),
        WASM_LOCAL_SET(1, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_LOCAL_GET(1),
                                                       WASM_I32V(23))),
        WASM_LOCAL_SET(0, WASM_I32V(0)),
        WASM_LOOP(
            WASM_LOCAL_SET(
                1, WASM_SIMD_BINOP(kExprI32x4Add, WASM_LOCAL_GET(1),
                                   WASM_SIMD_I32x4_SPLAT(WASM_I32V(1)))),
            WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(5)), WASM_BR(1))),
        WASM_LOCAL_SET(0, WASM_I32V(1)),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(1)),
                            WASM_I32V(36)),
                WASM_LOCAL_SET(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1)),
                            WASM_I32V(58)),
                WASM_LOCAL_SET(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_LOCAL_GET(1)),
                            WASM_I32V(28)),
                WASM_LOCAL_SET(0, WASM_I32V(0))),
        WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_LOCAL_GET(1)),
                            WASM_I32V(36)),
                WASM_LOCAL_SET(0, WASM_I32V(0))),
        WASM_LOCAL_GET(0));
  CHECK_EQ(1, r.Call());
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Float analogue of SimdI32x4For: starts with [21.25 x3, 19.5], adds 2.0 to
// every lane three times, then checks lanes 0 (27.25) and 3 (25.5).
// Local 0 is the loop counter / pass-fail flag; local 1 is the vector.
WASM_SIMD_TEST(SimdF32x4For) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmI32);
  r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
        WASM_LOCAL_SET(1, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_LOCAL_GET(1),
                                                       WASM_F32(19.5))),
        WASM_LOCAL_SET(0, WASM_I32V(0)),
        WASM_LOOP(
            WASM_LOCAL_SET(
                1, WASM_SIMD_BINOP(kExprF32x4Add, WASM_LOCAL_GET(1),
                                   WASM_SIMD_F32x4_SPLAT(WASM_F32(2.0)))),
            WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(3)), WASM_BR(1))),
        WASM_LOCAL_SET(0, WASM_I32V(1)),
        WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(1)),
                            WASM_F32(27.25)),
                WASM_LOCAL_SET(0, WASM_I32V(0))),
        WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_LOCAL_GET(1)),
                            WASM_F32(25.5)),
                WASM_LOCAL_SET(0, WASM_I32V(0))),
        WASM_LOCAL_GET(0));
  CHECK_EQ(1, r.Call());
}
|
|
|
|
|
2017-05-09 19:54:19 +00:00
|
|
|
template <typename T, int numLanes = 4>
|
|
|
|
void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
|
|
|
|
for (int lane = 0; lane < numLanes; lane++) {
|
2018-08-10 11:20:40 +00:00
|
|
|
WriteLittleEndianValue<T>(&v[lane], arr[lane]);
|
2017-05-09 19:54:19 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename T>
|
2018-08-10 11:20:40 +00:00
|
|
|
const T GetScalar(T* v, int lane) {
|
2017-05-09 19:54:19 +00:00
|
|
|
constexpr int kElems = kSimd128Size / sizeof(T);
|
|
|
|
const int index = lane;
|
|
|
|
USE(kElems);
|
|
|
|
DCHECK(index >= 0 && index < kElems);
|
2018-08-10 11:20:40 +00:00
|
|
|
return ReadLittleEndianValue<T>(&v[index]);
|
2017-05-09 19:54:19 +00:00
|
|
|
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Reads each i32x4 lane out of an s128 global (global index 4, after four
// padding globals) and checks it against the pattern [0, 1, 2, 3] written via
// SetVectorByLanes. Local 1 is the pass-fail flag.
WASM_SIMD_TEST(SimdI32x4GetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Pad the globals with a few unused slots to get a non-zero offset.
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
  SetVectorByLanes(global, {{0, 1, 2, 3}});
  r.AllocateLocal(kWasmI32);
  BUILD(
      r, WASM_LOCAL_SET(1, WASM_I32V(1)),
      WASM_IF(WASM_I32_NE(WASM_I32V(0),
                          WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GLOBAL_GET(4))),
              WASM_LOCAL_SET(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(1),
                          WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GLOBAL_GET(4))),
              WASM_LOCAL_SET(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(2),
                          WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GLOBAL_GET(4))),
              WASM_LOCAL_SET(1, WASM_I32V(0))),
      WASM_IF(WASM_I32_NE(WASM_I32V(3),
                          WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GLOBAL_GET(4))),
              WASM_LOCAL_SET(1, WASM_I32V(0))),
      WASM_LOCAL_GET(1));
  CHECK_EQ(1, r.Call(0));
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Writes an s128 global (index 4, after four padding globals) via splat plus
// per-lane replace, then verifies the lanes [23, 34, 45, 56] from the host
// side with GetScalar.
WASM_SIMD_TEST(SimdI32x4SetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Pad the globals with a few unused slots to get a non-zero offset.
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  r.builder().AddGlobal<int32_t>(kWasmI32);  // purposefully unused
  int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
  BUILD(r, WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
        WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GLOBAL_GET(4),
                                                        WASM_I32V(34))),
        WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GLOBAL_GET(4),
                                                        WASM_I32V(45))),
        WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(3, WASM_GLOBAL_GET(4),
                                                        WASM_I32V(56))),
        WASM_I32V(1));
  CHECK_EQ(1, r.Call(0));
  CHECK_EQ(GetScalar(global, 0), 23);
  CHECK_EQ(GetScalar(global, 1), 34);
  CHECK_EQ(GetScalar(global, 2), 45);
  CHECK_EQ(GetScalar(global, 3), 56);
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Reads each f32x4 lane out of an s128 global and checks it against the
// pattern [0.0, 1.5, 2.25, 3.5] written via SetVectorByLanes. Local 1 is the
// pass-fail flag.
WASM_SIMD_TEST(SimdF32x4GetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  float* global = r.builder().AddGlobal<float>(kWasmS128);
  SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
  r.AllocateLocal(kWasmI32);
  BUILD(
      r, WASM_LOCAL_SET(1, WASM_I32V(1)),
      WASM_IF(WASM_F32_NE(WASM_F32(0.0),
                          WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GLOBAL_GET(0))),
              WASM_LOCAL_SET(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(1.5),
                          WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_GLOBAL_GET(0))),
              WASM_LOCAL_SET(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(2.25),
                          WASM_SIMD_F32x4_EXTRACT_LANE(2, WASM_GLOBAL_GET(0))),
              WASM_LOCAL_SET(1, WASM_I32V(0))),
      WASM_IF(WASM_F32_NE(WASM_F32(3.5),
                          WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GLOBAL_GET(0))),
              WASM_LOCAL_SET(1, WASM_I32V(0))),
      WASM_LOCAL_GET(1));
  CHECK_EQ(1, r.Call(0));
}
|
|
|
|
|
2018-05-31 21:38:23 +00:00
|
|
|
// Writes an s128 global via f32x4 splat plus per-lane replace, then verifies
// the lanes [13.5, 45.5, 32.25, 65.0] from the host side with GetScalar.
WASM_SIMD_TEST(SimdF32x4SetGlobal) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  float* global = r.builder().AddGlobal<float>(kWasmS128);
  BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
        WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GLOBAL_GET(0),
                                                        WASM_F32(45.5))),
        WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(2, WASM_GLOBAL_GET(0),
                                                        WASM_F32(32.25))),
        WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GLOBAL_GET(0),
                                                        WASM_F32(65.0))),
        WASM_I32V(1));
  CHECK_EQ(1, r.Call(0));
  CHECK_EQ(GetScalar(global, 0), 13.5f);
  CHECK_EQ(GetScalar(global, 1), 45.5f);
  CHECK_EQ(GetScalar(global, 2), 32.25f);
  CHECK_EQ(GetScalar(global, 3), 65.0f);
}
|
2017-03-15 23:34:53 +00:00
|
|
|
|
2021-01-21 20:32:19 +00:00
|
|
|
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
|
2020-11-20 01:06:13 +00:00
|
|
|
// TODO(v8:11168): Prototyping prefetch.
|
2021-03-05 04:03:58 +00:00
|
|
|
WASM_SIMD_TEST_POST_MVP(SimdPrefetch) {
|
2020-11-20 01:06:13 +00:00
|
|
|
{
|
|
|
|
// Test PrefetchT.
|
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
|
|
|
int32_t* memory =
|
|
|
|
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
|
|
|
|
BUILD(r, WASM_ZERO, WASM_SIMD_OP(kExprPrefetchT), ZERO_ALIGNMENT,
|
|
|
|
ZERO_OFFSET,
|
|
|
|
WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
|
|
|
|
|
|
|
|
FOR_INT32_INPUTS(i) {
|
|
|
|
r.builder().WriteMemory(&memory[0], i);
|
|
|
|
CHECK_EQ(i, r.Call());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// Test PrefetchNT.
|
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
|
|
|
int32_t* memory =
|
|
|
|
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
|
|
|
|
BUILD(r, WASM_ZERO, WASM_SIMD_OP(kExprPrefetchNT), ZERO_ALIGNMENT,
|
|
|
|
ZERO_OFFSET,
|
|
|
|
WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
|
|
|
|
|
|
|
|
FOR_INT32_INPUTS(i) {
|
|
|
|
r.builder().WriteMemory(&memory[0], i);
|
|
|
|
CHECK_EQ(i, r.Call());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// Test OOB.
|
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
|
|
|
int32_t* memory =
|
|
|
|
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
|
|
|
|
|
|
|
|
// Prefetch kWasmPageSize+1 but still load from 0.
|
|
|
|
BUILD(r, WASM_I32V(kWasmPageSize + 1), WASM_SIMD_OP(kExprPrefetchNT),
|
|
|
|
ZERO_ALIGNMENT, ZERO_OFFSET,
|
|
|
|
WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
|
|
|
|
|
|
|
|
FOR_INT32_INPUTS(i) {
|
|
|
|
r.builder().WriteMemory(&memory[0], i);
|
|
|
|
CHECK_EQ(i, r.Call());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-01-21 20:32:19 +00:00
|
|
|
#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
|
2020-11-20 01:06:13 +00:00
|
|
|
|
2019-08-26 16:50:16 +00:00
|
|
|
// Round-trips an s128 value through linear memory (load / store / reload),
// then checks that loads and stores within 15 bytes of the end of memory trap.
WASM_SIMD_TEST(SimdLoadStoreLoad) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  int32_t* memory =
      r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
  // Load memory, store it, then reload it and extract the first lane. Use a
  // non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
  BUILD(r, WASM_SIMD_STORE_MEM(WASM_I32V(8), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
        WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(8))));

  FOR_INT32_INPUTS(i) {
    int32_t expected = i;
    // memory[1] is bytes 4..7, i.e. the first lane of the vector loaded at 4.
    r.builder().WriteMemory(&memory[1], expected);
    CHECK_EQ(expected, r.Call());
  }

  {
    // OOB tests for loads.
    WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
    r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
    BUILD(r, WASM_SIMD_I32x4_EXTRACT_LANE(
                 0, WASM_SIMD_LOAD_MEM(WASM_LOCAL_GET(0))));

    // Any index leaving fewer than 16 accessible bytes must trap.
    for (uint32_t offset = kWasmPageSize - (kSimd128Size - 1);
         offset < kWasmPageSize; ++offset) {
      CHECK_TRAP(r.Call(offset));
    }
  }

  {
    // OOB tests for stores.
    WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
    r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
    BUILD(r,
          WASM_SIMD_STORE_MEM(WASM_LOCAL_GET(0), WASM_SIMD_LOAD_MEM(WASM_ZERO)),
          WASM_ONE);

    // Any index leaving fewer than 16 accessible bytes must trap.
    for (uint32_t offset = kWasmPageSize - (kSimd128Size - 1);
         offset < kWasmPageSize; ++offset) {
      CHECK_TRAP(r.Call(offset));
    }
  }
}
|
2017-09-01 12:57:34 +00:00
|
|
|
|
2019-09-19 22:01:07 +00:00
|
|
|
// Same round-trip as SimdLoadStoreLoad, but the addresses come from non-zero
// memarg offsets instead of index operands, plus OOB checks for offsets that
// reach past the end of memory.
WASM_SIMD_TEST(SimdLoadStoreLoadMemargOffset) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  int32_t* memory =
      r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
  constexpr byte offset_1 = 4;
  constexpr byte offset_2 = 8;
  // Load from memory at offset_1, store to offset_2, load from offset_2, and
  // extract first lane. We use non-zero memarg offsets to test offset decoding.
  BUILD(
      r,
      WASM_SIMD_STORE_MEM_OFFSET(
          offset_2, WASM_ZERO, WASM_SIMD_LOAD_MEM_OFFSET(offset_1, WASM_ZERO)),
      WASM_SIMD_I32x4_EXTRACT_LANE(
          0, WASM_SIMD_LOAD_MEM_OFFSET(offset_2, WASM_ZERO)));

  FOR_INT32_INPUTS(i) {
    int32_t expected = i;
    // Index 1 of memory (int32_t) will be bytes 4 to 8.
    r.builder().WriteMemory(&memory[1], expected);
    CHECK_EQ(expected, r.Call());
  }

  {
    // OOB tests for loads with offsets.
    for (uint32_t offset = kWasmPageSize - (kSimd128Size - 1);
         offset < kWasmPageSize; ++offset) {
      WasmRunner<int32_t> r(execution_tier, lower_simd);
      r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
      BUILD(r, WASM_SIMD_I32x4_EXTRACT_LANE(
                   0, WASM_SIMD_LOAD_MEM_OFFSET(U32V_3(offset), WASM_ZERO)));
      CHECK_TRAP(r.Call());
    }
  }

  {
    // OOB tests for stores with offsets
    for (uint32_t offset = kWasmPageSize - (kSimd128Size - 1);
         offset < kWasmPageSize; ++offset) {
      WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
      r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
      BUILD(r,
            WASM_SIMD_STORE_MEM_OFFSET(U32V_3(offset), WASM_ZERO,
                                       WASM_SIMD_LOAD_MEM(WASM_ZERO)),
            WASM_ONE);
      CHECK_TRAP(r.Call(offset));
    }
  }
}
|
|
|
|
|
2020-04-20 18:26:34 +00:00
|
|
|
// Test a multi-byte opcode with offset values that encode into valid opcodes.
// This is to exercise decoding logic and make sure we get the lengths right.
WASM_SIMD_TEST(S128Load8SplatOffset) {
  // This offset is [82, 22] when encoded, which contains valid opcodes.
  constexpr int offset = 4354;
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  int8_t* memory = r.builder().AddMemoryElems<int8_t>(kWasmPageSize);
  int8_t* global = r.builder().AddGlobal<int8_t>(kWasmS128);
  BUILD(r,
        WASM_GLOBAL_SET(
            0, WASM_SIMD_LOAD_OP_OFFSET(kExprS128Load8Splat, WASM_I32V(0),
                                        U32V_2(offset))),
        WASM_ONE);

  // We don't really care about all valid values, so just test for 1.
  int8_t x = 7;
  r.builder().WriteMemory(&memory[offset], x);
  r.Call();
  // The load-splat must have copied memory[offset] into all 16 bytes of the
  // s128 global.
  for (int i = 0; i < 16; i++) {
    CHECK_EQ(x, ReadLittleEndianValue<int8_t>(&global[i]));
  }
}
|
|
|
|
|
[wasm-simd] Implement load_splat and load_extend
Introduce new operator LoadTransform that holds a LoadTransformInfo param,
which describes the kind of load (normal, unaligned, protected), and a
transformation (splat or extend, signed or unsigned).
We have a new method that a full decoder needs to implement, LoadTransform,
which resuses the existing LoadType we have, but also takes a LoadTransform,
to distinguish between splats and extends at the decoder level.
This implements 4 out of the 10 suggested load splat/extend operations
(to keep the cl smaller), and is also missing interpreter support (will
be added in the future).
Change-Id: I1e65c693bfbe30e2a511c81b5a32e06aacbddc19
Bug: v8:9886
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1863863
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64819}
2019-11-05 17:22:19 +00:00
|
|
|
template <typename T>
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunLoadSplatTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
[wasm-simd] Implement load_splat and load_extend
Introduce new operator LoadTransform that holds a LoadTransformInfo param,
which describes the kind of load (normal, unaligned, protected), and a
transformation (splat or extend, signed or unsigned).
We have a new method that a full decoder needs to implement, LoadTransform,
which resuses the existing LoadType we have, but also takes a LoadTransform,
to distinguish between splats and extends at the decoder level.
This implements 4 out of the 10 suggested load splat/extend operations
(to keep the cl smaller), and is also missing interpreter support (will
be added in the future).
Change-Id: I1e65c693bfbe30e2a511c81b5a32e06aacbddc19
Bug: v8:9886
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1863863
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64819}
2019-11-05 17:22:19 +00:00
|
|
|
WasmOpcode op) {
|
|
|
|
constexpr int lanes = 16 / sizeof(T);
|
|
|
|
constexpr int mem_index = 16; // Load from mem index 16 (bytes).
|
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
|
|
|
T* memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
|
|
|
|
T* global = r.builder().AddGlobal<T>(kWasmS128);
|
2020-12-17 16:56:08 +00:00
|
|
|
BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
|
2020-05-05 18:04:33 +00:00
|
|
|
WASM_ONE);
|
[wasm-simd] Implement load_splat and load_extend
Introduce new operator LoadTransform that holds a LoadTransformInfo param,
which describes the kind of load (normal, unaligned, protected), and a
transformation (splat or extend, signed or unsigned).
We have a new method that a full decoder needs to implement, LoadTransform,
which resuses the existing LoadType we have, but also takes a LoadTransform,
to distinguish between splats and extends at the decoder level.
This implements 4 out of the 10 suggested load splat/extend operations
(to keep the cl smaller), and is also missing interpreter support (will
be added in the future).
Change-Id: I1e65c693bfbe30e2a511c81b5a32e06aacbddc19
Bug: v8:9886
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1863863
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64819}
2019-11-05 17:22:19 +00:00
|
|
|
|
|
|
|
for (T x : compiler::ValueHelper::GetVector<T>()) {
|
|
|
|
// 16-th byte in memory is lanes-th element (size T) of memory.
|
|
|
|
r.builder().WriteMemory(&memory[lanes], x);
|
|
|
|
r.Call();
|
|
|
|
for (int i = 0; i < lanes; i++) {
|
|
|
|
CHECK_EQ(x, ReadLittleEndianValue<T>(&global[i]));
|
|
|
|
}
|
|
|
|
}
|
2020-08-18 22:06:22 +00:00
|
|
|
|
|
|
|
// Test for OOB.
|
|
|
|
{
|
|
|
|
WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
|
|
|
|
r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
|
|
|
|
r.builder().AddGlobal<T>(kWasmS128);
|
|
|
|
|
2020-12-17 16:56:08 +00:00
|
|
|
BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
|
2020-08-18 22:06:22 +00:00
|
|
|
WASM_ONE);
|
|
|
|
|
|
|
|
// Load splats load sizeof(T) bytes.
|
|
|
|
for (uint32_t offset = kWasmPageSize - (sizeof(T) - 1);
|
|
|
|
offset < kWasmPageSize; ++offset) {
|
|
|
|
CHECK_TRAP(r.Call(offset));
|
|
|
|
}
|
|
|
|
}
|
[wasm-simd] Implement load_splat and load_extend
Introduce new operator LoadTransform that holds a LoadTransformInfo param,
which describes the kind of load (normal, unaligned, protected), and a
transformation (splat or extend, signed or unsigned).
We have a new method that a full decoder needs to implement, LoadTransform,
which resuses the existing LoadType we have, but also takes a LoadTransform,
to distinguish between splats and extends at the decoder level.
This implements 4 out of the 10 suggested load splat/extend operations
(to keep the cl smaller), and is also missing interpreter support (will
be added in the future).
Change-Id: I1e65c693bfbe30e2a511c81b5a32e06aacbddc19
Bug: v8:9886
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1863863
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64819}
2019-11-05 17:22:19 +00:00
|
|
|
}
|
|
|
|
|
2020-09-22 20:54:28 +00:00
|
|
|
// Splat a loaded i8 across all 16 lanes.
WASM_SIMD_TEST(S128Load8Splat) {
  RunLoadSplatTest<int8_t>(execution_tier, lower_simd, kExprS128Load8Splat);
}
|
|
|
|
|
2020-09-22 20:54:28 +00:00
|
|
|
// Splat a loaded i16 across all 8 lanes.
WASM_SIMD_TEST(S128Load16Splat) {
  RunLoadSplatTest<int16_t>(execution_tier, lower_simd, kExprS128Load16Splat);
}
|
|
|
|
|
2020-09-22 20:54:28 +00:00
|
|
|
// Splat a loaded i32 across all 4 lanes.
WASM_SIMD_TEST(S128Load32Splat) {
  RunLoadSplatTest<int32_t>(execution_tier, lower_simd, kExprS128Load32Splat);
}
|
|
|
|
|
2020-10-09 20:29:17 +00:00
|
|
|
// Splat a loaded i64 across both lanes.
WASM_SIMD_TEST(S128Load64Splat) {
  RunLoadSplatTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Splat);
}
|
|
|
|
|
[wasm-simd] Implement load_splat and load_extend
Introduce new operator LoadTransform that holds a LoadTransformInfo param,
which describes the kind of load (normal, unaligned, protected), and a
transformation (splat or extend, signed or unsigned).
We have a new method that a full decoder needs to implement, LoadTransform,
which resuses the existing LoadType we have, but also takes a LoadTransform,
to distinguish between splats and extends at the decoder level.
This implements 4 out of the 10 suggested load splat/extend operations
(to keep the cl smaller), and is also missing interpreter support (will
be added in the future).
Change-Id: I1e65c693bfbe30e2a511c81b5a32e06aacbddc19
Bug: v8:9886
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1863863
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64819}
2019-11-05 17:22:19 +00:00
|
|
|
// Tests a SIMD load-extend op: loads 64 bits (8 / sizeof(S) elements of the
// narrow type S) from memory and widens each element to the wide type T.
// Exercises every valid alignment, a load through a memarg offset, and
// out-of-bounds trapping.
template <typename S, typename T>
void RunLoadExtendTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode op) {
  static_assert(sizeof(S) < sizeof(T),
                "load extend should go from smaller to larger type");
  constexpr int lanes_s = 16 / sizeof(S);
  constexpr int lanes_t = 16 / sizeof(T);
  constexpr int mem_index = 16;  // Load from mem index 16 (bytes).
  // Load extends always load 64 bits, so alignment values can be from 0 to 3.
  for (byte alignment = 0; alignment <= 3; alignment++) {
    WasmRunner<int32_t> r(execution_tier, lower_simd);
    S* memory = r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
    T* global = r.builder().AddGlobal<T>(kWasmS128);
    BUILD(r,
          WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_ALIGNMENT(
                                 op, WASM_I32V(mem_index), alignment)),
          WASM_ONE);

    for (S x : compiler::ValueHelper::GetVector<S>()) {
      // Fill the 8 bytes starting at mem_index (element index lanes_s) with x.
      // 16-th byte in memory is lanes-th element (size T) of memory.
      for (int i = 0; i < lanes_s; i++) {
        r.builder().WriteMemory(&memory[lanes_s + i], x);
      }
      r.Call();
      // Every widened lane must equal x converted to the wide type.
      for (int i = 0; i < lanes_t; i++) {
        CHECK_EQ(static_cast<T>(x), ReadLittleEndianValue<T>(&global[i]));
      }
    }
  }

  // Test for offset.
  {
    WasmRunner<int32_t> r(execution_tier, lower_simd);
    S* memory = r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
    T* global = r.builder().AddGlobal<T>(kWasmS128);
    constexpr byte offset = sizeof(S);
    BUILD(r,
          WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_OFFSET(op, WASM_ZERO, offset)),
          WASM_ONE);

    // Let max_s be the max_s value for type S, we set up the memory as such:
    // memory = [max_s, max_s - 1, ... max_s - (lane_s - 1)].
    constexpr S max_s = std::numeric_limits<S>::max();
    for (int i = 0; i < lanes_s; i++) {
      // Integer promotion due to -, static_cast to narrow.
      r.builder().WriteMemory(&memory[i], static_cast<S>(max_s - i));
    }

    r.Call();

    // Loads will be offset by sizeof(S), so will always start from (max_s - 1).
    for (int i = 0; i < lanes_t; i++) {
      // Integer promotion due to -, static_cast to narrow.
      T expected = static_cast<T>(max_s - i - 1);
      CHECK_EQ(expected, ReadLittleEndianValue<T>(&global[i]));
    }
  }

  // Test for OOB.
  {
    WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
    r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
    r.builder().AddGlobal<T>(kWasmS128);

    BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
          WASM_ONE);

    // Load extends load 8 bytes, so should trap from -7.
    for (uint32_t offset = kWasmPageSize - 7; offset < kWasmPageSize;
         ++offset) {
      CHECK_TRAP(r.Call(offset));
    }
  }
}
|
|
|
|
|
2020-09-22 20:54:28 +00:00
|
|
|
// Load 8 u8 values and zero-extend each to u16.
WASM_SIMD_TEST(S128Load8x8U) {
  RunLoadExtendTest<uint8_t, uint16_t>(execution_tier, lower_simd,
                                       kExprS128Load8x8U);
}
|
|
|
|
|
2020-09-22 20:54:28 +00:00
|
|
|
// Load 8 i8 values and sign-extend each to i16.
WASM_SIMD_TEST(S128Load8x8S) {
  RunLoadExtendTest<int8_t, int16_t>(execution_tier, lower_simd,
                                     kExprS128Load8x8S);
}
|
2020-09-22 20:54:28 +00:00
|
|
|
// Load 4 u16 values and zero-extend each to u32.
WASM_SIMD_TEST(S128Load16x4U) {
  RunLoadExtendTest<uint16_t, uint32_t>(execution_tier, lower_simd,
                                        kExprS128Load16x4U);
}
|
|
|
|
|
2020-09-22 20:54:28 +00:00
|
|
|
// Load 4 i16 values and sign-extend each to i32.
WASM_SIMD_TEST(S128Load16x4S) {
  RunLoadExtendTest<int16_t, int32_t>(execution_tier, lower_simd,
                                      kExprS128Load16x4S);
}
|
|
|
|
|
2020-09-22 20:54:28 +00:00
|
|
|
// Load 2 u32 values and zero-extend each to u64.
WASM_SIMD_TEST(S128Load32x2U) {
  RunLoadExtendTest<uint32_t, uint64_t>(execution_tier, lower_simd,
                                        kExprS128Load32x2U);
}
|
|
|
|
|
2020-09-22 20:54:28 +00:00
|
|
|
// Load 2 i32 values and sign-extend each to i64.
WASM_SIMD_TEST(S128Load32x2S) {
  RunLoadExtendTest<int32_t, int64_t>(execution_tier, lower_simd,
                                      kExprS128Load32x2S);
}
|
[wasm-simd] Implement load_splat and load_extend
Introduce new operator LoadTransform that holds a LoadTransformInfo param,
which describes the kind of load (normal, unaligned, protected), and a
transformation (splat or extend, signed or unsigned).
We have a new method that a full decoder needs to implement, LoadTransform,
which resuses the existing LoadType we have, but also takes a LoadTransform,
to distinguish between splats and extends at the decoder level.
This implements 4 out of the 10 suggested load splat/extend operations
(to keep the cl smaller), and is also missing interpreter support (will
be added in the future).
Change-Id: I1e65c693bfbe30e2a511c81b5a32e06aacbddc19
Bug: v8:9886
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1863863
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64819}
2019-11-05 17:22:19 +00:00
|
|
|
|
2020-07-17 17:47:13 +00:00
|
|
|
// Tests a SIMD load-zero op: loads one sizeof(S)-sized value from memory into
// lane 0 of a 128-bit vector and zeroes all remaining lanes. Exercises every
// valid alignment, a load through a memarg offset, and out-of-bounds trapping.
template <typename S>
void RunLoadZeroTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                     WasmOpcode op) {
  constexpr int lanes_s = kSimd128Size / sizeof(S);
  constexpr int mem_index = 16;  // Load from mem index 16 (bytes).
  constexpr S sentinel = S{-1};
  S* memory;
  S* global;

  // Sets up memory and an s128 global on a fresh runner: randomizes the
  // memory contents, then plants the sentinel at byte offset 16 (element
  // index lanes_s), which is where the generated code loads from.
  auto initialize_builder = [=](WasmRunner<int32_t>* r) -> std::tuple<S*, S*> {
    S* memory = r->builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
    S* global = r->builder().AddGlobal<S>(kWasmS128);
    r->builder().RandomizeMemory();
    r->builder().WriteMemory(&memory[lanes_s], sentinel);
    return std::make_tuple(memory, global);
  };

  // Check all supported alignments.
  constexpr int max_alignment = base::bits::CountTrailingZeros(sizeof(S));
  for (byte alignment = 0; alignment <= max_alignment; alignment++) {
    WasmRunner<int32_t> r(execution_tier, lower_simd);
    std::tie(memory, global) = initialize_builder(&r);

    BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
          WASM_ONE);
    r.Call();

    // Only first lane is set to sentinel.
    CHECK_EQ(sentinel, ReadLittleEndianValue<S>(&global[0]));
    // The other lanes are zero.
    for (int i = 1; i < lanes_s; i++) {
      CHECK_EQ(S{0}, ReadLittleEndianValue<S>(&global[i]));
    }
  }

  {
    // Use memarg to specify the offset (address operand is zero).
    WasmRunner<int32_t> r(execution_tier, lower_simd);
    std::tie(memory, global) = initialize_builder(&r);

    BUILD(
        r,
        WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_OFFSET(op, WASM_ZERO, mem_index)),
        WASM_ONE);
    r.Call();

    // Only first lane is set to sentinel.
    CHECK_EQ(sentinel, ReadLittleEndianValue<S>(&global[0]));
    // The other lanes are zero.
    for (int i = 1; i < lanes_s; i++) {
      CHECK_EQ(S{0}, ReadLittleEndianValue<S>(&global[i]));
    }
  }

  // Test for OOB.
  {
    WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
    r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
    r.builder().AddGlobal<S>(kWasmS128);

    BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
          WASM_ONE);

    // Load-zero ops load sizeof(S) bytes, so every index from
    // kWasmPageSize - (sizeof(S) - 1) onwards must trap.
    for (uint32_t offset = kWasmPageSize - (sizeof(S) - 1);
         offset < kWasmPageSize; ++offset) {
      CHECK_TRAP(r.Call(offset));
    }
  }
}
|
|
|
|
|
2020-11-10 02:00:52 +00:00
|
|
|
// Load one i32 into lane 0, zero the other 3 lanes.
WASM_SIMD_TEST(S128Load32Zero) {
  RunLoadZeroTest<int32_t>(execution_tier, lower_simd, kExprS128Load32Zero);
}
|
|
|
|
|
2020-11-10 02:00:52 +00:00
|
|
|
// Load one i64 into lane 0, zero the other lane.
WASM_SIMD_TEST(S128Load64Zero) {
  RunLoadZeroTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Zero);
}
|
|
|
|
|
2020-10-12 17:09:03 +00:00
|
|
|
template <typename T>
|
|
|
|
void RunLoadLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
|
|
|
WasmOpcode load_op, WasmOpcode splat_op) {
|
|
|
|
WasmOpcode const_op =
|
|
|
|
splat_op == kExprI64x2Splat ? kExprI64Const : kExprI32Const;
|
|
|
|
|
|
|
|
constexpr int lanes_s = kSimd128Size / sizeof(T);
|
|
|
|
constexpr int mem_index = 16; // Load from mem index 16 (bytes).
|
|
|
|
constexpr int splat_value = 33;
|
2020-10-28 03:49:59 +00:00
|
|
|
T sentinel = T{-1};
|
2020-10-12 17:09:03 +00:00
|
|
|
|
2020-10-28 03:49:59 +00:00
|
|
|
T* memory;
|
|
|
|
T* global;
|
2020-10-12 17:09:03 +00:00
|
|
|
|
2020-10-28 03:49:59 +00:00
|
|
|
auto build_fn = [=, &memory, &global](WasmRunner<int32_t>& r, int mem_index,
|
|
|
|
int lane, int alignment, int offset) {
|
|
|
|
memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
|
|
|
|
global = r.builder().AddGlobal<T>(kWasmS128);
|
|
|
|
r.builder().WriteMemory(&memory[lanes_s], sentinel);
|
2020-10-12 17:09:03 +00:00
|
|
|
// Splat splat_value, then only load and replace a single lane with the
|
|
|
|
// sentinel value.
|
|
|
|
BUILD(r, WASM_I32V(mem_index), const_op, splat_value,
|
2020-10-28 03:49:59 +00:00
|
|
|
WASM_SIMD_OP(splat_op), WASM_SIMD_OP(load_op), alignment, offset,
|
|
|
|
lane, kExprGlobalSet, 0, WASM_ONE);
|
|
|
|
};
|
2020-10-12 17:09:03 +00:00
|
|
|
|
2020-10-28 03:49:59 +00:00
|
|
|
auto check_results = [=](T* global, int sentinel_lane = 0) {
|
2020-10-12 17:09:03 +00:00
|
|
|
// Only one lane is loaded, the rest of the lanes are unchanged.
|
|
|
|
for (int i = 0; i < lanes_s; i++) {
|
2020-10-28 03:49:59 +00:00
|
|
|
T expected = i == sentinel_lane ? sentinel : static_cast<T>(splat_value);
|
|
|
|
CHECK_EQ(expected, ReadLittleEndianValue<T>(&global[i]));
|
2020-10-12 17:09:03 +00:00
|
|
|
}
|
2020-10-28 03:49:59 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
for (int lane_index = 0; lane_index < lanes_s; ++lane_index) {
|
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
|
|
|
build_fn(r, mem_index, lane_index, /*alignment=*/0, /*offset=*/0);
|
|
|
|
r.Call();
|
|
|
|
check_results(global, lane_index);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check all possible alignments.
|
|
|
|
constexpr int max_alignment = base::bits::CountTrailingZeros(sizeof(T));
|
|
|
|
for (byte alignment = 0; alignment <= max_alignment; ++alignment) {
|
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
|
|
|
build_fn(r, mem_index, /*lane=*/0, alignment, /*offset=*/0);
|
|
|
|
r.Call();
|
|
|
|
check_results(global);
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// Use memarg to specify offset.
|
|
|
|
int lane_index = 0;
|
|
|
|
WasmRunner<int32_t> r(execution_tier, lower_simd);
|
|
|
|
build_fn(r, /*mem_index=*/0, /*lane=*/0, /*alignment=*/0,
|
|
|
|
/*offset=*/mem_index);
|
|
|
|
r.Call();
|
|
|
|
check_results(global, lane_index);
|
2020-10-12 17:09:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Test for OOB.
|
|
|
|
{
|
|
|
|
WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
|
|
|
|
r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
|
|
|
|
r.builder().AddGlobal<T>(kWasmS128);
|
|
|
|
|
2020-12-17 16:55:33 +00:00
|
|
|
BUILD(r, WASM_LOCAL_GET(0), const_op, splat_value, WASM_SIMD_OP(splat_op),
|
2020-10-12 17:09:03 +00:00
|
|
|
WASM_SIMD_OP(load_op), ZERO_ALIGNMENT, ZERO_OFFSET, 0, kExprGlobalSet,
|
|
|
|
0, WASM_ONE);
|
|
|
|
|
|
|
|
// Load lane load sizeof(T) bytes.
|
|
|
|
for (uint32_t index = kWasmPageSize - (sizeof(T) - 1);
|
|
|
|
index < kWasmPageSize; ++index) {
|
|
|
|
CHECK_TRAP(r.Call(index));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Instantiate RunLoadLaneTest for each lane width of the s128 load-lane
// instructions (8, 16, 32 and 64 bits), pairing each load opcode with the
// splat opcode of the matching lane shape.
WASM_SIMD_TEST_NO_LOWERING(S128Load8Lane) {
  RunLoadLaneTest<int8_t>(execution_tier, lower_simd, kExprS128Load8Lane,
                          kExprI8x16Splat);
}

WASM_SIMD_TEST_NO_LOWERING(S128Load16Lane) {
  RunLoadLaneTest<int16_t>(execution_tier, lower_simd, kExprS128Load16Lane,
                           kExprI16x8Splat);
}

WASM_SIMD_TEST_NO_LOWERING(S128Load32Lane) {
  RunLoadLaneTest<int32_t>(execution_tier, lower_simd, kExprS128Load32Lane,
                           kExprI32x4Splat);
}

WASM_SIMD_TEST_NO_LOWERING(S128Load64Lane) {
  RunLoadLaneTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Lane,
                           kExprI64x2Splat);
}
|
2020-10-15 22:02:24 +00:00
|
|
|
|
|
|
|
// Tests the S128StoreXLane instructions: splat splat_value into a v128, then
// store exactly one lane of it to memory. Covers every lane index, every
// legal alignment, a non-zero memarg offset, and out-of-bounds trapping.
template <typename T>
void RunStoreLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode store_op, WasmOpcode splat_op) {
  constexpr int lanes = kSimd128Size / sizeof(T);
  constexpr int mem_index = 16;  // Store to mem index 16 (bytes).
  constexpr int splat_value = 33;
  // I64x2 lanes are splatted from an i64 constant; all other shapes from i32.
  WasmOpcode const_op =
      splat_op == kExprI64x2Splat ? kExprI64Const : kExprI32Const;

  T* memory;  // Will be set by build_fn.

  // Builds a function that splats splat_value and stores lane `lane_index`
  // of the splat to address mem_index (plus the memarg offset). The memarg
  // immediates (alignment, offset, lane) are emitted as raw bytes after the
  // store opcode.
  auto build_fn = [=, &memory](WasmRunner<int32_t>& r, int mem_index,
                               int lane_index, int alignment, int offset) {
    memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
    // Splat splat_value, then only Store and replace a single lane.
    BUILD(r, WASM_I32V(mem_index), const_op, splat_value,
          WASM_SIMD_OP(splat_op), WASM_SIMD_OP(store_op), alignment, offset,
          lane_index, WASM_ONE);
    // Zero memory so check_results can rely on untouched bytes being 0.
    r.builder().BlankMemory();
  };

  // Exactly one element (at byte offset 16, i.e. element index `lanes`)
  // should hold splat_value; its neighbors on both sides must remain 0.
  auto check_results = [=](WasmRunner<int32_t>& r, T* memory) {
    for (int i = 0; i < lanes; i++) {
      CHECK_EQ(0, r.builder().ReadMemory(&memory[i]));
    }

    CHECK_EQ(splat_value, r.builder().ReadMemory(&memory[lanes]));

    for (int i = lanes + 1; i < lanes * 2; i++) {
      CHECK_EQ(0, r.builder().ReadMemory(&memory[i]));
    }
  };

  // Store each lane in turn; since all lanes of the splat hold the same
  // value, the expected memory contents are identical for every lane index.
  for (int lane_index = 0; lane_index < lanes; lane_index++) {
    WasmRunner<int32_t> r(execution_tier, lower_simd);
    build_fn(r, mem_index, lane_index, ZERO_ALIGNMENT, ZERO_OFFSET);
    r.Call();
    check_results(r, memory);
  }

  // Check all possible alignments.
  constexpr int max_alignment = base::bits::CountTrailingZeros(sizeof(T));
  for (byte alignment = 0; alignment <= max_alignment; ++alignment) {
    WasmRunner<int32_t> r(execution_tier, lower_simd);
    build_fn(r, mem_index, /*lane_index=*/0, alignment, ZERO_OFFSET);
    r.Call();
    check_results(r, memory);
  }

  {
    // Use memarg for offset.
    WasmRunner<int32_t> r(execution_tier, lower_simd);
    build_fn(r, /*mem_index=*/0, /*lane_index=*/0, ZERO_ALIGNMENT, mem_index);
    r.Call();
    check_results(r, memory);
  }

  // OOB stores
  {
    WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
    r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));

    BUILD(r, WASM_LOCAL_GET(0), const_op, splat_value, WASM_SIMD_OP(splat_op),
          WASM_SIMD_OP(store_op), ZERO_ALIGNMENT, ZERO_OFFSET, 0, WASM_ONE);

    // StoreLane stores sizeof(T) bytes. Every start index whose sizeof(T)-byte
    // access would cross the end of the single-page memory must trap.
    for (uint32_t index = kWasmPageSize - (sizeof(T) - 1);
         index < kWasmPageSize; ++index) {
      CHECK_TRAP(r.Call(index));
    }
  }
}
|
|
|
|
|
|
|
|
// Instantiate RunStoreLaneTest for each lane width of the s128 store-lane
// instructions, pairing each store opcode with the matching splat opcode.
WASM_SIMD_TEST_NO_LOWERING(S128Store8Lane) {
  RunStoreLaneTest<int8_t>(execution_tier, lower_simd, kExprS128Store8Lane,
                           kExprI8x16Splat);
}

WASM_SIMD_TEST_NO_LOWERING(S128Store16Lane) {
  RunStoreLaneTest<int16_t>(execution_tier, lower_simd, kExprS128Store16Lane,
                            kExprI16x8Splat);
}

WASM_SIMD_TEST_NO_LOWERING(S128Store32Lane) {
  RunStoreLaneTest<int32_t>(execution_tier, lower_simd, kExprS128Store32Lane,
                            kExprI32x4Splat);
}

WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
  RunStoreLaneTest<int64_t>(execution_tier, lower_simd, kExprS128Store64Lane,
                            kExprI64x2Splat);
}
|
|
|
|
|
2019-07-16 02:58:31 +00:00
|
|
|
// Defines a test for v128.any_true over a splat of the given lane shape:
// any_true of a splat of `max` or 5 is 1, any_true of a splat of 0 is 0.
// The lanes==2 case is skipped under SIMD lowering.
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type)                \
  WASM_SIMD_TEST(S##format##AnyTrue) {                                        \
    WasmRunner<int32_t, param_type> r(execution_tier, lower_simd);            \
    if (lanes == 2 && lower_simd == kLowerSimd) return;                       \
    byte simd = r.AllocateLocal(kWasmS128);                                   \
    BUILD(                                                                    \
        r,                                                                    \
        WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
        WASM_SIMD_UNOP(kExprV128AnyTrue, WASM_LOCAL_GET(simd)));              \
    CHECK_EQ(1, r.Call(max));                                                 \
    CHECK_EQ(1, r.Call(5));                                                   \
    CHECK_EQ(0, r.Call(0));                                                   \
  }
WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
|
2019-01-10 23:22:07 +00:00
|
|
|
|
2020-05-15 23:30:03 +00:00
|
|
|
// Special any_true test case that splats a -0.0 double into an i64x2.
// This specifically ensures that our implementation correctly handles the
// fact that 0.0 and -0.0 have different bit patterns: any_true operates on
// the raw bits, even though IEEE 754 says the two values compare equal.
WASM_SIMD_TEST(V128AnytrueWithNegativeZero) {
  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
        WASM_SIMD_UNOP(kExprV128AnyTrue, WASM_LOCAL_GET(simd)));
  // 0x8000000000000000 is the bit pattern of -0.0; it is non-zero, so
  // any_true must report 1, while an all-zero input must report 0.
  CHECK_EQ(1, r.Call(0x8000000000000000));
  CHECK_EQ(0, r.Call(0x0000000000000000));
}
|
|
|
|
|
2019-07-16 02:58:31 +00:00
|
|
|
// Defines a test for iNxM.all_true over a splat of the given lane shape:
// all_true of a splat of `max` or 1 is 1, all_true of a splat of 0 is 0.
// The lanes==2 case is skipped under SIMD lowering.
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type)                \
  WASM_SIMD_TEST(I##format##AllTrue) {                                        \
    WasmRunner<int32_t, param_type> r(execution_tier, lower_simd);            \
    if (lanes == 2 && lower_simd == kLowerSimd) return;                       \
    byte simd = r.AllocateLocal(kWasmS128);                                   \
    BUILD(                                                                    \
        r,                                                                    \
        WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
        WASM_SIMD_UNOP(kExprI##format##AllTrue, WASM_LOCAL_GET(simd)));       \
    CHECK_EQ(1, r.Call(max));                                                 \
    CHECK_EQ(1, r.Call(0x1));                                                 \
    CHECK_EQ(0, r.Call(0));                                                   \
  }
WASM_SIMD_ALLTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
|
2019-01-10 23:22:07 +00:00
|
|
|
|
2019-03-25 20:44:32 +00:00
|
|
|
// Tests v128.bitselect: selects bits from the first operand where the mask
// (third operand) is 1, from the second operand where it is 0. With an
// all-ones mask the result is the first operand, 0x01020304 in each lane.
WASM_SIMD_TEST(BitSelect) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte simd = r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_LOCAL_SET(
            simd,
            WASM_SIMD_SELECT(32x4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(0x01020304)),
                             WASM_SIMD_I32x4_SPLAT(WASM_I32V(0)),
                             WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)))),
        WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(simd)));
  CHECK_EQ(0x01020304, r.Call(0xFFFFFFFF));
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
void RunSimdConstTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
|
2020-07-02 00:19:09 +00:00
|
|
|
const std::array<uint8_t, kSimd128Size>& expected) {
|
|
|
|
WasmRunner<uint32_t> r(execution_tier, lower_simd);
|
|
|
|
byte temp1 = r.AllocateLocal(kWasmS128);
|
|
|
|
uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
|
2020-12-17 16:56:08 +00:00
|
|
|
BUILD(r, WASM_GLOBAL_SET(temp1, WASM_SIMD_CONSTANT(expected)), WASM_ONE);
|
2020-07-02 00:19:09 +00:00
|
|
|
CHECK_EQ(1, r.Call());
|
|
|
|
for (size_t i = 0; i < expected.size(); i++) {
|
|
|
|
CHECK_EQ(ReadLittleEndianValue<uint8_t>(&src0[i]), expected[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-09 20:29:17 +00:00
|
|
|
WASM_SIMD_TEST(S128Const) {
  // A fully generic constant: byte i of the s128 holds the value i.
  std::array<uint8_t, kSimd128Size> expected;
  for (size_t i = 0; i < expected.size(); ++i) {
    expected[i] = static_cast<uint8_t>(i);
  }
  RunSimdConstTest(execution_tier, lower_simd, expected);

  // Same pattern, but keep the first 4 lanes as 0.
  std::fill(expected.begin(), expected.begin() + 4, 0);
  RunSimdConstTest(execution_tier, lower_simd, expected);

  // Check sign extension logic used to pack int32s into int64.
  expected.fill(0);
  // Set the top bit of lane 3 (top bit of first int32), the rest can be 0.
  expected[3] = 0x80;
  RunSimdConstTest(execution_tier, lower_simd, expected);
}
|
|
|
|
|
2020-10-09 20:29:17 +00:00
|
|
|
WASM_SIMD_TEST(S128ConstAllZero) {
  // Value-initialization zeroes every byte of the expected pattern.
  std::array<uint8_t, kSimd128Size> expected{};
  RunSimdConstTest(execution_tier, lower_simd, expected);
}
|
|
|
|
|
2020-10-09 20:29:17 +00:00
|
|
|
WASM_SIMD_TEST(S128ConstAllOnes) {
  // Every byte of the constant is 0xff.
  std::array<uint8_t, kSimd128Size> expected;
  expected.fill(0xff);
  RunSimdConstTest(execution_tier, lower_simd, expected);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Applies an i8x16 relational opcode to operands of mixed shape: an i8x16
// splat of value1 compared against the raw bits of an i16x8 splat of value2,
// then extracts lane 0 of the i8x16 result. The expected value is computed
// on the low byte of value2 (lane 0's byte after the i16x8 splat).
void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
                                   LowerSimd lower_simd, WasmOpcode opcode,
                                   Int8BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = 0, value2 = 1;  // Parameter indices of the two inputs.
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  byte temp3 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
        WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
        WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
                                              WASM_LOCAL_GET(temp2))),
        WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));

  // The static_cast<uint8_t> mirrors the truncation of value2 to the byte
  // that ends up in lane 0 of the second operand.
  CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
           r.Call(0xff, 0x7fff));
  CHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
           r.Call(0xfe, 0x7fff));
  CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
           r.Call(0xff, 0x7ffe));
}
|
|
|
|
|
2020-10-09 20:29:17 +00:00
|
|
|
// Instantiate the mixed-shape relational test for each unsigned i8x16
// comparison opcode.
WASM_SIMD_TEST(I8x16LeUMixed) {
  RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LeU,
                                UnsignedLessEqual);
}
WASM_SIMD_TEST(I8x16LtUMixed) {
  RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LtU,
                                UnsignedLess);
}
WASM_SIMD_TEST(I8x16GeUMixed) {
  RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16GeU,
                                UnsignedGreaterEqual);
}
WASM_SIMD_TEST(I8x16GtUMixed) {
  RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16GtU,
                                UnsignedGreater);
}
|
|
|
|
|
2020-08-04 11:09:23 +00:00
|
|
|
// Applies an i16x8 relational opcode to operands of mixed shape: an i16x8
// splat of value1 compared against the raw bits of an i32x4 splat of value2,
// then extracts lane 0 of the i16x8 result. The expected value is computed
// on the low 16 bits of value2 (lane 0's half-word after the i32x4 splat).
void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
                                   LowerSimd lower_simd, WasmOpcode opcode,
                                   Int16BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = 0, value2 = 1;  // Parameter indices of the two inputs.
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  byte temp3 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
        WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
        WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
                                              WASM_LOCAL_GET(temp2))),
        WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));

  // The static_cast<uint16_t> mirrors the truncation of value2 to the
  // half-word that ends up in lane 0 of the second operand.
  CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
           r.Call(0xffff, 0x7fffffff));
  CHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
           r.Call(0xfeff, 0x7fffffff));
  CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
           r.Call(0xffff, 0x7ffffeff));
}
|
|
|
|
|
2020-10-09 20:29:17 +00:00
|
|
|
// Instantiate the mixed-shape relational test for each unsigned i16x8
// comparison opcode.
WASM_SIMD_TEST(I16x8LeUMixed) {
  RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LeU,
                                UnsignedLessEqual);
}
WASM_SIMD_TEST(I16x8LtUMixed) {
  RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LtU,
                                UnsignedLess);
}
WASM_SIMD_TEST(I16x8GeUMixed) {
  RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8GeU,
                                UnsignedGreaterEqual);
}
WASM_SIMD_TEST(I16x8GtUMixed) {
  RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8GtU,
                                UnsignedGreater);
}
|
|
|
|
|
2020-10-09 20:29:17 +00:00
|
|
|
WASM_SIMD_TEST(I16x8ExtractLaneU_I8x16Splat) {
  // Test that we are correctly signed/unsigned extending when extracting.
  // Splatting 0xfa into all 16 bytes makes each 16-bit lane 0xfafa; the
  // unsigned extract must yield 0xfafa rather than a sign-extended value.
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte simd_val = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_LOCAL_SET(simd_val, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(0))),
        WASM_SIMD_I16x8_EXTRACT_LANE_U(0, WASM_LOCAL_GET(simd_val)));
  CHECK_EQ(0xfafa, r.Call(0xfa));
}
|
|
|
|
|
2020-05-05 18:04:33 +00:00
|
|
|
// Checks unsigned extracts of several i16x8 lanes of a splat against the
// splatted local, over all FOR_<Type>_INPUTS values.
// NOTE(review): int_val is a freshly allocated local, not the function's
// i32 parameter, and the body never assigns it from the parameter -- confirm
// that comparing the splat of this local against itself is the intended
// coverage.
#define WASM_EXTRACT_I16x8_TEST(Sign, Type)                                    \
  WASM_SIMD_TEST(I16X8ExtractLane##Sign) {                                     \
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);                \
    byte int_val = r.AllocateLocal(kWasmI32);                                  \
    byte simd_val = r.AllocateLocal(kWasmS128);                                \
    BUILD(r,                                                                   \
          WASM_LOCAL_SET(simd_val,                                             \
                         WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(int_val))),      \
          WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 0),            \
          WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 2),            \
          WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 4),            \
          WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 6), WASM_ONE); \
    FOR_##Type##_INPUTS(x) { CHECK_EQ(1, r.Call(x)); }                         \
  }
WASM_EXTRACT_I16x8_TEST(S, UINT16) WASM_EXTRACT_I16x8_TEST(I, INT16)
#undef WASM_EXTRACT_I16x8_TEST
|
|
|
|
|
2020-05-05 18:04:33 +00:00
|
|
|
// Checks unsigned extracts of several i8x16 lanes of a splat against the
// splatted local, over all FOR_<Type>_INPUTS values.
// NOTE(review): as with WASM_EXTRACT_I16x8_TEST, int_val is a freshly
// allocated local that is never assigned from the function's i32 parameter
// -- confirm the intended coverage.
#define WASM_EXTRACT_I8x16_TEST(Sign, Type)                               \
  WASM_SIMD_TEST(I8x16ExtractLane##Sign) {                                \
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);           \
    byte int_val = r.AllocateLocal(kWasmI32);                             \
    byte simd_val = r.AllocateLocal(kWasmS128);                           \
    BUILD(r,                                                              \
          WASM_LOCAL_SET(simd_val,                                        \
                         WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(int_val))), \
          WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 1),       \
          WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 3),       \
          WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 5),       \
          WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 7),       \
          WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 9),       \
          WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 10),      \
          WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 11),      \
          WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 13),      \
          WASM_ONE);                                                      \
    FOR_##Type##_INPUTS(x) { CHECK_EQ(1, r.Call(x)); }                    \
  }
WASM_EXTRACT_I8x16_TEST(S, UINT8) WASM_EXTRACT_I8x16_TEST(I, INT8)
#undef WASM_EXTRACT_I8x16_TEST
|
|
|
|
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef WASM_SIMD_TEST
|
2019-10-08 23:43:15 +00:00
|
|
|
#undef WASM_SIMD_CHECK_LANE_S
|
|
|
|
#undef WASM_SIMD_CHECK_LANE_U
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef TO_BYTE
|
|
|
|
#undef WASM_SIMD_OP
|
|
|
|
#undef WASM_SIMD_SPLAT
|
|
|
|
#undef WASM_SIMD_UNOP
|
|
|
|
#undef WASM_SIMD_BINOP
|
|
|
|
#undef WASM_SIMD_SHIFT_OP
|
|
|
|
#undef WASM_SIMD_CONCAT_OP
|
|
|
|
#undef WASM_SIMD_SELECT
|
2019-07-01 23:58:02 +00:00
|
|
|
#undef WASM_SIMD_F64x2_SPLAT
|
2019-07-10 04:22:46 +00:00
|
|
|
#undef WASM_SIMD_F64x2_EXTRACT_LANE
|
|
|
|
#undef WASM_SIMD_F64x2_REPLACE_LANE
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef WASM_SIMD_F32x4_SPLAT
|
|
|
|
#undef WASM_SIMD_F32x4_EXTRACT_LANE
|
|
|
|
#undef WASM_SIMD_F32x4_REPLACE_LANE
|
2019-07-01 23:58:02 +00:00
|
|
|
#undef WASM_SIMD_I64x2_SPLAT
|
|
|
|
#undef WASM_SIMD_I64x2_EXTRACT_LANE
|
|
|
|
#undef WASM_SIMD_I64x2_REPLACE_LANE
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef WASM_SIMD_I32x4_SPLAT
|
|
|
|
#undef WASM_SIMD_I32x4_EXTRACT_LANE
|
|
|
|
#undef WASM_SIMD_I32x4_REPLACE_LANE
|
|
|
|
#undef WASM_SIMD_I16x8_SPLAT
|
|
|
|
#undef WASM_SIMD_I16x8_EXTRACT_LANE
|
2019-10-08 23:43:15 +00:00
|
|
|
#undef WASM_SIMD_I16x8_EXTRACT_LANE_U
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef WASM_SIMD_I16x8_REPLACE_LANE
|
|
|
|
#undef WASM_SIMD_I8x16_SPLAT
|
|
|
|
#undef WASM_SIMD_I8x16_EXTRACT_LANE
|
2019-10-08 23:43:15 +00:00
|
|
|
#undef WASM_SIMD_I8x16_EXTRACT_LANE_U
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef WASM_SIMD_I8x16_REPLACE_LANE
|
2020-09-25 18:08:04 +00:00
|
|
|
#undef WASM_SIMD_I8x16_SHUFFLE_OP
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef WASM_SIMD_LOAD_MEM
|
2019-09-19 22:01:07 +00:00
|
|
|
#undef WASM_SIMD_LOAD_MEM_OFFSET
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef WASM_SIMD_STORE_MEM
|
2019-09-19 22:01:07 +00:00
|
|
|
#undef WASM_SIMD_STORE_MEM_OFFSET
|
2017-09-08 13:59:05 +00:00
|
|
|
#undef WASM_SIMD_SELECT_TEST
|
|
|
|
#undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
|
|
|
|
#undef WASM_SIMD_BOOL_REDUCTION_TEST
|
2019-03-25 20:44:32 +00:00
|
|
|
#undef WASM_SIMD_TEST_NO_LOWERING
|
2019-01-10 23:22:07 +00:00
|
|
|
#undef WASM_SIMD_ANYTRUE_TEST
|
|
|
|
#undef WASM_SIMD_ALLTRUE_TEST
|
[wasm-simd] Implement QFMA and QFMS on x64
Quasi Fused Multiply-Add and Quasi Fused Multiply-Subtract performs, on floats, a + b * c and a - b * c respectively.
When there is only a single rounding, it is a fused operation. Quasi in this case means that the result can either be fused or not fused (two roundings), depending on hardware support.
It is tricky to write the test because we need to calculate the expected value, and there is no easy way to express fused or unfused operation in C++, i.e.
we cannot confirm that float expected = a + b * c will perform a fused or unfused operation (unless we use intrinsics).
Thus in the test we have a list of simple checks, plus interesting values that we know will produce different results depending on whether it was fused or not.
The difference between 32x4 and 64x2 qfma/qfms is the type, and also the values of b and c that will cause an overflow, and thus the intermediate rounding will affect the final result.
The same array can be copy pasted for both types, but with a bit of templating we can avoid that duplication.
Change-Id: I0973a3d28468d25f310b593c72f21bff54d809a7
Bug: v8:9415
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1779325
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63878}
2019-09-18 19:44:10 +00:00
|
|
|
#undef WASM_SIMD_F64x2_QFMA
|
|
|
|
#undef WASM_SIMD_F64x2_QFMS
|
|
|
|
#undef WASM_SIMD_F32x4_QFMA
|
|
|
|
#undef WASM_SIMD_F32x4_QFMS
|
2020-07-17 17:48:28 +00:00
|
|
|
#undef WASM_SIMD_LOAD_OP
|
|
|
|
#undef WASM_SIMD_LOAD_OP_OFFSET
|
|
|
|
#undef WASM_SIMD_LOAD_OP_ALIGNMENT
|
2017-09-08 13:59:05 +00:00
|
|
|
|
2017-09-21 03:29:52 +00:00
|
|
|
} // namespace test_run_wasm_simd
|
2017-09-01 12:57:34 +00:00
|
|
|
} // namespace wasm
|
|
|
|
} // namespace internal
|
|
|
|
} // namespace v8
|