[wasm-simd] Remove the scalar lowering pass

Bug: v8:11613
Change-Id: Ica7fe5ca63fa3729614eb09ace26e679a88577ac
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2826728
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74051}
Deepti Gandluri, 2021-04-19 12:50:10 -07:00 (committed by Commit Bot)
parent 3356078ae1
commit 39e32ac94a
8 changed files with 0 additions and 3362 deletions

BUILD.gn

@ -2482,7 +2482,6 @@ v8_header_set("v8_internal_headers") {
"src/compiler/select-lowering.h",
"src/compiler/serializer-for-background-compilation.h",
"src/compiler/serializer-hints.h",
"src/compiler/simd-scalar-lowering.h",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator-reducer.h",
"src/compiler/simplified-operator.h",
@ -3475,7 +3474,6 @@ v8_compiler_sources = [
if (v8_enable_webassembly) {
v8_compiler_sources += [
"src/compiler/int64-lowering.cc",
"src/compiler/simd-scalar-lowering.cc",
"src/compiler/wasm-compiler.cc",
]
}

src/compiler/OWNERS

@@ -23,7 +23,3 @@ per-file opcodes.*=ahaas@chromium.org
 per-file opcodes.*=bbudge@chromium.org
 per-file opcodes.*=gdeepti@chromium.org
 per-file opcodes.*=zhin@chromium.org
-per-file simd-scalar-lowering.*=bbudge@chromium.org
-per-file simd-scalar-lowering.*=gdeepti@chromium.org
-per-file simd-scalar-lowering.*=zhin@chromium.org

src/compiler/simd-scalar-lowering.cc

File diff suppressed because it is too large (the entire file is deleted)

src/compiler/simd-scalar-lowering.h

@@ -1,150 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_SIMD_SCALAR_LOWERING_H_
#define V8_COMPILER_SIMD_SCALAR_LOWERING_H_

#include "src/compiler/common-operator.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/simplified-operator.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {

template <typename T>
class Signature;

namespace compiler {

class SimdScalarLowering {
 public:
  SimdScalarLowering(MachineGraph* mcgraph,
                     SimplifiedOperatorBuilder* simplified,
                     Signature<MachineRepresentation>* signature);

  void LowerGraph();

  int GetParameterCountAfterLowering();

 private:
  enum class State : uint8_t { kUnvisited, kOnStack, kVisited };

  enum class SimdType : uint8_t {
    kFloat64x2,
    kFloat32x4,
    kInt64x2,
    kInt32x4,
    kInt16x8,
    kInt8x16
  };

#if defined(V8_TARGET_BIG_ENDIAN)
  static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
                                           7,  6,  5,  4,  3,  2,  1, 0};
#else
  static constexpr int kLaneOffsets[16] = {0, 1, 2,  3,  4,  5,  6,  7,
                                           8, 9, 10, 11, 12, 13, 14, 15};
#endif

  struct Replacement {
    Node** node = nullptr;
    SimdType type;  // represents output type
    int num_replacements = 0;
  };

  struct NodeState {
    Node* node;
    int input_index;
  };

  Zone* zone() const { return mcgraph_->zone(); }
  Graph* graph() const { return mcgraph_->graph(); }
  MachineOperatorBuilder* machine() const { return mcgraph_->machine(); }
  CommonOperatorBuilder* common() const { return mcgraph_->common(); }
  SimplifiedOperatorBuilder* simplified() const { return simplified_; }
  Signature<MachineRepresentation>* signature() const { return signature_; }

  void LowerNode(Node* node);
  bool DefaultLowering(Node* node);

  int NumLanes(SimdType type);
  void ReplaceNode(Node* old, Node** new_nodes, int count);
  bool HasReplacement(size_t index, Node* node);
  Node** GetReplacements(Node* node);
  int ReplacementCount(Node* node);
  void Float64ToInt64(Node** replacements, Node** result);
  void Float32ToInt32(Node** replacements, Node** result);
  void Int32ToFloat32(Node** replacements, Node** result);
  void Int64ToFloat64(Node** replacements, Node** result);
  void Int64ToInt32(Node** replacements, Node** result);
  template <typename T>
  void Int32ToSmallerInt(Node** replacements, Node** result);
  template <typename T>
  void SmallerIntToInt32(Node** replacements, Node** result);
  void Int32ToInt64(Node** replacements, Node** result);
  Node** GetReplacementsWithType(Node* node, SimdType type);
  SimdType ReplacementType(Node* node);
  void PreparePhiReplacement(Node* phi);
  void SetLoweredType(Node* node, Node* output);
  void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
  void LowerLoadOp(Node* node, SimdType type);
  void LowerLoadTransformOp(Node* node, SimdType type);
  void LowerStoreOp(Node* node);
  void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
                     bool not_horizontal = true);
  Node* ConstructPhiForComparison(Diamond d, SimdType rep_type, int true_value,
                                  int false_value);
  void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
                      bool invert_inputs = false);
  Node* FixUpperBits(Node* input, int32_t shift);
  void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
                                const Operator* op, bool not_horizontal = true);
  Node* Mask(Node* input, int32_t mask);
  void LowerSaturateBinaryOp(Node* node, SimdType input_rep_type,
                             const Operator* op, bool is_signed);
  void LowerUnaryOp(Node* node, SimdType input_rep_type, const Operator* op);
  void LowerIntMinMax(Node* node, const Operator* op, bool is_max,
                      SimdType type);
  void LowerConvertFromFloat(Node* node, bool is_signed);
  void LowerConvertFromInt(Node* node, SimdType input_rep_type,
                           SimdType output_rep_type, bool is_signed,
                           int start_index);
  void LowerPack(Node* node, SimdType input_rep_type, SimdType output_rep_type,
                 bool is_signed);
  void LowerShiftOp(Node* node, SimdType type);
  Node* BuildF64Trunc(Node* input);
  void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
  MachineType MachineTypeFrom(SimdType simdType);
  void LowerBitMaskOp(Node* node, SimdType rep_type, int msb_index);
  void LowerAllTrueOp(Node* node, SimdType rep_type);
  void LowerFloatPseudoMinMax(Node* node, const Operator* op, bool is_max,
                              SimdType type);
  void LowerExtMul(Node* node, const Operator* op, SimdType output_type,
                   SimdType input_type, bool low, bool is_signed);
  // Extends node, which is a lowered node of type rep_type, e.g. int8, int16,
  // int32 to a 32-bit or 64-bit node. node should be a lowered node (i.e. not
  // a SIMD node). The assumption here is that small ints are stored sign
  // extended.
  Node* ExtendNode(Node* node, SimdType rep_type, bool is_signed);

  MachineGraph* const mcgraph_;
  SimplifiedOperatorBuilder* const simplified_;
  NodeMarker<State> state_;
  ZoneDeque<NodeState> stack_;
  Replacement* replacements_;
  Signature<MachineRepresentation>* signature_;
  Node* placeholder_;
  int parameter_count_after_lowering_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_SIMD_SCALAR_LOWERING_H_
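For readers skimming this deletion: the pass declared above worked by replacing each 128-bit SIMD node with NumLanes scalar replacement nodes (four Word32 for an i32x4, two Int64 nodes for an i64x2, and so on), then rewriting each operation lane-wise via the Lower* helpers. A minimal standalone sketch of the idea for an i32x4 add; the function name and the array representation here are illustrative, not V8 code:

```cpp
#include <array>
#include <cstdint>

// A lowered i32x4 value: one scalar per lane, much as the pass's
// Replacement struct held one Node* per lane.
using I32x4Lanes = std::array<int32_t, 4>;

// Illustrative stand-in for what LowerBinaryOp did for i32x4.add:
// emit one scalar add per lane instead of a single SIMD operation.
// Unsigned arithmetic gives the wrapping semantics wasm requires.
I32x4Lanes LowerI32x4Add(const I32x4Lanes& a, const I32x4Lanes& b) {
  I32x4Lanes result;
  for (int lane = 0; lane < 4; ++lane) {
    result[lane] = static_cast<int32_t>(static_cast<uint32_t>(a[lane]) +
                                        static_cast<uint32_t>(b[lane]));
  }
  return result;
}
```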

src/compiler/wasm-compiler.cc

@@ -34,7 +34,6 @@
 #include "src/compiler/node-origin-table.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/pipeline.h"
-#include "src/compiler/simd-scalar-lowering.h"
 #include "src/compiler/zone-stats.h"
 #include "src/execution/isolate-inl.h"
 #include "src/heap/factory.h"
@@ -7795,49 +7794,8 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
     return false;
   }
 
-  // Lower SIMD first, i64x2 nodes will be lowered to int64 nodes, then int64
-  // lowering will take care of them.
   auto sig = CreateMachineSignature(mcgraph->zone(), func_body.sig,
                                     WasmGraphBuilder::kCalledFromWasm);
-  if (builder.has_simd() &&
-      (!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
-    SimplifiedOperatorBuilder simplified(mcgraph->zone());
-    SimdScalarLowering(mcgraph, &simplified, sig).LowerGraph();
-
-    // SimdScalarLowering changes all v128 to 4 i32, so update the machine
-    // signature for the call to LowerInt64.
-    size_t return_count = 0;
-    size_t param_count = 0;
-    for (auto ret : sig->returns()) {
-      return_count += ret == MachineRepresentation::kSimd128 ? 4 : 1;
-    }
-    for (auto param : sig->parameters()) {
-      param_count += param == MachineRepresentation::kSimd128 ? 4 : 1;
-    }
-    Signature<MachineRepresentation>::Builder sig_builder(
-        mcgraph->zone(), return_count, param_count);
-    for (auto ret : sig->returns()) {
-      if (ret == MachineRepresentation::kSimd128) {
-        for (int i = 0; i < 4; ++i) {
-          sig_builder.AddReturn(MachineRepresentation::kWord32);
-        }
-      } else {
-        sig_builder.AddReturn(ret);
-      }
-    }
-    for (auto param : sig->parameters()) {
-      if (param == MachineRepresentation::kSimd128) {
-        for (int i = 0; i < 4; ++i) {
-          sig_builder.AddParam(MachineRepresentation::kWord32);
-        }
-      } else {
-        sig_builder.AddParam(param);
-      }
-    }
-    sig = sig_builder.Build();
-  }
   builder.LowerInt64(sig);
 
   if (func_index >= FLAG_trace_wasm_ast_start &&
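The deleted block above was the pass's only call site: after LowerGraph ran, every kSimd128 in the machine signature had to be expanded to four kWord32 entries before int64 lowering. A standalone sketch of that expansion; the Rep enum and function name are illustrative stand-ins (V8 used Signature<MachineRepresentation>::Builder exactly as in the removed lines):

```cpp
#include <vector>

// Illustrative subset of MachineRepresentation.
enum class Rep { kWord32, kWord64, kFloat32, kFloat64, kSimd128 };

// Expand each kSimd128 entry into four kWord32 entries and leave every
// other representation untouched -- the transformation the removed code
// applied to both the return and parameter lists of the signature.
std::vector<Rep> ExpandSimd128ToWord32(const std::vector<Rep>& reps) {
  std::vector<Rep> out;
  out.reserve(reps.size());
  for (Rep rep : reps) {
    if (rep == Rep::kSimd128) {
      out.insert(out.end(), 4, Rep::kWord32);
    } else {
      out.push_back(rep);
    }
  }
  return out;
}
```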

test/cctest/BUILD.gn

@@ -439,7 +439,6 @@ v8_source_set("cctest_sources") {
     "wasm/test-run-wasm-relaxed-simd.cc",
     "wasm/test-run-wasm-sign-extension.cc",
     "wasm/test-run-wasm-simd-liftoff.cc",
-    "wasm/test-run-wasm-simd-scalar-lowering.cc",
     "wasm/test-run-wasm-simd.cc",
     "wasm/test-run-wasm-wrappers.cc",
     "wasm/test-run-wasm.cc",

test/cctest/cctest.status

@@ -384,7 +384,6 @@
   # SIMD not fully implemented yet
   'test-run-wasm-simd-liftoff/*': [SKIP],
-  'test-run-wasm-simd-scalar-lowering/*': [SKIP],
   'test-run-wasm-simd/*': [SKIP],
 
   # Some wasm functionality is not implemented yet
@@ -700,7 +699,6 @@
 ['no_simd_sse == True', {
   'test-run-wasm-simd/*': [SKIP],
   'test-run-wasm-simd-liftoff/*': [SKIP],
-  'test-run-wasm-simd-scalar-lowering/*': [SKIP],
 }],  # no_simd_sse == True
 
 ################################################################################

test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc

@@ -1,332 +0,0 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-tier.h"
#include "test/cctest/cctest.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"

namespace v8 {
namespace internal {
namespace wasm {
namespace test_run_wasm_simd {

#define WASM_SIMD_TEST(name)                                         \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                   \
                             TestExecutionTier execution_tier);      \
  TEST(RunWasm_##name##_simd_lowered) {                              \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                   \
    RunWasm_##name##_Impl(kLowerSimd, TestExecutionTier::kTurbofan); \
  }                                                                  \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                   \
                             TestExecutionTier execution_tier)
WASM_SIMD_TEST(I8x16ToF32x4) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  byte param1 = 0;
  BUILD(r,
        WASM_GLOBAL_SET(
            0, WASM_SIMD_UNOP(kExprF32x4Sqrt,
                              WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(param1)))),
        WASM_ONE);

  // Arbitrary pattern that doesn't end up creating a NaN.
  r.Call(0x5b);
  float f = bit_cast<float>(0x5b5b5b5b);
  float actual = ReadLittleEndianValue<float>(&g[0]);
  float expected = std::sqrt(f);
  CHECK_EQ(expected, actual);
}
WASM_SIMD_TEST(F64x2_Call_Return) {
  // Check that calling a function with f64x2 arguments, and returning f64x2,
  // is correctly lowered. The signatures of such functions are always lowered
  // to 4 Word32, so each f64x2 needs to be correctly converted.
  TestSignatures sigs;
  WasmRunner<double, double, double> r(execution_tier, lower_simd);
  WasmFunctionCompiler& fn = r.NewFunction(sigs.s_ss());
  BUILD(fn,
        WASM_SIMD_BINOP(kExprF64x2Min, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));

  byte c1[16] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
  byte c2[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x7f,
                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x7f};
  BUILD(r,
        WASM_SIMD_F64x2_EXTRACT_LANE(
            0, WASM_CALL_FUNCTION(fn.function_index(), WASM_SIMD_CONSTANT(c1),
                                  WASM_SIMD_CONSTANT(c2))));

  CHECK_EQ(0, r.Call(double{0}, bit_cast<double>(0x7fefffffffffffff)));
}
WASM_SIMD_TEST(F32x4_Call_Return) {
  // Check that functions that return f32x4 are correctly lowered into 4 int32
  // nodes. The signatures of such functions are always lowered to 4 Word32,
  // and if the last operation before the return was an f32x4 operation, the
  // result needs to be bitcast from float to int.
  TestSignatures sigs;
  WasmRunner<float, float> r(execution_tier, lower_simd);
  // A simple function that just calls f32x4.neg on the param.
  WasmFunctionCompiler& fn = r.NewFunction(sigs.s_s());
  BUILD(fn, WASM_SIMD_UNOP(kExprF32x4Neg, WASM_LOCAL_GET(0)));

  // TODO(v8:10507)
  // Use i32x4 splat since scalar lowering has a problem with f32x4 as a param
  // to a function call; the lowering is not correct yet.
  BUILD(r,
        WASM_SIMD_F32x4_EXTRACT_LANE(
            0, WASM_CALL_FUNCTION(fn.function_index(),
                                  WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)))));
  CHECK_EQ(-1.0, r.Call(1));
}
WASM_SIMD_TEST(I8x16_Call_Return) {
  // Check that calling a function with i8x16 arguments, and returning i8x16,
  // is correctly lowered. The signatures of such functions are always lowered
  // to 4 Word32, so each i8x16 needs to be correctly converted.
  TestSignatures sigs;
  WasmRunner<uint32_t, uint32_t> r(execution_tier, lower_simd);
  WasmFunctionCompiler& fn = r.NewFunction(sigs.s_ss());
  BUILD(fn,
        WASM_SIMD_BINOP(kExprI8x16Add, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
  BUILD(r,
        WASM_SIMD_I8x16_EXTRACT_LANE(
            0, WASM_CALL_FUNCTION(fn.function_index(),
                                  WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(0)),
                                  WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(0)))));

  CHECK_EQ(2, r.Call(1));
}

WASM_SIMD_TEST(I16x8_Call_Return) {
  // Check that calling a function with i16x8 arguments, and returning i16x8,
  // is correctly lowered. The signatures of such functions are always lowered
  // to 4 Word32, so each i16x8 needs to be correctly converted.
  TestSignatures sigs;
  WasmRunner<uint32_t, uint32_t> r(execution_tier, lower_simd);
  WasmFunctionCompiler& fn = r.NewFunction(sigs.s_ss());
  BUILD(fn,
        WASM_SIMD_BINOP(kExprI16x8Add, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
  BUILD(r,
        WASM_SIMD_I16x8_EXTRACT_LANE(
            0, WASM_CALL_FUNCTION(fn.function_index(),
                                  WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(0)),
                                  WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(0)))));

  CHECK_EQ(2, r.Call(1));
}

WASM_SIMD_TEST(I64x2_Call_Return) {
  // Check that calling a function with i64x2 arguments, and returning i64x2,
  // is correctly lowered. The signatures of such functions are always lowered
  // to 4 Word32, so each i64x2 needs to be correctly converted.
  TestSignatures sigs;
  WasmRunner<uint64_t, uint64_t> r(execution_tier, lower_simd);
  WasmFunctionCompiler& fn = r.NewFunction(sigs.s_ss());
  BUILD(fn,
        WASM_SIMD_BINOP(kExprI64x2Add, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
  BUILD(r,
        WASM_SIMD_I64x2_EXTRACT_LANE(
            0, WASM_CALL_FUNCTION(fn.function_index(),
                                  WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0)),
                                  WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0)))));

  CHECK_EQ(2, r.Call(1));
}
WASM_SIMD_TEST(I8x16Eq_ToTest_S128Const) {
  // Test the implementation of S128Const in scalar lowering; this test case
  // was causing a crash.
  TestSignatures sigs;
  WasmRunner<uint32_t> r(execution_tier, lower_simd);

  byte c1[16] = {0x00, 0x00, 0x80, 0xbf, 0x00, 0x00, 0x00, 0x00,
                 0x00, 0x00, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x40};
  byte c2[16] = {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
                 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02};
  byte c3[16] = {0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
  BUILD(r,
        WASM_SIMD_BINOP(kExprI8x16Eq, WASM_SIMD_CONSTANT(c1),
                        WASM_SIMD_CONSTANT(c2)),
        WASM_SIMD_CONSTANT(c3), WASM_SIMD_OP(kExprI8x16Eq),
        WASM_SIMD_OP(kExprI8x16ExtractLaneS), TO_BYTE(4));
  CHECK_EQ(0xffffffff, r.Call());
}

WASM_SIMD_TEST(F32x4_S128Const) {
  // Test that S128Const lowering is done correctly when it is used as an
  // input into an f32x4 operation. This was triggering a CHECK failure in
  // the register-allocator-verifier.
  TestSignatures sigs;
  WasmRunner<float> r(execution_tier, lower_simd);

  // f32x4(1.0, 2.0, 3.0, 4.0)
  byte c1[16] = {0x00, 0x00, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x40,
                 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x80, 0x40};
  // f32x4(5.0, 6.0, 7.0, 8.0)
  byte c2[16] = {0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0xc0, 0x40,
                 0x00, 0x00, 0xe0, 0x40, 0x00, 0x00, 0x00, 0x41};
  BUILD(r,
        WASM_SIMD_BINOP(kExprF32x4Min, WASM_SIMD_CONSTANT(c1),
                        WASM_SIMD_CONSTANT(c2)),
        WASM_SIMD_OP(kExprF32x4ExtractLane), TO_BYTE(0));
  CHECK_EQ(1.0, r.Call());
}
WASM_SIMD_TEST(AllTrue_DifferentShapes) {
  // Test all_true lowering with splats of different shapes.
  {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
          WASM_SIMD_OP(kExprI8x16AllTrue));
    CHECK_EQ(0, r.Call(0x00FF00FF));
  }

  {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
          WASM_SIMD_OP(kExprI16x8AllTrue));
    CHECK_EQ(0, r.Call(0x000000FF));
  }

  // Check float input to all_true.
  {
    WasmRunner<int32_t, float> r(execution_tier, lower_simd);
    BUILD(r, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)),
          WASM_SIMD_OP(kExprI16x8AllTrue));
    CHECK_EQ(1, r.Call(0x000F000F));
  }
}
WASM_SIMD_TEST(AnyTrue_DifferentShapes) {
  // Test any_true lowering with splats of different shapes.
  {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
          WASM_SIMD_OP(kExprV128AnyTrue));
    CHECK_EQ(0, r.Call(0x00000000));
  }

  {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
          WASM_SIMD_OP(kExprV128AnyTrue));
    CHECK_EQ(1, r.Call(0x000000FF));
  }

  // Check float input to any_true.
  {
    WasmRunner<int32_t, float> r(execution_tier, lower_simd);
    BUILD(r, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)),
          WASM_SIMD_OP(kExprV128AnyTrue));
    CHECK_EQ(0, r.Call(0x00000000));
  }
}
WASM_SIMD_TEST(V128_I64_PARAMS) {
  // This test exercises interaction between simd and int64 lowering. The
  // parameter indices were not correctly lowered because simd lowered a v128
  // in the function signature into 4 word32, and int64 lowering was still
  // treating it as 1 parameter.
  WasmRunner<uint64_t, uint64_t> r(execution_tier, lower_simd);
  FunctionSig::Builder builder(r.zone(), 1, 2);
  builder.AddParam(kWasmS128);
  builder.AddParam(kWasmI64);
  builder.AddReturn(kWasmS128);
  FunctionSig* sig = builder.Build();
  WasmFunctionCompiler& fn = r.NewFunction(sig);

  // Build a function that has both V128 and I64 arguments.
  BUILD(fn,
        WASM_SIMD_I64x2_REPLACE_LANE(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));

  BUILD(r, WASM_SIMD_I64x2_EXTRACT_LANE(
               0, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))));

  CHECK_EQ(0, r.Call(0));
}
WASM_SIMD_TEST(I8x16WidenS_I16x8NarrowU) {
  // Test an unsigned narrow (kExprI8x16UConvertI16x8) followed by a signed
  // widen of the low half (kExprI16x8SConvertI8x16Low): 0x7fff saturates to
  // 0xff in each byte, which the widen then sign-extends to -1 per lane.
  {
    WasmRunner<int32_t, int16_t> r(execution_tier, lower_simd);
    BUILD(r, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(0)),
          WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(0)),
          WASM_SIMD_OP(kExprI8x16UConvertI16x8),
          WASM_SIMD_OP(kExprI16x8SConvertI8x16Low),
          WASM_SIMD_OP(kExprI32x4ExtractLane), TO_BYTE(0));
    CHECK_EQ(bit_cast<int32_t>(0xffffffff), r.Call(0x7fff));
  }
}
WASM_SIMD_TEST(S128SelectWithF32x4) {
  WasmRunner<float, int32_t, float, int32_t> r(execution_tier, lower_simd);
  BUILD(r, WASM_LOCAL_GET(0), WASM_SIMD_OP(kExprI32x4Splat), WASM_LOCAL_GET(1),
        WASM_SIMD_OP(kExprF32x4Splat), WASM_LOCAL_GET(2),
        WASM_SIMD_OP(kExprI32x4Splat), WASM_SIMD_OP(kExprS128Select),
        WASM_SIMD_OP(kExprF32x4ExtractLane), 0);
  // Selection mask is all 0, so always select 2.0.
  CHECK_EQ(2.0, r.Call(1, 2.0, 0));
}

WASM_SIMD_TEST(S128AndNotWithF32x4) {
  WasmRunner<float, int32_t, float> r(execution_tier, lower_simd);
  BUILD(r, WASM_LOCAL_GET(0), WASM_SIMD_OP(kExprI32x4Splat), WASM_LOCAL_GET(1),
        WASM_SIMD_OP(kExprF32x4Splat), WASM_SIMD_OP(kExprS128AndNot),
        WASM_SIMD_OP(kExprF32x4ExtractLane), 0);
  // 0x00700000 & ~0x40800000 == 0x00700000
  CHECK_EQ(bit_cast<float>(0x700000),
           r.Call(0x00700000, bit_cast<float>(0x40800000)));
}
WASM_SIMD_TEST(FunctionCallWithExtractLaneOutputAsArgument) {
  // This uses the result of an extract lane as an argument to a function call
  // to exercise lowering for kCall, and to make sure the extract lane is
  // correctly replaced with a scalar.
  TestSignatures sigs;
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  WasmFunctionCompiler& fn = r.NewFunction(sigs.f_f());
  BUILD(fn, WASM_LOCAL_GET(0), WASM_LOCAL_GET(0), kExprF32Add);

  BUILD(r, WASM_LOCAL_GET(0), WASM_SIMD_OP(kExprI32x4Splat),
        WASM_SIMD_OP(kExprF32x4ExtractLane), 0, kExprCallFunction,
        fn.function_index(), WASM_SIMD_OP(kExprF32x4Splat), WASM_LOCAL_GET(0),
        WASM_SIMD_OP(kExprI32x4Splat), WASM_SIMD_OP(kExprI32x4Add),
        WASM_SIMD_OP(kExprI32x4ExtractLane), 0);
  CHECK_EQ(15, r.Call(5));
}

}  // namespace test_run_wasm_simd
}  // namespace wasm
}  // namespace internal
}  // namespace v8
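The Call_Return and AllTrue/AnyTrue tests above pin down the two trickiest parts of the deleted pass: bit-exact conversion of lanes through the 4-Word32 calling convention, and reduction ops over lowered lanes. A standalone sketch of both ideas, with illustrative names (not the deleted implementation):

```cpp
#include <array>
#include <cstdint>
#include <cstring>

// Split an f64x2 lane pair into the four 32-bit words the lowered call
// signature carried (bit-exact, via memcpy rather than a value cast).
std::array<uint32_t, 4> F64x2ToWords(double lane0, double lane1) {
  std::array<uint32_t, 4> words;
  std::memcpy(&words[0], &lane0, sizeof(lane0));
  std::memcpy(&words[2], &lane1, sizeof(lane1));
  return words;
}

// all_true over lowered i32x4 lanes: every lane must be nonzero.
bool I32x4AllTrue(const std::array<int32_t, 4>& lanes) {
  for (int32_t lane : lanes) {
    if (lane == 0) return false;
  }
  return true;
}

// v128.any_true is shape-independent: any nonzero bit in the 128 bits.
bool V128AnyTrue(const std::array<uint32_t, 4>& words) {
  for (uint32_t word : words) {
    if (word != 0) return true;
  }
  return false;
}
```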