[wasm simd] Implement F64x2Splat on x64

Bug: v8:8460
Change-Id: I0925f6ad9425b194a1274b14057cc6d36ce3dca1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1670169
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62478}
Author: Ng Zhi An <zhin@chromium.org>
Date: 2019-07-01 10:17:31 -07:00
Committed-by: Commit Bot
Commit: 8b40f97fb8
Parent: 1f09234a8e

16 changed files with 72 additions and 11 deletions
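f64x2.splat takes a single f64 and replicates it into both 64-bit lanes of a 128-bit SIMD value. A scalar model of the semantics (an illustrative sketch, not V8 code):

#include <array>

// f64x2.splat: duplicate one double into both lanes of a 2-lane vector.
std::array<double, 2> F64x2SplatModel(double x) { return {x, x}; }

The hunks below thread the new opcode through TurboFan's machine operators, the x64 instruction selector and code generator, the scalar-lowering fallback, the interpreter, and the opcode tables, plus a cctest.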

src/compiler/backend/instruction-selector.cc

@@ -1811,6 +1811,8 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kUnsafePointerAdd:
       MarkAsRepresentation(MachineType::PointerRepresentation(), node);
       return VisitUnsafePointerAdd(node);
+    case IrOpcode::kF64x2Splat:
+      return MarkAsSimd128(node), VisitF64x2Splat(node);
     case IrOpcode::kF32x4Splat:
       return MarkAsSimd128(node), VisitF32x4Splat(node);
     case IrOpcode::kF32x4ExtractLane:
@@ -2495,6 +2497,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
 // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
 #if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
 #endif  // !V8_TARGET_ARCH_X64
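MarkAsSimd128 records the node's 128-bit machine representation before the architecture-specific visitor runs; the comma operator just keeps each case on one line. Spelled out (equivalent form, for illustration only):

    case IrOpcode::kF64x2Splat:
      MarkAsSimd128(node);           // result uses a 128-bit representation
      return VisitF64x2Splat(node);  // then select the target instruction

The #if block keeps every architecture other than x64 on UNIMPLEMENTED until its backend is ported.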

src/compiler/backend/x64/code-generator-x64.cc

@@ -2235,6 +2235,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     }
+    case kX64F64x2Splat: {
+      XMMRegister dst = i.OutputSimd128Register();
+      if (instr->InputAt(0)->IsFPRegister()) {
+        __ pshufd(dst, i.InputDoubleRegister(0), 0x44);
+      } else {
+        __ pshufd(dst, i.InputOperand(0), 0x44);
+      }
+      break;
+    }
     // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
     case kX64F32x4Splat: {
       XMMRegister dst = i.OutputSimd128Register();
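pshufd copies the source's four 32-bit dwords into the destination as selected by the immediate. imm8 = 0x44 = 0b01'00'01'00 selects dwords {0, 1, 0, 1}, so the low 64 bits (the input double) land in both halves of the destination. A scalar model of the shuffle (a sketch, not V8 code; assumes dst and src do not alias):

#include <cstdint>

// dst[i] = src[(imm8 >> (2 * i)) & 3] for each 32-bit lane i.
void PshufdModel(uint32_t dst[4], const uint32_t src[4], uint8_t imm8) {
  for (int i = 0; i < 4; ++i) dst[i] = src[(imm8 >> (2 * i)) & 3];
}

The two branches differ only in whether the input is already in an XMM register or still in a memory operand.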

src/compiler/backend/x64/instruction-codes-x64.h

@@ -158,6 +158,7 @@ namespace compiler {
   V(X64Poke) \
   V(X64Peek) \
   V(X64StackCheck) \
+  V(X64F64x2Splat) \
   V(X64F32x4Splat) \
   V(X64F32x4ExtractLane) \
   V(X64F32x4ReplaceLane) \

src/compiler/backend/x64/instruction-scheduler-x64.cc

@@ -124,6 +124,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64Lea:
     case kX64Dec32:
     case kX64Inc32:
+    case kX64F64x2Splat:
     case kX64F32x4Splat:
     case kX64F32x4ExtractLane:
     case kX64F32x4ReplaceLane:

src/compiler/backend/x64/instruction-selector-x64.cc

@@ -2668,6 +2668,7 @@ void InstructionSelector::VisitS128Zero(Node* node) {
          g.Use(node->InputAt(0)));                           \
   }
 SIMD_TYPES(VISIT_SIMD_SPLAT)
+VISIT_SIMD_SPLAT(F64x2)
 VISIT_SIMD_SPLAT(I64x2)
 #undef VISIT_SIMD_SPLAT
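For reference, VISIT_SIMD_SPLAT(F64x2) expands along these lines (a sketch reconstructed from the macro tail visible above):

void InstructionSelector::VisitF64x2Splat(Node* node) {
  X64OperandGenerator g(this);
  // One 128-bit output register, one use of the scalar input.
  Emit(kX64F64x2Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}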

src/compiler/machine-operator.cc

@@ -245,6 +245,7 @@ MachineType AtomicOpType(Operator const* op) {
   V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
   V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
   V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
+  V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \
   V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
   V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
   V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
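Reading the row as (name, properties, value-input count, control-input count, output count), F64x2Splat is a pure operator taking one value input and producing one output. The builder method declared in the next hunk is what graph construction calls, as the wasm-compiler.cc hunk further down shows:

// Building the node in a TurboFan graph (taken from the wasm-compiler.cc hunk):
Node* splat = graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]);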

src/compiler/machine-operator.h

@@ -468,6 +468,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Float64SilenceNaN();

   // SIMD operators.
+  const Operator* F64x2Splat();
   const Operator* F32x4Splat();
   const Operator* F32x4ExtractLane(int32_t);
   const Operator* F32x4ReplaceLane(int32_t);

src/compiler/opcodes.h

@@ -724,6 +724,7 @@
   V(UnsafePointerAdd)

 #define MACHINE_SIMD_OP_LIST(V) \
+  V(F64x2Splat) \
   V(F32x4Splat) \
   V(F32x4ExtractLane) \
   V(F32x4ReplaceLane) \

src/compiler/simd-scalar-lowering.cc

@@ -122,6 +122,8 @@ void SimdScalarLowering::LowerGraph() {
   V(S1x16AnyTrue) \
   V(S1x16AllTrue)

+#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat)
+
 #define FOREACH_FLOAT32X4_OPCODE(V) \
   V(F32x4Splat) \
   V(F32x4ExtractLane) \
@@ -211,10 +213,12 @@ void SimdScalarLowering::LowerGraph() {
 MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
   switch (simdType) {
-    case SimdType::kInt64x2:
-      return MachineType::Int64();
+    case SimdType::kFloat64x2:
+      return MachineType::Float64();
     case SimdType::kFloat32x4:
       return MachineType::Float32();
+    case SimdType::kInt64x2:
+      return MachineType::Int64();
     case SimdType::kInt32x4:
       return MachineType::Int32();
     case SimdType::kInt16x8:
@@ -228,6 +232,10 @@ MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
 void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
   switch (node->opcode()) {
 #define CASE_STMT(name) case IrOpcode::k##name:
+    FOREACH_FLOAT64X2_OPCODE(CASE_STMT) {
+      replacements_[node->id()].type = SimdType::kFloat64x2;
+      break;
+    }
     FOREACH_INT64X2_OPCODE(CASE_STMT) {
       replacements_[node->id()].type = SimdType::kInt64x2;
       break;
@@ -335,7 +343,7 @@ static int GetReturnCountAfterLoweringSimd128(
 int SimdScalarLowering::NumLanes(SimdType type) {
   int num_lanes = 0;
-  if (type == SimdType::kInt64x2) {
+  if (type == SimdType::kFloat64x2 || type == SimdType::kInt64x2) {
     num_lanes = kNumLanes64;
   } else if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
     num_lanes = kNumLanes32;
@@ -1234,9 +1242,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
       LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
       break;
     }
+    case IrOpcode::kF64x2Splat:
+    case IrOpcode::kF32x4Splat:
     case IrOpcode::kI64x2Splat:
     case IrOpcode::kI32x4Splat:
-    case IrOpcode::kF32x4Splat:
     case IrOpcode::kI16x8Splat:
     case IrOpcode::kI8x16Splat: {
       Node** rep_node = zone()->NewArray<Node*>(num_lanes);
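On targets without SIMD support, the lowering replaces each 128-bit node with num_lanes scalar nodes; for a splat, every lane is just the scalar input. A simplified sketch of the case body that follows (replacement-lookup details omitted):

// num_lanes == 2 for F64x2 (see NumLanes above).
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
  rep_node[i] = node->InputAt(0);  // every lane is the same scalar
}
ReplaceNode(node, rep_node, num_lanes);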

src/compiler/simd-scalar-lowering.h

@@ -33,8 +33,9 @@ class SimdScalarLowering {
   enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
   enum class SimdType : uint8_t {
-    kInt64x2,
+    kFloat64x2,
     kFloat32x4,
+    kInt64x2,
     kInt32x4,
     kInt16x8,
     kInt8x16

src/compiler/wasm-compiler.cc

@@ -4001,6 +4001,8 @@ Node* WasmGraphBuilder::S128Zero() {
 Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
   has_simd_ = true;
   switch (opcode) {
+    case wasm::kExprF64x2Splat:
+      return graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]);
     case wasm::kExprF32x4Splat:
       return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
     case wasm::kExprF32x4SConvertI32x4:

src/wasm/wasm-interpreter.cc

@@ -2134,9 +2134,10 @@ class ThreadImpl {
     Push(WasmValue(Simd128(s))); \
     return true;                 \
   }
+      SPLAT_CASE(F64x2, float2, double, 2)
+      SPLAT_CASE(F32x4, float4, float, 4)
       SPLAT_CASE(I64x2, int2, int64_t, 2)
       SPLAT_CASE(I32x4, int4, int32_t, 4)
-      SPLAT_CASE(F32x4, float4, float, 4)
       SPLAT_CASE(I16x8, int8, int32_t, 8)
       SPLAT_CASE(I8x16, int16, int32_t, 16)
 #undef SPLAT_CASE
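Going by the macro tail visible above, SPLAT_CASE(F64x2, float2, double, 2) should expand to roughly:

case kExprF64x2Splat: {
  WasmValue val = Pop();
  double v = val.to<double>();
  float2 s;                                  // the 2-lane struct from wasm-value.h
  for (int i = 0; i < 2; i++) s.val[i] = v;  // write the double into both lanes
  Push(WasmValue(Simd128(s)));
  return true;
}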

src/wasm/wasm-opcodes.cc

@@ -26,6 +26,7 @@ namespace wasm {
 #define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
 #define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
 #define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
+#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
 #define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
 #define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
 #define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
@@ -223,6 +224,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
     CASE_SIMD_OP(Add, "add")
     CASE_SIMD_OP(Sub, "sub")
     CASE_SIMD_OP(Mul, "mul")
+    CASE_F64x2_OP(Splat, "splat")
     CASE_F32x4_OP(Abs, "abs")
     CASE_F32x4_OP(AddHoriz, "add_horizontal")
     CASE_F32x4_OP(RecipApprox, "recip_approx")

src/wasm/wasm-opcodes.h

@@ -274,6 +274,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
   V(I32x4Splat, 0xfd0c, s_i) \
   V(I64x2Splat, 0xfd0f, s_l) \
   V(F32x4Splat, 0xfd12, s_f) \
+  V(F64x2Splat, 0xfd15, s_d) \
   V(I8x16Eq, 0xfd18, s_ss) \
   V(I8x16Ne, 0xfd19, s_ss) \
   V(I8x16LtS, 0xfd1a, s_ss) \
@@ -559,6 +560,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
 #define FOREACH_SIMD_SIGNATURE(V) \
   V(s_s, kWasmS128, kWasmS128) \
   V(s_f, kWasmS128, kWasmF32) \
+  V(s_d, kWasmS128, kWasmF64) \
   V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
   V(s_i, kWasmS128, kWasmI32) \
   V(s_l, kWasmS128, kWasmI64) \
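The s_d signature says the instruction consumes one f64 and produces an s128, and 0xfd15 is the wire encoding: the 0xfd SIMD prefix byte followed by sub-opcode 0x15 (under the draft SIMD proposal numbering of the time). In a function body, splatting the first local would be encoded as (illustrative byte sequence):

const uint8_t kF64x2SplatExpr[] = {
    0x20, 0x00,  // local.get 0  (the f64 input)
    0xfd, 0x15,  // f64x2.splat  (prefix 0xfd, sub-opcode 0x15)
};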

src/wasm/wasm-value.h

@@ -15,11 +15,12 @@ namespace v8 {
 namespace internal {
 namespace wasm {

-#define FOREACH_SIMD_TYPE(V) \
-  V(float, float4, f32x4, 4) \
-  V(int64_t, int2, i64x2, 2) \
-  V(int32_t, int4, i32x4, 4) \
-  V(int16_t, int8, i16x8, 8) \
+#define FOREACH_SIMD_TYPE(V)  \
+  V(double, float2, f64x2, 2) \
+  V(float, float4, f32x4, 4)  \
+  V(int64_t, int2, i64x2, 2)  \
+  V(int32_t, int4, i32x4, 4)  \
+  V(int16_t, int8, i16x8, 8)  \
   V(int8_t, int16, i8x16, 16)

 #define DEFINE_SIMD_TYPE(cType, sType, name, kSize) \
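DEFINE_SIMD_TYPE (truncated above) turns each row into a lane-array struct, so the new row presumably yields something like:

// Sketch of the expansion for V(double, float2, f64x2, 2):
struct float2 {
  double val[2];  // two f64 lanes
};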

test/cctest/wasm/test-run-wasm-simd.cc

@@ -283,6 +283,7 @@ T Sqrt(T a) {
 #define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
   x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
 #define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)
+#define WASM_SIMD_F64x2_SPLAT(x) WASM_SIMD_SPLAT(F64x2, x)
 #define WASM_SIMD_F32x4_SPLAT(x) WASM_SIMD_SPLAT(F32x4, x)
 #define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
   x, WASM_SIMD_OP(kExprF32x4ExtractLane), TO_BYTE(lane)
@@ -370,6 +371,30 @@ bool IsExtreme(float x) {
          (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
 }

+#if V8_TARGET_ARCH_X64
+WASM_SIMD_TEST(F64x2Splat) {
+  WasmRunner<int32_t, double> r(execution_tier, lower_simd);
+  // Set up a global to hold output vector.
+  double* g = r.builder().AddGlobal<double>(kWasmS128);
+  byte param1 = 0;
+  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(param1))),
+        WASM_ONE);
+
+  FOR_FLOAT64_INPUTS(x) {
+    r.Call(x);
+    double expected = x;
+    for (int i = 0; i < 2; i++) {
+      double actual = ReadLittleEndianValue<double>(&g[i]);
+      if (std::isnan(expected)) {
+        CHECK(std::isnan(actual));
+      } else {
+        CHECK_EQ(actual, expected);
+      }
+    }
+  }
+}
+#endif  // V8_TARGET_ARCH_X64
+
 WASM_SIMD_TEST(F32x4Splat) {
   WasmRunner<int32_t, float> r(execution_tier, lower_simd);
   // Set up a global to hold output vector.
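FOR_FLOAT64_INPUTS includes NaNs, and under IEEE 754 NaN compares unequal to everything, itself included, which is why the F64x2Splat test above falls back to std::isnan instead of CHECK_EQ. A minimal standalone illustration:

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  assert(nan != nan);       // IEEE 754: NaN is unequal even to itself
  assert(std::isnan(nan));  // so identity is checked with isnan instead
}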