[wasm-simd] Implement i64x2 shifts for arm
Bug: v8:9813
Change-Id: Ibfac9453a035bb00020b4d062e1445410644f16a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1900662
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65087}
parent 46759fb37e
commit aafbc13834
@@ -1929,6 +1929,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vqsub(NeonS64, dst, dst, i.InputSimd128Register(0));
       break;
     }
+    case kArmI64x2Shl: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      Register shift = i.TempRegister(1);
+      // Take shift value modulo 64.
+      __ and_(shift, i.InputRegister(1), Operand(63));
+      // Only the least significant byte of each lane is used.
+      __ vdup(Neon32, tmp, shift);
+      __ vshl(NeonS64, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
+      break;
+    }
+    case kArmI64x2ShrS: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      Register shift = i.TempRegister(1);
+      // Take shift value modulo 64.
+      __ and_(shift, i.InputRegister(1), Operand(63));
+      // Only the least significant byte of each lane is used.
+      __ vdup(Neon32, tmp, shift);
+      __ vneg(Neon32, tmp, tmp);
+      __ vshl(NeonS64, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
+      break;
+    }
+    case kArmI64x2ShrU: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      Register shift = i.TempRegister(1);
+      // Take shift value modulo 64.
+      __ and_(shift, i.InputRegister(1), Operand(63));
+      // Only the least significant byte of each lane is used.
+      __ vdup(Neon32, tmp, shift);
+      __ vneg(Neon32, tmp, tmp);
+      __ vshl(NeonU64, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
+      break;
+    }
     case kArmF32x4Splat: {
       int src_code = i.InputFloatRegister(0).code();
       __ vdup(Neon32, i.OutputSimd128Register(),
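A note on the lowering in this hunk: ARM NEON has no right-shift-by-register instruction, so the right-shift cases splat the masked count into every lane, negate it with vneg, and use vshl, which shifts right when the per-lane count is negative. A minimal scalar sketch of what each case computes per 64-bit lane (the Model* helper names are invented for illustration, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // Per-lane model of kArmI64x2Shl / kArmI64x2ShrS / kArmI64x2ShrU.
    // The `& 63` mirrors the and_ instruction; the right shifts correspond
    // to vshl by the negated count after vneg.
    int64_t ModelShl(int64_t lane, int32_t shift) {
      return static_cast<int64_t>(static_cast<uint64_t>(lane) << (shift & 63));
    }
    int64_t ModelShrS(int64_t lane, int32_t shift) {
      return lane >> (shift & 63);  // vshl(NeonS64) by -(shift & 63)
    }
    uint64_t ModelShrU(uint64_t lane, int32_t shift) {
      return lane >> (shift & 63);  // vshl(NeonU64) by -(shift & 63)
    }

    int main() {
      printf("%lld\n", (long long)ModelShl(1, 65));   // 65 & 63 == 1 -> 2
      printf("%lld\n", (long long)ModelShrS(-8, 2));  // sign-extending -> -2
      printf("%llu\n", (unsigned long long)ModelShrU(1ull << 63, 63));  // -> 1
    }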
@@ -145,6 +145,9 @@ namespace compiler {
   V(ArmI64x2SplatI32Pair)       \
   V(ArmI64x2ReplaceLaneI32Pair) \
   V(ArmI64x2Neg)                \
+  V(ArmI64x2Shl)                \
+  V(ArmI64x2ShrS)               \
+  V(ArmI64x2ShrU)               \
   V(ArmF32x4Splat)              \
   V(ArmF32x4ExtractLane)        \
   V(ArmF32x4ReplaceLane)        \
@@ -125,6 +125,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArmI64x2SplatI32Pair:
     case kArmI64x2ReplaceLaneI32Pair:
     case kArmI64x2Neg:
+    case kArmI64x2Shl:
+    case kArmI64x2ShrS:
+    case kArmI64x2ShrU:
     case kArmF32x4Splat:
     case kArmF32x4ExtractLane:
     case kArmF32x4ReplaceLane:
@@ -2431,6 +2431,9 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   V(S1x16AllTrue, kArmS1x16AllTrue)

 #define SIMD_SHIFT_OP_LIST(V) \
+  V(I64x2Shl)                 \
+  V(I64x2ShrS)                \
+  V(I64x2ShrU)                \
   V(I32x4Shl)                 \
   V(I32x4ShrS)                \
   V(I32x4ShrU)                \
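SIMD_SHIFT_OP_LIST is an X-macro list: the selector expands it with a per-entry macro, so each new V(...) line above yields a visitor for free. A standalone sketch of that expansion pattern (simplified; the real per-entry macro generates InstructionSelector methods that select the kArm opcodes):

    #include <cstdio>

    // Standalone demo of the X-macro pattern: each entry in the list
    // expands through DECLARE_VISITOR into one visitor function.
    #define SIMD_SHIFT_OP_LIST(V) \
      V(I64x2Shl)                 \
      V(I64x2ShrS)                \
      V(I64x2ShrU)

    #define DECLARE_VISITOR(Name) \
      void Visit##Name() { printf("selected " #Name "\n"); }
    SIMD_SHIFT_OP_LIST(DECLARE_VISITOR)
    #undef DECLARE_VISITOR

    int main() {
      VisitI64x2Shl();
      VisitI64x2ShrS();
      VisitI64x2ShrU();
    }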
@@ -2625,12 +2625,9 @@ void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); }
 #if !V8_TARGET_ARCH_IA32
 void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
 #endif  // !V8_TARGET_ARCH_IA32
 void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
@@ -4351,6 +4351,9 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
         case Neon32:
           ShiftByRegister<int32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
           break;
+        case Neon64:
+          ShiftByRegister<int64_t, int64_t, kSimd128Size>(this, Vd, Vm, Vn);
+          break;
         default:
           UNREACHABLE();
           break;
@@ -4781,6 +4784,10 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
           ShiftByRegister<uint32_t, int32_t, kSimd128Size>(this, Vd, Vm,
                                                            Vn);
           break;
+        case Neon64:
+          ShiftByRegister<uint64_t, int64_t, kSimd128Size>(this, Vd, Vm,
+                                                           Vn);
+          break;
         default:
           UNREACHABLE();
           break;
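The two simulator hunks extend ShiftByRegister to 64-bit lanes. A plausible shape for what such a helper computes, assuming it mirrors NEON VSHL-by-register semantics (signed count in the low byte of each shift lane, negative meaning shift right); this is an illustrative reimplementation with a different signature, not the actual V8 template:

    #include <cstdint>
    #include <cstdio>
    #include <type_traits>

    // Illustrative stand-in for the simulator's ShiftByRegister: each lane
    // of dst is src shifted by the signed count in the low byte of the
    // matching shift lane; a negative count shifts right instead.
    template <typename T, int kLanes>
    void ShiftLanesByRegister(T (&dst)[kLanes], const T (&src)[kLanes],
                              const int64_t (&shift)[kLanes]) {
      for (int i = 0; i < kLanes; i++) {
        int8_t count = static_cast<int8_t>(shift[i] & 0xFF);
        using U = typename std::make_unsigned<T>::type;
        if (count >= 0) {
          dst[i] = static_cast<T>(static_cast<U>(src[i]) << count);
        } else {
          dst[i] = static_cast<T>(src[i] >> -count);  // arithmetic iff T signed
        }
      }
    }

    int main() {
      int64_t src[2] = {-8, 16};
      int64_t left[2] = {1, 1}, right[2] = {-2, -2};
      int64_t out[2];
      ShiftLanesByRegister(out, src, left);
      printf("%lld %lld\n", (long long)out[0], (long long)out[1]);  // -16 32
      ShiftLanesByRegister(out, src, right);
      printf("%lld %lld\n", (long long)out[0], (long long)out[1]);  // -2 4
    }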
@@ -983,7 +983,6 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2Neg) {
                     base::NegateWithWraparound);
 }

-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
 void RunI64x2ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                          WasmOpcode opcode, Int64ShiftOp expected_op) {
   // Intentionally shift by 64, should be no-op.
@@ -1025,6 +1024,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
                     LogicalShiftRight);
 }

+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
 void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                        WasmOpcode opcode, Int64BinOp expected_op) {
   WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
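For context on the "Intentionally shift by 64" comment: wasm takes shift counts modulo the lane width, so a count of 64 acts like 0 for i64x2. A hedged sketch of the RunI64x2ShiftOpTest shape (types and inputs are illustrative; the real harness builds and runs generated wasm code via WasmRunner and compares every lane):

    #include <cassert>
    #include <cstdint>

    // Sketch of the parameterized-shift-test pattern: the test takes a
    // scalar reference op (Int64ShiftOp) and checks the SIMD result
    // against it; here we only model the mod-64 no-op property.
    using Int64ShiftOp = int64_t (*)(int64_t, int);

    int64_t LogicalShiftRight(int64_t v, int shift) {
      return static_cast<int64_t>(static_cast<uint64_t>(v) >> (shift & 63));
    }

    void RunShiftOpTestModel(Int64ShiftOp expected_op) {
      const int64_t inputs[] = {0, 1, -1, INT64_MIN, INT64_MAX};
      for (int64_t x : inputs) {
        // Wasm takes i64x2 shift counts mod 64, so 64 must act like 0.
        assert(expected_op(x, 64) == expected_op(x, 0));
      }
    }

    int main() {
      RunShiftOpTestModel(LogicalShiftRight);
    }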