[wasm-simd] Implement v128.andnot for ia32
Bug: v8:10082
Change-Id: I745cb99ba12d4e8c0ecd9a89bfa596f1bc1f9597
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1980835
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65849}
parent 158d3039f9
commit aa12b60b36
@@ -3679,6 +3679,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vxorps(dst, kScratchDoubleReg, i.InputSimd128Register(2));
       break;
     }
+    case kIA32S128AndNot: {
+      XMMRegister dst = i.OutputSimd128Register();
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      // The inputs have been inverted by instruction selector, so we can call
+      // andnps here without any modifications.
+      XMMRegister src1 = i.InputSimd128Register(1);
+      __ Andnps(dst, src1);
+      break;
+    }
     case kIA32S8x16Swizzle: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
       XMMRegister dst = i.OutputSimd128Register();
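Note on the operand order relied on above: SSE andnps negates its destination operand, not the source, i.e. andnps dst, src leaves (~dst) & src in dst. A minimal sketch of the same convention via the _mm_andnot_ps intrinsic (illustrative only, not part of the commit):

    #include <xmmintrin.h>

    // Wasm's v128.andnot semantics are a & ~b. Because _mm_andnot_ps(x, y)
    // computes (~x) & y, passing the operands in swapped order yields the
    // desired result, mirroring what the ia32 instruction selector does below.
    __m128 WasmAndNot(__m128 a, __m128 b) {
      return _mm_andnot_ps(b, a);  // (~b) & a == a & ~b
    }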
@@ -345,6 +345,7 @@ namespace compiler {
   V(AVXS128Xor)          \
   V(SSES128Select)       \
   V(AVXS128Select)       \
+  V(IA32S128AndNot)      \
   V(IA32S8x16Swizzle)    \
   V(IA32S8x16Shuffle)    \
   V(IA32I16x8Load8x8S)   \
@@ -326,6 +326,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kAVXS128Xor:
     case kSSES128Select:
     case kAVXS128Select:
+    case kIA32S128AndNot:
     case kIA32S8x16Swizzle:
     case kIA32S8x16Shuffle:
     case kIA32S32x4Swizzle:
@@ -2282,6 +2282,13 @@ void InstructionSelector::VisitS128Select(Node* node) {
   }
 }
 
+void InstructionSelector::VisitS128AndNot(Node* node) {
+  IA32OperandGenerator g(this);
+  // andnps a b does ~a & b, but we want a & !b, so flip the input.
+  Emit(kIA32S128AndNot, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+}
+
 #define VISIT_SIMD_SPLAT(Type)                               \
   void InstructionSelector::Visit##Type##Splat(Node* node) { \
     VisitRO(this, node, kIA32##Type##Splat);                 \
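Because the selector ties the output to its first emitted input (DefineSameAsFirst) and passes the wasm operands in reverse order, the generated andnps computes exactly the wasm a & ~b. A scalar model of that sequence, with names chosen here purely for illustration:

    #include <cassert>
    #include <cstdint>

    // Per-lane model of what the selected code executes: dst is tied to the
    // first emitted input, which is wasm operand b after the swap.
    uint32_t EmittedAndNotLane(uint32_t a, uint32_t b) {
      uint32_t dst = b;   // DefineSameAsFirst: dst starts as InputAt(1) == b
      dst = ~dst & a;     // andnps dst, a
      return dst;         // == a & ~b, the wasm v128.andnot result
    }

    int main() {
      assert(EmittedAndNotLane(0xF0F0F0F0u, 0x0FF00FF0u) == 0xF000F000u);
      return 0;
    }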
@@ -2632,9 +2632,6 @@ void InstructionSelector::VisitF64x2SConvertI64x2(Node* node) {
 void InstructionSelector::VisitF64x2UConvertI64x2(Node* node) {
   UNIMPLEMENTED();
 }
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitS128AndNot(Node* node) { UNIMPLEMENTED(); }
-#endif  // !V8_TARGET_ARCH_ARM
 void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
@@ -1908,12 +1908,10 @@ WASM_SIMD_TEST(S128Xor) {
   RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Xor, Xor);
 }
 
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
 // Bitwise operation, doesn't really matter what simd type we test it with.
 WASM_SIMD_TEST_NO_LOWERING(S128AndNot) {
   RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128AndNot, AndNot);
 }
-#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
 
 WASM_SIMD_TEST(I32x4Eq) {
   RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Eq, Equal);
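RunI32x4BinOpTest checks each lane of the wasm result against a scalar reference callback; for this test that callback is presumably a per-lane a & ~b helper along these lines (name and template shape assumed for illustration, not copied from the test sources):

    #include <cassert>
    #include <cstdint>

    // Scalar reference the harness presumably compares each 32-bit lane against.
    template <typename T>
    T AndNot(T a, T b) {
      return a & ~b;
    }

    int main() {
      assert(AndNot<uint32_t>(0xFFFF0000u, 0x00FF00FFu) == 0xFF000000u);
      return 0;
    }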