diff --git a/src/codegen/external-reference.cc b/src/codegen/external-reference.cc
index 4c97fd6cc5..4bb3e99851 100644
--- a/src/codegen/external-reference.cc
+++ b/src/codegen/external-reference.cc
@@ -135,43 +135,6 @@ constexpr struct alignas(16) {
} wasm_uint32_max_as_double = {uint64_t{0x41efffffffe00000},
uint64_t{0x41efffffffe00000}};
-// Helper masks used for i32x4.widen_i8x16_{s,u}.
-constexpr struct alignas(16) {
- uint64_t a;
- uint64_t b;
-} i32x4_widen_i8x16_s1_mask = {uint64_t{0x05FFFFFF'04FFFFFF},
- uint64_t{0x07FFFFFF'06FFFFFF}};
-
-constexpr struct alignas(16) {
- uint64_t a;
- uint64_t b;
-} i32x4_widen_i8x16_s2_mask = {uint64_t{0x09FFFFFF'08FFFFFF},
- uint64_t{0x0BFFFFFF'0AFFFFFF}};
-
-constexpr struct alignas(16) {
- uint64_t a;
- uint64_t b;
-} i32x4_widen_i8x16_s3_mask = {uint64_t{0x0DFFFFFF'0CFFFFFF},
- uint64_t{0x0FFFFFFF'0EFFFFFF}};
-
-constexpr struct alignas(16) {
- uint64_t a;
- uint64_t b;
-} i32x4_widen_i8x16_u1_mask = {uint64_t{0xFFFFFF05'FFFFFF04},
- uint64_t{0xFFFFFF07'FFFFFF06}};
-
-constexpr struct alignas(16) {
- uint64_t a;
- uint64_t b;
-} i32x4_widen_i8x16_u2_mask = {uint64_t{0xFFFFFF09'FFFFFF08},
- uint64_t{0xFFFFFF0B'FFFFFF0A}};
-
-constexpr struct alignas(16) {
- uint64_t a;
- uint64_t b;
-} i32x4_widen_i8x16_u3_mask = {uint64_t{0xFFFFFF0D'FFFFFF0C},
- uint64_t{0xFFFFFF0F'FFFFFF0E}};
-
// Implementation of ExternalReference
static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
@@ -631,36 +594,6 @@ ExternalReference ExternalReference::address_of_wasm_uint32_max_as_double() {
reinterpret_cast<Address>(&wasm_uint32_max_as_double));
}
-ExternalReference ExternalReference::address_of_i32x4_widen_i8x16_s1_mask() {
- return ExternalReference(
- reinterpret_cast<Address>(&i32x4_widen_i8x16_s1_mask));
-}
-
-ExternalReference ExternalReference::address_of_i32x4_widen_i8x16_s2_mask() {
- return ExternalReference(
- reinterpret_cast<Address>(&i32x4_widen_i8x16_s2_mask));
-}
-
-ExternalReference ExternalReference::address_of_i32x4_widen_i8x16_s3_mask() {
- return ExternalReference(
- reinterpret_cast<Address>(&i32x4_widen_i8x16_s3_mask));
-}
-
-ExternalReference ExternalReference::address_of_i32x4_widen_i8x16_u1_mask() {
- return ExternalReference(
- reinterpret_cast<Address>(&i32x4_widen_i8x16_u1_mask));
-}
-
-ExternalReference ExternalReference::address_of_i32x4_widen_i8x16_u2_mask() {
- return ExternalReference(
- reinterpret_cast<Address>(&i32x4_widen_i8x16_u2_mask));
-}
-
-ExternalReference ExternalReference::address_of_i32x4_widen_i8x16_u3_mask() {
- return ExternalReference(
- reinterpret_cast<Address>(&i32x4_widen_i8x16_u3_mask));
-}
-
ExternalReference
ExternalReference::address_of_enable_experimental_regexp_engine() {
return ExternalReference(&FLAG_enable_experimental_regexp_engine);
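Background on the deleted mask tables (an illustrative aside, not part of the patch): each 16-byte constant is a PSHUFB control vector. The unsigned variants place source byte 4*k+i into the low byte of destination word i and zero the remaining bytes via the 0xFF control bytes; the signed variants place it into the top byte so that a later arithmetic shift right by 24 sign-extends it. A minimal standalone sketch using a scalar model of PSHUFB (helper names are invented, and the byte view of the constants assumes a little-endian host, which is the only place these masks were used):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Scalar model of PSHUFB: a control byte with its high bit set writes 0,
    // otherwise it selects src[control & 0x0F].
    void EmulatePshufb(const uint8_t src[16], const uint8_t ctrl[16],
                       uint8_t dst[16]) {
      for (int i = 0; i < 16; i++) {
        dst[i] = (ctrl[i] & 0x80) ? 0 : src[ctrl[i] & 0x0F];
      }
    }

    int main() {
      uint8_t src[16];
      for (int i = 0; i < 16; i++) src[i] = static_cast<uint8_t>(0x80 + i);

      // i32x4_widen_i8x16_u1_mask viewed as bytes: source bytes 4..7 land in
      // the low byte of destination words 0..3, everything else becomes 0.
      const uint64_t u1[2] = {0xFFFFFF05FFFFFF04ull, 0xFFFFFF07FFFFFF06ull};
      uint8_t ctrl[16], dst[16];
      std::memcpy(ctrl, u1, sizeof(ctrl));
      EmulatePshufb(src, ctrl, dst);

      uint32_t lanes[4];
      std::memcpy(lanes, dst, sizeof(lanes));
      for (int i = 0; i < 4; i++) {
        std::printf("lane %d = 0x%08x\n", i, lanes[i]);  // expect 0x00000084..87
      }
      return 0;
    }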
diff --git a/src/codegen/external-reference.h b/src/codegen/external-reference.h
index ab0254da3a..6873546f9d 100644
--- a/src/codegen/external-reference.h
+++ b/src/codegen/external-reference.h
@@ -249,12 +249,6 @@ class StatsCounter;
V(address_of_wasm_double_2_power_52, "wasm_double_2_power_52") \
V(address_of_wasm_int32_max_as_double, "wasm_int32_max_as_double") \
V(address_of_wasm_uint32_max_as_double, "wasm_uint32_max_as_double") \
- V(address_of_i32x4_widen_i8x16_u1_mask, "i32x4_widen_i8x16_u1_mask") \
- V(address_of_i32x4_widen_i8x16_u2_mask, "i32x4_widen_i8x16_u2_mask") \
- V(address_of_i32x4_widen_i8x16_u3_mask, "i32x4_widen_i8x16_u3_mask") \
- V(address_of_i32x4_widen_i8x16_s1_mask, "i32x4_widen_i8x16_s1_mask") \
- V(address_of_i32x4_widen_i8x16_s2_mask, "i32x4_widen_i8x16_s2_mask") \
- V(address_of_i32x4_widen_i8x16_s3_mask, "i32x4_widen_i8x16_s3_mask") \
V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
V(call_enter_context_function, "call_enter_context_function") \
diff --git a/src/codegen/x64/macro-assembler-x64.cc b/src/codegen/x64/macro-assembler-x64.cc
index 20415c4cc0..091121e426 100644
--- a/src/codegen/x64/macro-assembler-x64.cc
+++ b/src/codegen/x64/macro-assembler-x64.cc
@@ -2115,19 +2115,6 @@ void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src,
}
}
-void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpshufb(dst, src, mask);
- } else {
- if (dst != src) {
- movaps(dst, src);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pshufb(dst, mask);
- }
-}
-
void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
if (CpuFeatures::IsSupported(AVX)) {
diff --git a/src/codegen/x64/macro-assembler-x64.h b/src/codegen/x64/macro-assembler-x64.h
index b8f3b851ae..83ed4e5ae0 100644
--- a/src/codegen/x64/macro-assembler-x64.h
+++ b/src/codegen/x64/macro-assembler-x64.h
@@ -285,11 +285,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP_SSE4_1(Pblendw, pblendw)
AVX_OP_SSE4_1(Ptest, ptest)
AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
- AVX_OP_SSE4_1(Pmovsxbd, pmovsxbd)
AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
- AVX_OP_SSE4_1(Pmovzxbd, pmovzxbd)
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
AVX_OP_SSE4_1(Pextrb, pextrb)
@@ -595,7 +593,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Supports both SSE and AVX. Move src1 to dst if they are not equal on SSE.
void Pshufb(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Pshufb(XMMRegister dst, XMMRegister src1, Operand src2);
void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
// These Wasm SIMD ops do not have direct lowerings on x64. These
diff --git a/src/codegen/x64/sse-instr.h b/src/codegen/x64/sse-instr.h
index d5f6902bf8..452cc0f690 100644
--- a/src/codegen/x64/sse-instr.h
+++ b/src/codegen/x64/sse-instr.h
@@ -171,11 +171,9 @@
#define SSE4_UNOP_INSTRUCTION_LIST(V) \
V(ptest, 66, 0F, 38, 17) \
V(pmovsxbw, 66, 0F, 38, 20) \
- V(pmovsxbd, 66, 0F, 38, 21) \
V(pmovsxwd, 66, 0F, 38, 23) \
V(pmovsxdq, 66, 0F, 38, 25) \
V(pmovzxbw, 66, 0F, 38, 30) \
- V(pmovzxbd, 66, 0F, 38, 31) \
V(pmovzxwd, 66, 0F, 38, 33) \
V(pmovzxdq, 66, 0F, 38, 35)
diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc
index d8652df8e1..5b9c2e4d4f 100644
--- a/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -2405,66 +2405,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addp(i.OutputSimd128Register().V4S(), tmp1, tmp2);
break;
}
- case kArm64I32x4WidenI8x16S: {
- VRegister dst = i.OutputSimd128Register();
- VRegister src = i.InputSimd128Register(0);
- uint8_t laneidx = MiscField::decode(instr->opcode());
- switch (laneidx) {
- case 0: {
- __ Sxtl(dst.V8H(), src.V8B());
- __ Sxtl(dst.V4S(), dst.V4H());
- break;
- }
- case 1: {
- __ Sxtl(dst.V8H(), src.V8B());
- __ Sxtl2(dst.V4S(), dst.V8H());
- break;
- }
- case 2: {
- __ Sxtl2(dst.V8H(), src.V16B());
- __ Sxtl(dst.V4S(), dst.V4H());
- break;
- }
- case 3: {
- __ Sxtl2(dst.V8H(), src.V16B());
- __ Sxtl2(dst.V4S(), dst.V8H());
- break;
- }
- default:
- UNREACHABLE();
- }
- break;
- }
- case kArm64I32x4WidenI8x16U: {
- VRegister dst = i.OutputSimd128Register();
- VRegister src = i.InputSimd128Register(0);
- uint8_t laneidx = MiscField::decode(instr->opcode());
- switch (laneidx) {
- case 0: {
- __ Uxtl(dst.V8H(), src.V8B());
- __ Uxtl(dst.V4S(), dst.V4H());
- break;
- }
- case 1: {
- __ Uxtl(dst.V8H(), src.V8B());
- __ Uxtl2(dst.V4S(), dst.V8H());
- break;
- }
- case 2: {
- __ Uxtl2(dst.V8H(), src.V16B());
- __ Uxtl(dst.V4S(), dst.V4H());
- break;
- }
- case 3: {
- __ Uxtl2(dst.V8H(), src.V16B());
- __ Uxtl2(dst.V4S(), dst.V8H());
- break;
- }
- default:
- UNREACHABLE();
- }
- break;
- }
case kArm64I16x8Splat: {
__ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
break;
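For readers skimming the arm64 removal above (illustrative note, not part of the patch): the deleted sequences widen one group of four bytes by chaining two extensions, with Sxtl/Sxtl2 picking the low or high half at each step, so the lane index selects bytes 4*laneidx..4*laneidx+3. A scalar sketch of the resulting semantics, with an invented function name:

    #include <cstdint>

    // What the removed kArm64I32x4WidenI8x16S case computed, lane by lane.
    // The first extension chooses the low (laneidx < 2) or high byte half,
    // the second chooses the low (even laneidx) or high word half.
    void WidenI8x16S(const int8_t src[16], uint8_t laneidx, int32_t dst[4]) {
      for (int i = 0; i < 4; i++) {
        dst[i] = static_cast<int32_t>(src[laneidx * 4 + i]);
      }
    }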
diff --git a/src/compiler/backend/arm64/instruction-codes-arm64.h b/src/compiler/backend/arm64/instruction-codes-arm64.h
index cadba1501f..ee2c20372e 100644
--- a/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -273,8 +273,6 @@ namespace compiler {
V(Arm64I32x4DotI16x8S) \
V(Arm64I32x4TruncSatF64x2SZero) \
V(Arm64I32x4TruncSatF64x2UZero) \
- V(Arm64I32x4WidenI8x16S) \
- V(Arm64I32x4WidenI8x16U) \
V(Arm64I16x8Splat) \
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
diff --git a/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 2dd96da2fb..a384a84479 100644
--- a/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -242,8 +242,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4DotI16x8S:
case kArm64I32x4TruncSatF64x2SZero:
case kArm64I32x4TruncSatF64x2UZero:
- case kArm64I32x4WidenI8x16S:
- case kArm64I32x4WidenI8x16U:
case kArm64I16x8Splat:
case kArm64I16x8ExtractLaneU:
case kArm64I16x8ExtractLaneS:
diff --git a/src/compiler/backend/arm64/instruction-selector-arm64.cc b/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 64ced3c4f5..8584fb580a 100644
--- a/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -3966,24 +3966,6 @@ void InstructionSelector::VisitI8x16Popcnt(Node* node) {
VisitRR(this, code, node);
}
-void InstructionSelector::VisitI32x4WidenI8x16S(Node* node) {
- InstructionCode opcode = kArm64I32x4WidenI8x16S;
- uint8_t laneidx = OpParameter<uint8_t>(node->op());
- DCHECK_GT(4, laneidx);
- opcode |= LaneSizeField::encode(laneidx);
- Arm64OperandGenerator g(this);
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitI32x4WidenI8x16U(Node* node) {
- InstructionCode opcode = kArm64I32x4WidenI8x16U;
- uint8_t laneidx = OpParameter<uint8_t>(node->op());
- DCHECK_GT(4, laneidx);
- opcode |= LaneSizeField::encode(laneidx);
- Arm64OperandGenerator g(this);
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-}
-
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc
index 2611307429..1cea2aea35 100644
--- a/src/compiler/backend/instruction-selector.cc
+++ b/src/compiler/backend/instruction-selector.cc
@@ -2138,10 +2138,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4TruncSatF64x2SZero(node);
case IrOpcode::kI32x4TruncSatF64x2UZero:
return MarkAsSimd128(node), VisitI32x4TruncSatF64x2UZero(node);
- case IrOpcode::kI32x4WidenI8x16S:
- return MarkAsSimd128(node), VisitI32x4WidenI8x16S(node);
- case IrOpcode::kI32x4WidenI8x16U:
- return MarkAsSimd128(node), VisitI32x4WidenI8x16U(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
@@ -2794,13 +2790,6 @@ void InstructionSelector::VisitI64x2SignSelect(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
// && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
-// TODO(v8:11297) Prototype i32x4.widen_i8x16_u
-void InstructionSelector::VisitI32x4WidenI8x16S(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4WidenI8x16U(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
-
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
diff --git a/src/compiler/backend/x64/code-generator-x64.cc b/src/compiler/backend/x64/code-generator-x64.cc
index ef42c2574f..2d74277c4f 100644
--- a/src/compiler/backend/x64/code-generator-x64.cc
+++ b/src/compiler/backend/x64/code-generator-x64.cc
@@ -3723,49 +3723,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
/*is_signed=*/false);
break;
}
- case kX64I32x4WidenI8x16S: {
- uint8_t laneidx = static_cast<uint8_t>(MiscField::decode(opcode));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (laneidx == 0) {
- __ Pmovsxbd(dst, src);
- break;
- }
-
- ExternalReference mask;
- if (laneidx == 1) {
- mask = ExternalReference::address_of_i32x4_widen_i8x16_s1_mask();
- } else if (laneidx == 2) {
- mask = ExternalReference::address_of_i32x4_widen_i8x16_s2_mask();
- } else {
- DCHECK_EQ(3, laneidx);
- mask = ExternalReference::address_of_i32x4_widen_i8x16_s3_mask();
- }
- __ Pshufb(dst, src, __ ExternalReferenceAsOperand(mask));
- __ Psrad(dst, byte{24});
- break;
- }
- case kX64I32x4WidenI8x16U: {
- uint8_t laneidx = static_cast<uint8_t>(MiscField::decode(opcode));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (laneidx == 0) {
- __ Pmovzxbd(dst, src);
- break;
- }
-
- ExternalReference mask;
- if (laneidx == 1) {
- mask = ExternalReference::address_of_i32x4_widen_i8x16_u1_mask();
- } else if (laneidx == 2) {
- mask = ExternalReference::address_of_i32x4_widen_i8x16_u2_mask();
- } else {
- DCHECK_EQ(3, laneidx);
- mask = ExternalReference::address_of_i32x4_widen_i8x16_u3_mask();
- }
- __ Pshufb(dst, src, __ ExternalReferenceAsOperand(mask));
- break;
- }
case kX64I64x2SignSelect: {
__ Blendvpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), i.InputSimd128Register(2));
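A note on the removed x64 lowering above (illustrative, not part of the patch): lane group 0 maps directly to pmovsxbd/pmovzxbd, while the other groups go through the shuffle masks deleted in external-reference.cc, and the signed variant then needs Psrad(dst, 24) because the shuffle parked the source byte in bits 24..31 of each lane. The shift trick in scalar form, assuming the usual two's-complement arithmetic right shift of signed values on x64 targets (function name invented):

    #include <cstdint>

    int32_t SignExtendByteViaShift(uint8_t b) {
      // The shuffle leaves the source byte in the top byte of the lane...
      uint32_t shuffled = static_cast<uint32_t>(b) << 24;
      // ...and an arithmetic shift right by 24 replicates its sign bit,
      // i.e. sign-extends the 8-bit value to 32 bits.
      return static_cast<int32_t>(shuffled) >> 24;
    }
    // SignExtendByteViaShift(0x7f) == 127, SignExtendByteViaShift(0x80) == -128.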
diff --git a/src/compiler/backend/x64/instruction-codes-x64.h b/src/compiler/backend/x64/instruction-codes-x64.h
index f2cbcb4c78..6c48a04ea1 100644
--- a/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/src/compiler/backend/x64/instruction-codes-x64.h
@@ -266,8 +266,6 @@ namespace compiler {
V(X64I32x4ExtAddPairwiseI16x8U) \
V(X64I32x4TruncSatF64x2SZero) \
V(X64I32x4TruncSatF64x2UZero) \
- V(X64I32x4WidenI8x16S) \
- V(X64I32x4WidenI8x16U) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneS) \
V(X64I16x8SConvertI8x16Low) \
diff --git a/src/compiler/backend/x64/instruction-scheduler-x64.cc b/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 0a5026e370..2ecbab8f50 100644
--- a/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -242,8 +242,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4ExtAddPairwiseI16x8U:
case kX64I32x4TruncSatF64x2SZero:
case kX64I32x4TruncSatF64x2UZero:
- case kX64I32x4WidenI8x16S:
- case kX64I32x4WidenI8x16U:
case kX64I16x8Splat:
case kX64I16x8ExtractLaneS:
case kX64I16x8SConvertI8x16Low:
diff --git a/src/compiler/backend/x64/instruction-selector-x64.cc b/src/compiler/backend/x64/instruction-selector-x64.cc
index 094cb5ec94..a8c24d2a43 100644
--- a/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -3754,26 +3754,6 @@ void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
Emit(kX64I32x4TruncSatF64x2UZero, dst, g.UseRegister(node->InputAt(0)));
}
-namespace {
-void VisitWiden(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- uint8_t laneidx = OpParameter<uint8_t>(node->op());
- InstructionOperand dst = CpuFeatures::IsSupported(AVX)
- ? g.DefineAsRegister(node)
- : g.DefineSameAsFirst(node);
- selector->Emit(opcode | MiscField::encode(laneidx), dst,
- g.UseRegister(node->InputAt(0)));
-}
-} // namespace
-
-void InstructionSelector::VisitI32x4WidenI8x16S(Node* node) {
- VisitWiden(this, node, kX64I32x4WidenI8x16S);
-}
-
-void InstructionSelector::VisitI32x4WidenI8x16U(Node* node) {
- VisitWiden(this, node, kX64I32x4WidenI8x16U);
-}
-
void InstructionSelector::VisitI64x2GtS(Node* node) {
X64OperandGenerator g(this);
if (CpuFeatures::IsSupported(AVX)) {
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 692f3d919a..f90f9345a3 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -1821,18 +1821,6 @@ const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
return &cache_.kWord64PoisonOnSpeculation;
}
-const Operator* MachineOperatorBuilder::I32x4WidenI8x16S(uint8_t laneidx) {
- return zone_->New<Operator1<uint8_t>>(IrOpcode::kI32x4WidenI8x16S,
- Operator::kPure, "I32x4WidenI8x16S", 1,
- 0, 0, 1, 0, 0, laneidx);
-}
-
-const Operator* MachineOperatorBuilder::I32x4WidenI8x16U(uint8_t laneidx) {
- return zone_->New<Operator1<uint8_t>>(IrOpcode::kI32x4WidenI8x16U,
- Operator::kPure, "I32x4WidenI8x16U", 1,
- 0, 0, 1, 0, 0, laneidx);
-}
-
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane##Sign( \
int32_t lane_index) { \
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 3d665f4330..c798580845 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -729,8 +729,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4ExtAddPairwiseI16x8U();
const Operator* I32x4TruncSatF64x2SZero();
const Operator* I32x4TruncSatF64x2UZero();
- const Operator* I32x4WidenI8x16S(uint8_t laneidx);
- const Operator* I32x4WidenI8x16U(uint8_t laneidx);
const Operator* I16x8Splat();
const Operator* I16x8ExtractLaneU(int32_t);
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index 5ba6746bf5..bd2011ada2 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -888,8 +888,6 @@
V(I32x4ExtAddPairwiseI16x8U) \
V(I32x4TruncSatF64x2SZero) \
V(I32x4TruncSatF64x2UZero) \
- V(I32x4WidenI8x16S) \
- V(I32x4WidenI8x16U) \
V(I16x8Splat) \
V(I16x8ExtractLaneU) \
V(I16x8ExtractLaneS) \
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 8b76766c25..86cb5566f2 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -5266,12 +5266,6 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
- case wasm::kExprI32x4WidenI8x16S:
- return graph()->NewNode(mcgraph()->machine()->I32x4WidenI8x16S(lane),
- inputs[0]);
- case wasm::kExprI32x4WidenI8x16U:
- return graph()->NewNode(mcgraph()->machine()->I32x4WidenI8x16U(lane),
- inputs[0]);
case wasm::kExprF64x2ExtractLane:
return graph()->NewNode(mcgraph()->machine()->F64x2ExtractLane(lane),
inputs[0]);
diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h
index bcf13568f8..419e06fdb4 100644
--- a/src/wasm/function-body-decoder-impl.h
+++ b/src/wasm/function-body-decoder-impl.h
@@ -1525,8 +1525,6 @@ class WasmDecoder : public Decoder {
case kExprI32x4ReplaceLane:
case kExprS128Load32Lane:
case kExprS128Store32Lane:
- case kExprI32x4WidenI8x16S:
- case kExprI32x4WidenI8x16U:
num_lanes = 4;
break;
case kExprI16x8ExtractLaneS:
@@ -2094,7 +2092,6 @@ class WasmDecoder : public Decoder {
opcode = this->read_prefixed_opcode<validate>(pc);
switch (opcode) {
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_SIMD_POST_MVP_ONE_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
return {1, 1};
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_SIMD_MASK_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
@@ -3857,10 +3854,6 @@ class WasmFullDecoder : public WasmDecoder<validate, Interface> {
case kExprPrefetchNT: {
return SimdPrefetch(opcode_length, /*temporal=*/false);
}
- case kExprI32x4WidenI8x16S:
- case kExprI32x4WidenI8x16U: {
- return SimdExtractLane(opcode, kWasmS128, opcode_length);
- }
default: {
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
diff --git a/src/wasm/wasm-opcodes-inl.h b/src/wasm/wasm-opcodes-inl.h
index 194baf3efe..d1336e7b66 100644
--- a/src/wasm/wasm-opcodes-inl.h
+++ b/src/wasm/wasm-opcodes-inl.h
@@ -371,9 +371,6 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(PrefetchT, "prefetch_t")
CASE_OP(PrefetchNT, "prefetch_nt")
- CASE_I32x4_OP(WidenI8x16S, "widen_i8x16_s")
- CASE_I32x4_OP(WidenI8x16U, "widen_i8x16_u")
-
CASE_F64x2_OP(ConvertLowI32x4S, "convert_low_i32x4_s")
CASE_F64x2_OP(ConvertLowI32x4U, "convert_low_i32x4_u")
CASE_I32x4_OP(TruncSatF64x2SZero, "trunc_sat_f64x2_s_zero")
@@ -560,7 +557,6 @@ constexpr bool WasmOpcodes::IsSimdPostMvpOpcode(WasmOpcode opcode) {
#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
FOREACH_SIMD_POST_MVP_OPCODE(CHECK_OPCODE)
FOREACH_SIMD_POST_MVP_MEM_OPCODE(CHECK_OPCODE)
- FOREACH_SIMD_POST_MVP_ONE_OPERAND_OPCODE(CHECK_OPCODE)
#undef CHECK_OPCODE
return true;
default:
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index 63b232cf3e..6c992dcc57 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -530,10 +530,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(F32x4RecipApprox, 0xfdb3, s_s) \
V(F32x4RecipSqrtApprox, 0xfdbc, s_s)
-#define FOREACH_SIMD_POST_MVP_ONE_OPERAND_OPCODE(V) \
- V(I32x4WidenI8x16S, 0xfd67, s_s) \
- V(I32x4WidenI8x16U, 0xfd68, s_s)
-
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
V(I8x16ExtractLaneS, 0xfd15, _) \
V(I8x16ExtractLaneU, 0xfd16, _) \
@@ -558,8 +554,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
- FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
- FOREACH_SIMD_POST_MVP_ONE_OPERAND_OPCODE(V)
+ FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
#define FOREACH_SIMD_OPCODE(V) \
FOREACH_SIMD_0_OPERAND_OPCODE(V) \
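On the deleted opcode definitions above (a sketch under stated assumptions, not taken from this CL): 0xfd is the SIMD prefix, the opcode value itself is LEB128-encoded, and these instructions carried a single lane-index immediate byte, which is why the decoder changes below grouped them with the other one-lane-operand opcodes. A hypothetical encoder for i32x4.widen_i8x16_s, with an invented function name:

    #include <cstdint>
    #include <vector>

    // 0x67 < 0x80, so its LEB128 encoding is the single byte 0x67.
    std::vector<uint8_t> EncodeWidenI8x16S(uint8_t laneidx) {
      return {0xfd, 0x67, laneidx};
    }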
diff --git a/test/cctest/wasm/test-run-wasm-simd.cc b/test/cctest/wasm/test-run-wasm-simd.cc
index 15c8d90708..7117c4fe32 100644
--- a/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/test/cctest/wasm/test-run-wasm-simd.cc
@@ -2220,82 +2220,6 @@ WASM_SIMD_TEST(I32x4ShrU) {
LogicalShiftRight);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-// TODO(v8:11297) Prototype i32x4.widen_i8x16_{u,s}
-WASM_SIMD_TEST_NO_LOWERING(I32x4WidenI8x16U) {
- FLAG_SCOPE(wasm_simd_post_mvp);
-
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- uint32_t* g0 = r.builder().AddGlobal<uint32_t>(kWasmS128);
- uint32_t* g1 = r.builder().AddGlobal<uint32_t>(kWasmS128);
- uint32_t* g2 = r.builder().AddGlobal<uint32_t>(kWasmS128);
- uint32_t* g3 = r.builder().AddGlobal<uint32_t>(kWasmS128);
- byte arg = 0;
-
-#define COPY_PARAM_TO_I32X4_LANE(idx) \
- WASM_SIMD_I32x4_REPLACE_LANE(idx, WASM_GLOBAL_GET(idx), WASM_LOCAL_GET(arg))
-#define WIDEN(idx) WASM_SIMD_OP(kExprI32x4WidenI8x16U), idx, kExprGlobalSet, idx
- BUILD(r,
- // g0 = widen_u([arg, 0, 0, 0], 0)
- COPY_PARAM_TO_I32X4_LANE(0), WIDEN(0),
- // g1 = widen_u([0, arg, 0, 0], 1)
- COPY_PARAM_TO_I32X4_LANE(1), WIDEN(1),
- // g2 = widen_u([0, 0, arg, 0], 2)
- COPY_PARAM_TO_I32X4_LANE(2), WIDEN(2),
- // g3 = widen_u([0, 0, 0, arg], 3)
- COPY_PARAM_TO_I32X4_LANE(3), WIDEN(3), WASM_ONE);
-#undef WIDEN
-#undef COPY_PARAM_TO_I32X4_LANE
-
- FOR_UINT8_INPUTS(x) {
- r.Call(x << 24 | x << 16 | x << 8 | x);
- uint32_t expected = static_cast<uint32_t>(x);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<uint32_t>(&g0[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<uint32_t>(&g1[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<uint32_t>(&g2[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<uint32_t>(&g3[i]));
- }
- }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I32x4WidenI8x16S) {
- FLAG_SCOPE(wasm_simd_post_mvp);
-
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
- int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
- int32_t* g2 = r.builder().AddGlobal<int32_t>(kWasmS128);
- int32_t* g3 = r.builder().AddGlobal<int32_t>(kWasmS128);
- byte arg = 0;
-#define COPY_PARAM_TO_I32X4_LANE(idx) \
- WASM_SIMD_I32x4_REPLACE_LANE(idx, WASM_GLOBAL_GET(idx), WASM_LOCAL_GET(arg))
-#define WIDEN(idx) WASM_SIMD_OP(kExprI32x4WidenI8x16S), idx, kExprGlobalSet, idx
- BUILD(r,
- // g0 = widen_s([arg, 0, 0, 0], 0)
- COPY_PARAM_TO_I32X4_LANE(0), WIDEN(0),
- // g1 = widen_s([0, arg, 0, 0], 1)
- COPY_PARAM_TO_I32X4_LANE(1), WIDEN(1),
- // g2 = widen_s([0, 0, arg, 0], 2)
- COPY_PARAM_TO_I32X4_LANE(2), WIDEN(2),
- // g3 = widen_s([0, 0, 0, arg], 3)
- COPY_PARAM_TO_I32X4_LANE(3), WIDEN(3), WASM_ONE);
-#undef WIDEN
-#undef COPY_PARAM_TO_I32X4_LANE
-
- FOR_UINT8_INPUTS(x) {
- r.Call(x << 24 | x << 16 | x << 8 | x);
- int32_t expected_signed = static_cast<int32_t>(bit_cast<int8_t>(x));
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
- CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g1[i]));
- CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g2[i]));
- CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g3[i]));
- }
- }
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
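What the deleted tests above checked, restated as scalar expectations (illustrative helpers with invented names, not part of the patch): the call argument replicates x into every byte of the replaced i32 lane, so each widened lane of every global must equal the zero-extension (u) or sign-extension (s) of x.

    #include <cstdint>

    uint32_t PackArg(uint8_t x) {
      return static_cast<uint32_t>(x) << 24 | static_cast<uint32_t>(x) << 16 |
             static_cast<uint32_t>(x) << 8 | x;
    }
    uint32_t ExpectedWidenU(uint8_t x) { return x; }                      // zero-extend
    int32_t ExpectedWidenS(uint8_t x) { return static_cast<int8_t>(x); }  // 0xff -> -1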
diff --git a/test/common/wasm/wasm-interpreter.cc b/test/common/wasm/wasm-interpreter.cc
index bcbf644db5..f4544e666f 100644
--- a/test/common/wasm/wasm-interpreter.cc
+++ b/test/common/wasm/wasm-interpreter.cc
@@ -29,7 +29,6 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes-inl.h"
-#include "src/wasm/wasm-opcodes.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
@@ -2260,25 +2259,6 @@ class WasmInterpreterInternals {
bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
pc_t pc, int* const len) {
switch (opcode) {
-#define WIDEN_CASE(op, expr) \
- case op: { \
- uint8_t lane = \
- decoder->read_u8<Decoder::kNoValidation>(code->at(pc + *len), "lane"); \
- *len += 1; \
- int16 s = Pop().to_s128().to_i8x16(); \
- int4 r; \
- for (int i = 0; i < 4; i++) { \
- auto x = s.val[LANE(lane * 4 + i, s)]; \
- r.val[LANE(i, r)] = expr; \
- } \
- Push(WasmValue(Simd128(r))); \
- return true; \
- }
- WIDEN_CASE(kExprI32x4WidenI8x16S, static_cast<int32_t>(x))
- WIDEN_CASE(kExprI32x4WidenI8x16U,
- static_cast<uint32_t>(bit_cast<uint8_t>(x)))
-#undef WIDEN_CASE
-
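The interpreter case removed above, restated as a standalone sketch (assumed names, not V8 code; it also ignores the big-endian lane remapping that the interpreter's LANE macro performs): read one lane-immediate byte, reinterpret the popped s128 as 16 i8 lanes, and widen bytes lane*4..lane*4+3 with either sign or zero extension.

    #include <array>
    #include <cstdint>

    std::array<int32_t, 4> WidenI8x16(const std::array<int8_t, 16>& s,
                                      uint8_t lane, bool is_signed) {
      std::array<int32_t, 4> r{};
      for (int i = 0; i < 4; i++) {
        int8_t x = s[lane * 4 + i];
        r[i] = is_signed ? static_cast<int32_t>(x)
                         : static_cast<int32_t>(static_cast<uint8_t>(x));
      }
      return r;
    }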
#define SPLAT_CASE(format, sType, valType, num) \
case kExpr##format##Splat: { \
WasmValue val = Pop(); \