From 848137c47cb6fca8da6e1faca5fed6452a50fe9d Mon Sep 17 00:00:00 2001
From: Ng Zhi An
Date: Fri, 22 Jan 2021 15:54:13 -0800
Subject: [PATCH] [wasm-simd] Move load/store lane out of post-mvp

Define a new macro list, since this has 1 immediate operand (lane index)
compared to other SIMD load/stores. Also remove all the ifdef guards.

Bug: v8:10975
Change-Id: Ib0a1f7bb6c4bdf83d81a65b4e02199b792d13837
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2645568
Reviewed-by: Deepti Gandluri
Commit-Queue: Zhi An Ng
Cr-Commit-Position: refs/heads/master@{#72377}
---
 src/compiler/backend/instruction-selector.cc |  8 -----
 src/wasm/function-body-decoder-impl.h        | 17 ++++-------
 src/wasm/wasm-opcodes-inl.h                  |  3 +-
 src/wasm/wasm-opcodes.h                      | 31 +++++++++++---------
 test/cctest/wasm/test-run-wasm-simd.cc       |  9 ------
 5 files changed, 24 insertions(+), 44 deletions(-)

diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc
index 1c16aa9602..f6c75e5da5 100644
--- a/src/compiler/backend/instruction-selector.cc
+++ b/src/compiler/backend/instruction-selector.cc
@@ -2782,14 +2782,6 @@ void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
 #endif  // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
         // && !V8_TARGET_ARCH_IA32
 
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
-    !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
-void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
-#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
-        // && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
-
 #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
     !V8_TARGET_ARCH_ARM
 // TODO(v8:10983) Prototyping sign select.
diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h
index ec2076897f..b934e1df04 100644
--- a/src/wasm/function-body-decoder-impl.h
+++ b/src/wasm/function-body-decoder-impl.h
@@ -1839,7 +1839,6 @@ class WasmDecoder : public Decoder {
             return length;
           FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
             return length + 1;
-          // clang-format on
           FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
           case kExprPrefetchT:
           case kExprPrefetchNT: {
@@ -1848,20 +1847,14 @@ class WasmDecoder : public Decoder {
                                       kConservativelyAssumeMemory64);
             return length + imm.length;
           }
-          case kExprS128Load8Lane:
-          case kExprS128Load16Lane:
-          case kExprS128Load32Lane:
-          case kExprS128Load64Lane:
-          case kExprS128Store8Lane:
-          case kExprS128Store16Lane:
-          case kExprS128Store32Lane:
-          case kExprS128Store64Lane: {
-            MemoryAccessImmediate imm(decoder, pc + length,
-                                      UINT32_MAX,
-                                      kConservativelyAssumeMemory64);
+          FOREACH_SIMD_MEM_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE) {
+            MemoryAccessImmediate imm(
+                decoder, pc + length, UINT32_MAX,
+                kConservativelyAssumeMemory64);
             // 1 more byte for lane index immediate.
             return length + imm.length + 1;
           }
+          // clang-format on
           // Shuffles require a byte per lane, or 16 immediate bytes.
          case kExprS128Const:
          case kExprI8x16Shuffle:
diff --git a/src/wasm/wasm-opcodes-inl.h b/src/wasm/wasm-opcodes-inl.h
index dcc9b8f662..eb358c31c7 100644
--- a/src/wasm/wasm-opcodes-inl.h
+++ b/src/wasm/wasm-opcodes-inl.h
@@ -590,7 +590,8 @@ constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
 constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
 #define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
   return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
-      FOREACH_SIMD_POST_MVP_MEM_OPCODE(CASE) kSigEnum_None;
+      FOREACH_SIMD_MEM_1_OPERAND_OPCODE(CASE)
+          FOREACH_SIMD_POST_MVP_MEM_OPCODE(CASE) kSigEnum_None;
 #undef CASE
 }
 
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index bd0f8f9dbe..5b313f87bd 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -295,6 +295,16 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
   V(S128Load32Zero, 0xfdfc, s_i)             \
   V(S128Load64Zero, 0xfdfd, s_i)
 
+#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
+  V(S128Load8Lane, 0xfd58, s_is)             \
+  V(S128Load16Lane, 0xfd59, s_is)            \
+  V(S128Load32Lane, 0xfd5a, s_is)            \
+  V(S128Load64Lane, 0xfd5b, s_is)            \
+  V(S128Store8Lane, 0xfd5c, v_is)            \
+  V(S128Store16Lane, 0xfd5d, v_is)           \
+  V(S128Store32Lane, 0xfd5e, v_is)           \
+  V(S128Store64Lane, 0xfd5f, v_is)
+
 #define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
 
 #define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(I8x16Shuffle, 0xfd0d, s_ss)
@@ -484,14 +494,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
   V(F64x2NearestInt, 0xfddf, s_s)
 
 #define FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
-  V(S128Load8Lane, 0xfd58, s_is)            \
-  V(S128Load16Lane, 0xfd59, s_is)           \
-  V(S128Load32Lane, 0xfd5a, s_is)           \
-  V(S128Load64Lane, 0xfd5b, s_is)           \
-  V(S128Store8Lane, 0xfd5c, v_is)           \
-  V(S128Store16Lane, 0xfd5d, v_is)          \
-  V(S128Store32Lane, 0xfd5e, v_is)          \
-  V(S128Store64Lane, 0xfd5f, v_is)          \
   V(PrefetchT, 0xfdc5, v_i)                 \
   V(PrefetchNT, 0xfdc6, v_i)
 
@@ -553,12 +555,13 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
   FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
   FOREACH_SIMD_POST_MVP_ONE_OPERAND_OPCODE(V)
 
-#define FOREACH_SIMD_OPCODE(V)         \
-  FOREACH_SIMD_0_OPERAND_OPCODE(V)     \
-  FOREACH_SIMD_1_OPERAND_OPCODE(V)     \
-  FOREACH_SIMD_MASK_OPERAND_OPCODE(V)  \
-  FOREACH_SIMD_MEM_OPCODE(V)           \
-  FOREACH_SIMD_POST_MVP_MEM_OPCODE(V)  \
+#define FOREACH_SIMD_OPCODE(V)          \
+  FOREACH_SIMD_0_OPERAND_OPCODE(V)      \
+  FOREACH_SIMD_1_OPERAND_OPCODE(V)      \
+  FOREACH_SIMD_MASK_OPERAND_OPCODE(V)   \
+  FOREACH_SIMD_MEM_OPCODE(V)            \
+  FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V)  \
+  FOREACH_SIMD_POST_MVP_MEM_OPCODE(V)   \
   FOREACH_SIMD_CONST_OPCODE(V)
 
 #define FOREACH_NUMERIC_OPCODE(V) \
diff --git a/test/cctest/wasm/test-run-wasm-simd.cc b/test/cctest/wasm/test-run-wasm-simd.cc
index ba722b343d..6cb39ccc04 100644
--- a/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/test/cctest/wasm/test-run-wasm-simd.cc
@@ -4175,13 +4175,9 @@ WASM_SIMD_TEST(S128Load64Zero) {
   RunLoadZeroTest(execution_tier, lower_simd, kExprS128Load64Zero);
 }
 
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
 template <typename T>
 void RunLoadLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode load_op, WasmOpcode splat_op) {
-  FLAG_SCOPE(wasm_simd_post_mvp);
   WasmOpcode const_op = splat_op == kExprI64x2Splat ?
                             kExprI64Const : kExprI32Const;
@@ -4280,8 +4276,6 @@ WASM_SIMD_TEST_NO_LOWERING(S128Load64Lane) {
 template <typename T>
 void RunStoreLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode store_op, WasmOpcode splat_op) {
-  FLAG_SCOPE(wasm_simd_post_mvp);
-
   constexpr int lanes = kSimd128Size / sizeof(T);
   constexpr int mem_index = 16;  // Store to mem index 16 (bytes).
   constexpr int splat_value = 33;
@@ -4372,9 +4366,6 @@ WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
                    kExprI64x2Splat);
 }
 
-#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
-        // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_MIPS64
-
 #define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
   WASM_SIMD_TEST(S##format##AnyTrue) {                         \
     FLAG_SCOPE(wasm_simd_post_mvp);                            \
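
Note: as an illustration of the X-macro pattern this patch relies on, below is a
small standalone C++ sketch, not V8 code: the two-argument list entries and the
ImmediateLength() helper are simplified, hypothetical stand-ins. It shows how a
FOREACH_*-style list can expand once into enum values and once into the shared
switch cases that charge one extra immediate byte for the lane index, which is
why the load/store lane opcodes get their own FOREACH_SIMD_MEM_1_OPERAND_OPCODE
list instead of reusing FOREACH_SIMD_MEM_OPCODE.

#include <cstdint>
#include <cstdio>

// Each entry: (Name, opcode suffix byte). Simplified: the real list in
// src/wasm/wasm-opcodes.h also carries a signature tag (s_is / v_is).
#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
  V(S128Load8Lane, 0x58)                     \
  V(S128Load16Lane, 0x59)                    \
  V(S128Load32Lane, 0x5a)                    \
  V(S128Load64Lane, 0x5b)                    \
  V(S128Store8Lane, 0x5c)                    \
  V(S128Store16Lane, 0x5d)                   \
  V(S128Store32Lane, 0x5e)                   \
  V(S128Store64Lane, 0x5f)

// First expansion: the list becomes enum values.
enum SimdOpcode : std::uint8_t {
#define DECLARE_ENUM(name, opc) kExpr##name = (opc),
  FOREACH_SIMD_MEM_1_OPERAND_OPCODE(DECLARE_ENUM)
#undef DECLARE_ENUM
};

// Second expansion: the same list becomes a run of case labels sharing one
// body, the way the decoder's DECLARE_OPCODE_CASE usage in the patch does.
int ImmediateLength(std::uint8_t opcode, int memarg_length) {
  switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opc) case kExpr##name:
    FOREACH_SIMD_MEM_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
      // memarg (alignment + offset) plus 1 more byte for the lane index.
      return memarg_length + 1;
    default:
      return memarg_length;
  }
}

int main() {
  // e.g. a lane load with a 2-byte memarg: 2 + 1 = 3 immediate bytes.
  std::printf("%d\n", ImmediateLength(kExprS128Load32Lane, 2));
}

In the actual patch the new list is additionally threaded through
FOREACH_SIMD_OPCODE and GetSimdOpcodeSigIndex, so the lane opcodes keep their
s_is / v_is signatures while dropping out of the post-MVP list.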