[wasm-simd] Move load/store lane out of post-mvp
Define a new macro list, since load/store lane has one immediate operand (the lane index) that the other SIMD loads and stores do not. Also remove all the #ifdef guards.

Bug: v8:10975
Change-Id: Ib0a1f7bb6c4bdf83d81a65b4e02199b792d13837
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2645568
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72377}
Parent: 97a935eeab
Commit: 848137c47c
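The point about immediates, sketched in C++: the existing SIMD loads and stores carry only a memory-access immediate (alignment and offset), while the load/store lane opcodes carry that memarg plus a single extra byte for the lane index. The helper names below are hypothetical, not V8's decoder API; this is only a sketch of the length accounting that the decoder hunk further down expresses as "length + imm.length + 1".

#include <cstddef>
#include <iostream>

// Hypothetical helpers (not V8 API). memarg_length is the number of bytes
// occupied by the LEB128-encoded alignment and offset fields.
constexpr std::size_t SimdMemImmediateLength(std::size_t memarg_length) {
  return memarg_length;  // e.g. v128.load64_zero: memarg only
}
constexpr std::size_t SimdMemLaneImmediateLength(std::size_t memarg_length) {
  return memarg_length + 1;  // e.g. v128.load8_lane: memarg + lane-index byte
}

static_assert(SimdMemLaneImmediateLength(2) == SimdMemImmediateLength(2) + 1,
              "lane opcodes consume exactly one extra immediate byte");

int main() {
  // With a 2-byte memarg: 2 immediate bytes for a plain SIMD load,
  // 3 for a load-lane.
  std::cout << SimdMemImmediateLength(2) << " vs "
            << SimdMemLaneImmediateLength(2) << "\n";
}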
@@ -2782,14 +2782,6 @@ void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
 #endif  // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
         // && !V8_TARGET_ARCH_IA32
 
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
-    !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
-void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
-#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
-        // && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
-
 #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
     !V8_TARGET_ARCH_ARM
 // TODO(v8:10983) Prototyping sign select.
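The lines removed above were the usual prototyping fallback in the generic instruction selector: architectures without a backend implementation yet compiled UNIMPLEMENTED() stubs behind a preprocessor guard. Below is a self-contained imitation of that pattern; the guard macro, the stub name, and the UNIMPLEMENTED substitute are all invented for this example.

#include <cstdio>
#include <cstdlib>

// Stand-in for an UNIMPLEMENTED() macro: report the location, then abort.
#define EXAMPLE_UNIMPLEMENTED()                                        \
  (std::fprintf(stderr, "unimplemented: %s:%d\n", __FILE__, __LINE__), \
   std::abort())

// Hypothetical guard: only architectures without a real implementation
// compile this stub, so selecting the opcode there fails fast and loudly.
#if !defined(EXAMPLE_ARCH_HAS_LOAD_LANE)
void VisitLoadLaneExample() { EXAMPLE_UNIMPLEMENTED(); }
#endif

int main() {
  std::puts("stub compiled; calling it would abort");
  // VisitLoadLaneExample();  // uncomment to observe the fail-fast behaviour
}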
@@ -1839,7 +1839,6 @@ class WasmDecoder : public Decoder {
           return length;
         FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
           return length + 1;
-        // clang-format on
         FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
         case kExprPrefetchT:
         case kExprPrefetchNT: {
@@ -1848,20 +1847,14 @@ class WasmDecoder : public Decoder {
                                               kConservativelyAssumeMemory64);
           return length + imm.length;
         }
-        case kExprS128Load8Lane:
-        case kExprS128Load16Lane:
-        case kExprS128Load32Lane:
-        case kExprS128Load64Lane:
-        case kExprS128Store8Lane:
-        case kExprS128Store16Lane:
-        case kExprS128Store32Lane:
-        case kExprS128Store64Lane: {
-          MemoryAccessImmediate<validate> imm(decoder, pc + length,
-                                              UINT32_MAX,
-                                              kConservativelyAssumeMemory64);
+        FOREACH_SIMD_MEM_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE) {
+          MemoryAccessImmediate<validate> imm(
+              decoder, pc + length, UINT32_MAX,
+              kConservativelyAssumeMemory64);
           // 1 more byte for lane index immediate.
           return length + imm.length + 1;
         }
+        // clang-format on
         // Shuffles require a byte per lane, or 16 immediate bytes.
         case kExprS128Const:
         case kExprI8x16Shuffle:
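The decoder hunk above replaces eight explicit case labels with a single FOREACH_SIMD_MEM_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE) expansion, so every lane opcode shares one length computation. The sketch below shows that X-macro-into-switch technique with a trimmed opcode list and a local case macro; DECLARE_OPCODE_CASE's actual body is not shown in the diff, so the stand-in here is an assumption.

#include <cstddef>
#include <iostream>

// Trimmed, illustrative opcode list in the same shape as the diff:
// V(name, opcode, signature).
#define FOREACH_LANE_MEM_OPCODE(V) \
  V(S128Load8Lane, 0xfd58, s_is)   \
  V(S128Store8Lane, 0xfd5c, v_is)

// Expand each list entry into a case label.
#define OPCODE_CASE(name, opcode, sig) case opcode:

// Immediate bytes that follow the memarg: the lane opcodes all report one
// extra byte (the lane index), handled in a single place.
std::size_t ExtraImmediateBytes(int opcode) {
  switch (opcode) {
    FOREACH_LANE_MEM_OPCODE(OPCODE_CASE)
      return 1;
    default:
      return 0;
  }
}

int main() {
  std::cout << ExtraImmediateBytes(0xfd58) << "\n";  // 1 (S128Load8Lane)
  std::cout << ExtraImmediateBytes(0xfdfc) << "\n";  // 0 (S128Load32Zero)
}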
@@ -590,7 +590,8 @@ constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
 constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
 #define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
   return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
-      FOREACH_SIMD_POST_MVP_MEM_OPCODE(CASE) kSigEnum_None;
+      FOREACH_SIMD_MEM_1_OPERAND_OPCODE(CASE)
+          FOREACH_SIMD_POST_MVP_MEM_OPCODE(CASE) kSigEnum_None;
 #undef CASE
 }
 
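The GetSimdOpcodeSigIndex hunk above threads the new list through a chained conditional: each CASE expansion appends "opcode == ... ? kSigEnum_... :" and the chain ends in kSigEnum_None. Below is a compilable miniature of the same technique, with a trimmed list and illustrative enum names; the real CASE masks with 0xFF because the function receives only a single opcode byte.

// Miniature of the chained ?: lookup; enum values and the list are
// illustrative, not V8's.
enum SigIndex { kSig_None, kSig_s_is, kSig_v_is };

#define FOREACH_LANE_OP(V)     \
  V(S128Load8Lane, 0x58, s_is) \
  V(S128Store8Lane, 0x5c, v_is)

// Each expansion appends "opcode == (opc) ? kSig_<sig> :" to the expression.
#define CASE(name, opc, sig) opcode == (opc) ? kSig_##sig:
constexpr SigIndex GetLaneOpSigIndex(int opcode) {
  return FOREACH_LANE_OP(CASE) kSig_None;
}
#undef CASE

static_assert(GetLaneOpSigIndex(0x58) == kSig_s_is, "load lane uses s_is");
static_assert(GetLaneOpSigIndex(0x5c) == kSig_v_is, "store lane uses v_is");
static_assert(GetLaneOpSigIndex(0x00) == kSig_None, "unknown opcode");

int main() { return 0; }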
@@ -295,6 +295,16 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
   V(S128Load32Zero, 0xfdfc, s_i) \
   V(S128Load64Zero, 0xfdfd, s_i)
 
+#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
+  V(S128Load8Lane, 0xfd58, s_is)             \
+  V(S128Load16Lane, 0xfd59, s_is)            \
+  V(S128Load32Lane, 0xfd5a, s_is)            \
+  V(S128Load64Lane, 0xfd5b, s_is)            \
+  V(S128Store8Lane, 0xfd5c, v_is)            \
+  V(S128Store16Lane, 0xfd5d, v_is)           \
+  V(S128Store32Lane, 0xfd5e, v_is)           \
+  V(S128Store64Lane, 0xfd5f, v_is)
+
 #define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
 
 #define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(I8x16Shuffle, 0xfd0d, s_ss)
@@ -484,14 +494,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
   V(F64x2NearestInt, 0xfddf, s_s)
 
 #define FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
-  V(S128Load8Lane, 0xfd58, s_is)            \
-  V(S128Load16Lane, 0xfd59, s_is)           \
-  V(S128Load32Lane, 0xfd5a, s_is)           \
-  V(S128Load64Lane, 0xfd5b, s_is)           \
-  V(S128Store8Lane, 0xfd5c, v_is)           \
-  V(S128Store16Lane, 0xfd5d, v_is)          \
-  V(S128Store32Lane, 0xfd5e, v_is)          \
-  V(S128Store64Lane, 0xfd5f, v_is)          \
   V(PrefetchT, 0xfdc5, v_i)                 \
   V(PrefetchNT, 0xfdc6, v_i)
 
@@ -553,12 +555,13 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
   FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
   FOREACH_SIMD_POST_MVP_ONE_OPERAND_OPCODE(V)
 
 #define FOREACH_SIMD_OPCODE(V)         \
   FOREACH_SIMD_0_OPERAND_OPCODE(V)     \
   FOREACH_SIMD_1_OPERAND_OPCODE(V)     \
   FOREACH_SIMD_MASK_OPERAND_OPCODE(V)  \
   FOREACH_SIMD_MEM_OPCODE(V)           \
-  FOREACH_SIMD_POST_MVP_MEM_OPCODE(V)  \
+  FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
+  FOREACH_SIMD_POST_MVP_MEM_OPCODE(V)  \
   FOREACH_SIMD_CONST_OPCODE(V)
 
 #define FOREACH_NUMERIC_OPCODE(V) \
@@ -4175,13 +4175,9 @@ WASM_SIMD_TEST(S128Load64Zero) {
   RunLoadZeroTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Zero);
 }
 
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
 template <typename T>
 void RunLoadLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode load_op, WasmOpcode splat_op) {
-  FLAG_SCOPE(wasm_simd_post_mvp);
   WasmOpcode const_op =
       splat_op == kExprI64x2Splat ? kExprI64Const : kExprI32Const;
 
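RunLoadLaneTest above exercises the operation's semantics: load one element of type T from memory and replace a single lane of an existing 128-bit value, leaving the other lanes untouched. Below is a standalone scalar model of that behaviour; the names and byte layout are illustrative, not the test's helpers.

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

// Scalar model of v128.loadN_lane: read a T from memory and overwrite the
// given lane of a 16-byte vector, leaving every other byte as it was.
template <typename T>
std::array<uint8_t, 16> LoadLane(std::array<uint8_t, 16> v, const void* mem,
                                 std::size_t lane) {
  T value;
  std::memcpy(&value, mem, sizeof(T));
  std::memcpy(v.data() + lane * sizeof(T), &value, sizeof(T));
  return v;
}

int main() {
  std::array<uint8_t, 16> v{};               // start from an all-zero v128
  const uint32_t in_memory = 0xdeadbeef;
  v = LoadLane<uint32_t>(v, &in_memory, 2);  // like v128.load32_lane, lane 2
  for (unsigned byte : v) std::cout << std::hex << byte << ' ';
  std::cout << "\n";                         // only bytes 8..11 change
}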
@@ -4280,8 +4276,6 @@ WASM_SIMD_TEST_NO_LOWERING(S128Load64Lane) {
 template <typename T>
 void RunStoreLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode store_op, WasmOpcode splat_op) {
-  FLAG_SCOPE(wasm_simd_post_mvp);
-
   constexpr int lanes = kSimd128Size / sizeof(T);
   constexpr int mem_index = 16;  // Store to mem index 16 (bytes).
   constexpr int splat_value = 33;
@@ -4372,9 +4366,6 @@ WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
                    kExprI64x2Splat);
 }
 
-#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
-        // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_MIPS64
-
 #define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
   WASM_SIMD_TEST(S##format##AnyTrue) {                         \
     FLAG_SCOPE(wasm_simd_post_mvp);                            \