[riscv32] Add RISCV32 backend
This very large changeset adds support for RISCV32.

Bug: v8:13025
Change-Id: Ieacc857131e6620f0fcfd7daa88a0f8d77056aa9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3736732
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82053}
parent bb4a752250
commit 491de34bcc

BUILD.gn (182 changed lines)
@@ -654,7 +654,7 @@ config("internal_config") {
     defines += [ "BUILDING_V8_SHARED" ]
   }
 
-  if (v8_current_cpu == "riscv64") {
+  if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
     libs = [ "atomic" ]
   }
 }
@@ -726,7 +726,7 @@ config("external_config") {
     defines += [ "USING_V8_SHARED" ]
   }
 
-  if (current_cpu == "riscv64") {
+  if (current_cpu == "riscv64" || current_cpu == "riscv32") {
    libs = [ "atomic" ]
   }
 }
@@ -1243,14 +1243,18 @@ config("toolchain") {
   if (v8_current_cpu == "riscv64") {
     defines += [ "V8_TARGET_ARCH_RISCV64" ]
     defines += [ "__riscv_xlen=64" ]
 
     #FIXME: Temporarily use MIPS macro for the building.
     defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
     if (target_is_simulator) {
       defines += [ "CAN_USE_RVV_INSTRUCTIONS" ]
     }
   }
 
+  if (v8_current_cpu == "riscv32") {
+    defines += [ "V8_TARGET_ARCH_RISCV32" ]
+    defines += [ "__riscv_xlen=32" ]
+    defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
+  }
+
   if (v8_current_cpu == "x86") {
     defines += [ "V8_TARGET_ARCH_IA32" ]
     if (is_win) {
@@ -2543,7 +2547,12 @@ v8_source_set("v8_initializers") {
   } else if (v8_current_cpu == "riscv64") {
     sources += [
       ### gcmole(arch:riscv64) ###
-      "src/builtins/riscv64/builtins-riscv64.cc",
+      "src/builtins/riscv/builtins-riscv.cc",
     ]
+  } else if (v8_current_cpu == "riscv32") {
+    sources += [
+      ### gcmole(arch:riscv32) ###
+      "src/builtins/riscv/builtins-riscv.cc",
+    ]
   }
 
@@ -3978,20 +3987,81 @@ v8_header_set("v8_internal_headers") {
     ]
   } else if (v8_current_cpu == "riscv64") {
     sources += [  ### gcmole(arch:riscv64) ###
-      "src/baseline/riscv64/baseline-assembler-riscv64-inl.h",
-      "src/baseline/riscv64/baseline-compiler-riscv64-inl.h",
-      "src/codegen/riscv64/assembler-riscv64-inl.h",
-      "src/codegen/riscv64/assembler-riscv64.h",
-      "src/codegen/riscv64/constants-riscv64.h",
-      "src/codegen/riscv64/macro-assembler-riscv64.h",
-      "src/codegen/riscv64/register-riscv64.h",
-      "src/codegen/riscv64/reglist-riscv64.h",
-      "src/compiler/backend/riscv64/instruction-codes-riscv64.h",
-      "src/execution/riscv64/frame-constants-riscv64.h",
-      "src/execution/riscv64/simulator-riscv64.h",
-      "src/regexp/riscv64/regexp-macro-assembler-riscv64.h",
+      "src/baseline/riscv/baseline-assembler-riscv-inl.h",
+      "src/baseline/riscv/baseline-compiler-riscv-inl.h",
+      "src/codegen/riscv/assembler-riscv-inl.h",
+      "src/codegen/riscv/assembler-riscv.h",
+      "src/codegen/riscv/base-assembler-riscv.h",
+      "src/codegen/riscv/base-constants-riscv.h",
+      "src/codegen/riscv/base-riscv-i.h",
+      "src/codegen/riscv/constant-riscv-a.h",
+      "src/codegen/riscv/constant-riscv-c.h",
+      "src/codegen/riscv/constant-riscv-d.h",
+      "src/codegen/riscv/constant-riscv-f.h",
+      "src/codegen/riscv/constant-riscv-m.h",
+      "src/codegen/riscv/constant-riscv-v.h",
+      "src/codegen/riscv/constant-riscv-zicsr.h",
+      "src/codegen/riscv/constant-riscv-zifencei.h",
+      "src/codegen/riscv/constants-riscv.h",
+      "src/codegen/riscv/extension-riscv-a.h",
+      "src/codegen/riscv/extension-riscv-c.h",
+      "src/codegen/riscv/extension-riscv-d.h",
+      "src/codegen/riscv/extension-riscv-inl.h",
+      "src/codegen/riscv/extension-riscv-m.h",
+      "src/codegen/riscv/extension-riscv-v.h",
+      "src/codegen/riscv/extension-riscv-zicsr.h",
+      "src/codegen/riscv/extension-riscv-zifencei.h",
+      "src/codegen/riscv/interface-descriptors-riscv-inl.h",
+      "src/codegen/riscv/macro-assembler-riscv.h",
+      "src/codegen/riscv/register-riscv.h",
+      "src/codegen/riscv/reglist-riscv.h",
+      "src/compiler/backend/riscv/instruction-codes-riscv.h",
+      "src/execution/riscv/frame-constants-riscv.h",
+      "src/execution/riscv/simulator-riscv.h",
+      "src/regexp/riscv/regexp-macro-assembler-riscv.h",
       "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h",
     ]
+  } else if (v8_current_cpu == "riscv32") {
+    sources += [  ### gcmole(arch:riscv32) ###
+      "src/baseline/riscv/baseline-assembler-riscv-inl.h",
+      "src/baseline/riscv/baseline-compiler-riscv-inl.h",
+      "src/codegen/riscv/assembler-riscv.h",
+      "src/codegen/riscv/assembler-riscv32-inl.h",
+      "src/codegen/riscv/base-assembler-riscv.h",
+      "src/codegen/riscv/base-constants-riscv.h",
+      "src/codegen/riscv/base-riscv-i.h",
+      "src/codegen/riscv/constant-riscv-a.h",
+      "src/codegen/riscv/constant-riscv-c.h",
+      "src/codegen/riscv/constant-riscv-d.h",
+      "src/codegen/riscv/constant-riscv-f.h",
+      "src/codegen/riscv/constant-riscv-i.h",
+      "src/codegen/riscv/constant-riscv-m.h",
+      "src/codegen/riscv/constant-riscv-v.h",
+      "src/codegen/riscv/constant-riscv-zicsr.h",
+      "src/codegen/riscv/constant-riscv-zifencei.h",
+      "src/codegen/riscv/constants-riscv.h",
+      "src/codegen/riscv/extension-riscv-a.h",
+      "src/codegen/riscv/extension-riscv-c.h",
+      "src/codegen/riscv/extension-riscv-d.h",
+      "src/codegen/riscv/extension-riscv-f.h",
+      "src/codegen/riscv/extension-riscv-inl.h",
+      "src/codegen/riscv/extension-riscv-m.h",
+      "src/codegen/riscv/extension-riscv-v.h",
+      "src/codegen/riscv/extension-riscv-zicsr.h",
+      "src/codegen/riscv/extension-riscv-zifencei.h",
+      "src/codegen/riscv/interface-descriptors-riscv-inl.h",
+      "src/codegen/riscv/macro-assembler-riscv.h",
+      "src/codegen/riscv/register-riscv.h",
+      "src/codegen/riscv/reglist-riscv.h",
+      "src/compiler/backend/riscv/instruction-codes-riscv.h",
+      "src/execution/riscv/frame-constants-riscv.h",
+      "src/execution/riscv/simulator-riscv.h",
+      "src/regexp/riscv/regexp-macro-assembler-riscv.h",
+      "src/wasm/baseline/riscv32/liftoff-assembler-riscv32.h",
+    ]
   }
 
   public_deps = [
@@ -5021,23 +5091,55 @@ v8_source_set("v8_base_without_compiler") {
     ]
   } else if (v8_current_cpu == "riscv64") {
     sources += [  ### gcmole(arch:riscv64) ###
-      "src/baseline/riscv64/baseline-assembler-riscv64-inl.h",
-      "src/baseline/riscv64/baseline-compiler-riscv64-inl.h",
-      "src/codegen/riscv64/assembler-riscv64-inl.h",
-      "src/codegen/riscv64/assembler-riscv64.cc",
-      "src/codegen/riscv64/constants-riscv64.cc",
-      "src/codegen/riscv64/cpu-riscv64.cc",
-      "src/codegen/riscv64/interface-descriptors-riscv64-inl.h",
-      "src/codegen/riscv64/macro-assembler-riscv64.cc",
-      "src/compiler/backend/riscv64/code-generator-riscv64.cc",
-      "src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
-      "src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
-      "src/deoptimizer/riscv64/deoptimizer-riscv64.cc",
-      "src/diagnostics/riscv64/disasm-riscv64.cc",
-      "src/diagnostics/riscv64/unwinder-riscv64.cc",
-      "src/execution/riscv64/frame-constants-riscv64.cc",
-      "src/execution/riscv64/simulator-riscv64.cc",
-      "src/regexp/riscv64/regexp-macro-assembler-riscv64.cc",
+      "src/codegen/riscv/assembler-riscv.cc",
+      "src/codegen/riscv/base-assembler-riscv.cc",
+      "src/codegen/riscv/base-constants-riscv.cc",
+      "src/codegen/riscv/base-riscv-i.cc",
+      "src/codegen/riscv/cpu-riscv.cc",
+      "src/codegen/riscv/extension-riscv-a.cc",
+      "src/codegen/riscv/extension-riscv-c.cc",
+      "src/codegen/riscv/extension-riscv-d.cc",
+      "src/codegen/riscv/extension-riscv-f.cc",
+      "src/codegen/riscv/extension-riscv-m.cc",
+      "src/codegen/riscv/extension-riscv-v.cc",
+      "src/codegen/riscv/extension-riscv-zicsr.cc",
+      "src/codegen/riscv/extension-riscv-zifencei.cc",
+      "src/codegen/riscv/macro-assembler-riscv.cc",
+      "src/compiler/backend/riscv/code-generator-riscv.cc",
+      "src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
+      "src/compiler/backend/riscv/instruction-selector-riscv64.cc",
+      "src/deoptimizer/riscv/deoptimizer-riscv.cc",
+      "src/diagnostics/riscv/disasm-riscv.cc",
+      "src/diagnostics/riscv/unwinder-riscv.cc",
+      "src/execution/riscv/frame-constants-riscv.cc",
+      "src/execution/riscv/simulator-riscv.cc",
+      "src/regexp/riscv/regexp-macro-assembler-riscv.cc",
     ]
+  } else if (v8_current_cpu == "riscv32") {
+    sources += [  ### gcmole(arch:riscv32) ###
+      "src/codegen/riscv/assembler-riscv.cc",
+      "src/codegen/riscv/base-assembler-riscv.cc",
+      "src/codegen/riscv/base-constants-riscv.cc",
+      "src/codegen/riscv/base-riscv-i.cc",
+      "src/codegen/riscv/cpu-riscv.cc",
+      "src/codegen/riscv/extension-riscv-a.cc",
+      "src/codegen/riscv/extension-riscv-c.cc",
+      "src/codegen/riscv/extension-riscv-d.cc",
+      "src/codegen/riscv/extension-riscv-f.cc",
+      "src/codegen/riscv/extension-riscv-m.cc",
+      "src/codegen/riscv/extension-riscv-v.cc",
+      "src/codegen/riscv/extension-riscv-zicsr.cc",
+      "src/codegen/riscv/extension-riscv-zifencei.cc",
+      "src/codegen/riscv/macro-assembler-riscv.cc",
+      "src/compiler/backend/riscv/code-generator-riscv.cc",
+      "src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
+      "src/compiler/backend/riscv/instruction-selector-riscv32.cc",
+      "src/deoptimizer/riscv/deoptimizer-riscv.cc",
+      "src/diagnostics/riscv/disasm-riscv.cc",
+      "src/diagnostics/riscv/unwinder-riscv.cc",
+      "src/execution/riscv/frame-constants-riscv.cc",
+      "src/execution/riscv/simulator-riscv.cc",
+      "src/regexp/riscv/regexp-macro-assembler-riscv.cc",
+    ]
   }
 
@@ -5126,7 +5228,7 @@ v8_source_set("v8_base_without_compiler") {
       v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
       v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
       v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
-      v8_current_cpu == "riscv64") {
+      v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
     libs += [ "atomic" ]
   }
 
@@ -5526,7 +5628,7 @@ v8_component("v8_libbase") {
     sources += [ "src/base/ubsan.cc" ]
   }
 
-  if (v8_current_cpu == "riscv64") {
+  if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
     libs += [ "atomic" ]
   }
 
@@ -5615,7 +5717,7 @@ v8_component("v8_libplatform") {
     }
   }
 
-  if (v8_current_cpu == "riscv64") {
+  if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
     libs = [ "atomic" ]
   }
 }
@@ -5707,8 +5809,8 @@ v8_source_set("v8_heap_base") {
       sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
     } else if (current_cpu == "loong64") {
       sources += [ "src/heap/base/asm/loong64/push_registers_asm.cc" ]
-    } else if (current_cpu == "riscv64") {
-      sources += [ "src/heap/base/asm/riscv64/push_registers_asm.cc" ]
+    } else if (current_cpu == "riscv64" || current_cpu == "riscv32") {
+      sources += [ "src/heap/base/asm/riscv/push_registers_asm.cc" ]
     }
   } else if (is_win) {
     if (current_cpu == "x64") {
@@ -6531,7 +6633,7 @@ if (want_v8_shell) {
 v8_executable("cppgc_hello_world") {
   sources = [ "samples/cppgc/hello-world.cc" ]
 
-  if (v8_current_cpu == "riscv64") {
+  if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
     libs = [ "atomic" ]
   }
 
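A note on the recurring `libs = [ "atomic" ]` entries above: on RISC-V, GCC and Clang lower some atomic operations (notably 64-bit atomics on rv32) to libatomic library calls rather than inline instructions, so binaries that use them must link against libatomic. A minimal standalone sketch, not part of this commit; the cross-compiler name used in the comment is an assumption:

// Build sketch (assumed toolchain): riscv32-unknown-linux-gnu-g++ demo.cc -latomic
#include <atomic>
#include <cstdint>
#include <cstdio>

int main() {
  std::atomic<uint64_t> counter{0};  // 64-bit atomic on a 32-bit target
  counter.fetch_add(1);              // typically becomes a libatomic call on rv32
  std::printf("lock free: %d\n", counter.is_lock_free() ? 1 : 0);  // expect 0
}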
OWNERS (2 changed lines)
@@ -27,5 +27,5 @@ per-file ...-loong64*=file:LOONG_OWNERS
 per-file ...-mips*=file:MIPS_OWNERS
 per-file ...-mips64*=file:MIPS_OWNERS
 per-file ...-ppc*=file:PPC_OWNERS
-per-file ...-riscv64*=file:RISCV_OWNERS
+per-file ...-riscv*=file:RISCV_OWNERS
 per-file ...-s390*=file:S390_OWNERS
@@ -96,7 +96,8 @@ if (v8_snapshot_toolchain == "") {
     } else {
       _cpus = "x64_v8_${v8_current_cpu}"
     }
-  } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
+  } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" ||
+             v8_current_cpu == "riscv32") {
     _cpus = "x86_v8_${v8_current_cpu}"
   } else {
     # This branch should not be reached; leave _cpus blank so the assert
@@ -20,7 +20,7 @@ struct CalleeSavedRegisters {
 #elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||   \
     V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC ||    \
     V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \
-    V8_TARGET_ARCH_LOONG64
+    V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
 struct CalleeSavedRegisters {};
 #else
 #error Target architecture was not detected as supported by v8
@@ -674,6 +674,9 @@ V8 shared library set USING_V8_SHARED.
 #if __riscv_xlen == 64
 #define V8_HOST_ARCH_RISCV64 1
 #define V8_HOST_ARCH_64_BIT 1
+#elif __riscv_xlen == 32
+#define V8_HOST_ARCH_RISCV32 1
+#define V8_HOST_ARCH_32_BIT 1
 #else
 #error "Cannot detect Riscv's bitwidth"
 #endif
@@ -689,7 +692,8 @@ V8 shared library set USING_V8_SHARED.
 #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM &&      \
     !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
     !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 &&    \
-    !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
+    !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \
+    !V8_TARGET_ARCH_RISCV32
 #if defined(_M_X64) || defined(__x86_64__)
 #define V8_TARGET_ARCH_X64 1
 #elif defined(_M_IX86) || defined(__i386__)
@@ -714,6 +718,8 @@ V8 shared library set USING_V8_SHARED.
 #elif defined(__riscv) || defined(__riscv__)
 #if __riscv_xlen == 64
 #define V8_TARGET_ARCH_RISCV64 1
+#elif __riscv_xlen == 32
+#define V8_TARGET_ARCH_RISCV32 1
 #endif
 #else
 #error Target architecture was not detected as supported by v8
@@ -753,6 +759,8 @@ V8 shared library set USING_V8_SHARED.
 #endif
 #elif V8_TARGET_ARCH_RISCV64
 #define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_RISCV32
+#define V8_TARGET_ARCH_32_BIT 1
 #else
 #error Unknown target architecture pointer size
 #endif
@@ -784,6 +792,9 @@ V8 shared library set USING_V8_SHARED.
 #if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
 #error Target architecture riscv64 is only supported on riscv64 and x64 host
 #endif
+#if (V8_TARGET_ARCH_RISCV32 && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_RISCV32))
+#error Target architecture riscv32 is only supported on riscv32 and ia32 host
+#endif
 #if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64))
 #error Target architecture loong64 is only supported on loong64 and x64 host
 #endif
@@ -823,7 +834,7 @@ V8 shared library set USING_V8_SHARED.
 #else
 #define V8_TARGET_BIG_ENDIAN 1
 #endif
-#elif V8_TARGET_ARCH_RISCV64
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
 #define V8_TARGET_LITTLE_ENDIAN 1
 #elif defined(__BYTE_ORDER__)
 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
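To illustrate the `__riscv_xlen` dispatch used in the hunks above: RISC-V compilers predefine `__riscv` and set `__riscv_xlen` to the native register width, which is all the header needs to separate riscv32 from riscv64. A standalone sketch, not part of the commit:

// Mirrors the detection logic above; compiles to nothing but the checks.
#if defined(__riscv)
#if __riscv_xlen == 64
static_assert(sizeof(void*) == 8, "riscv64 uses 64-bit pointers");
#elif __riscv_xlen == 32
static_assert(sizeof(void*) == 4, "riscv32 uses 32-bit pointers");
#else
#error "Cannot detect RISC-V bitwidth"
#endif
#endif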
@@ -98,10 +98,10 @@
 // do not support adding noexcept to default members.
 // Disabled on MSVC because constructors of standard containers are not noexcept
 // there.
-#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) &&                      \
-      !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
-      !defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) &&   \
-      !defined(V8_TARGET_ARCH_RISCV64)) ||                                \
+#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) &&                          \
+      !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) &&     \
+      !defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) &&       \
+      !defined(V8_TARGET_ARCH_RISCV64) && !defined(V8_TARGET_ARCH_RISCV32)) || \
      (defined(__clang__) && __cplusplus > 201300L))
 #define V8_NOEXCEPT noexcept
 #else
@@ -354,6 +354,10 @@ void* OS::GetRandomMmapAddr() {
   // TODO(RISCV): We need more information from the kernel to correctly mask
   // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
   raw_addr &= uint64_t{0xFFFFFF0000};
+#elif V8_TARGET_ARCH_RISCV32
+  // TODO(RISCV): We need more information from the kernel to correctly mask
+  // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
+  raw_addr &= 0x3FFFF000;
 #elif V8_TARGET_ARCH_LOONG64
   // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
   // to fulfill request.
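For the riscv32 branch above, the mask 0x3FFFF000 keeps the randomized mmap hint 4 KiB page-aligned and below 1 GiB of virtual address space. A quick standalone check, not from the commit:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t raw_addr = 0xDEADBEEF;  // stand-in for a random value
  raw_addr &= 0x3FFFF000;          // the riscv32 mask from the hunk above
  assert(raw_addr % 4096 == 0);    // low 12 bits cleared: page-aligned
  assert(raw_addr < (1u << 30));   // bits 30 and up cleared: below 1 GiB
}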
@@ -685,6 +689,8 @@ void OS::DebugBreak() {
   asm volatile(".word 0x0001");
 #elif V8_HOST_ARCH_RISCV64
   asm("ebreak");
+#elif V8_HOST_ARCH_RISCV32
+  asm("ebreak");
 #else
 #error Unsupported host architecture.
 #endif
@@ -32,8 +32,8 @@
 #include "src/baseline/ppc/baseline-assembler-ppc-inl.h"
 #elif V8_TARGET_ARCH_S390X
 #include "src/baseline/s390/baseline-assembler-s390-inl.h"
-#elif V8_TARGET_ARCH_RISCV64
-#include "src/baseline/riscv64/baseline-assembler-riscv64-inl.h"
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#include "src/baseline/riscv/baseline-assembler-riscv-inl.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
 #elif V8_TARGET_ARCH_MIPS
@@ -48,7 +48,9 @@
 #elif V8_TARGET_ARCH_S390X
 #include "src/baseline/s390/baseline-compiler-s390-inl.h"
 #elif V8_TARGET_ARCH_RISCV64
-#include "src/baseline/riscv64/baseline-compiler-riscv64-inl.h"
+#include "src/baseline/riscv/baseline-compiler-riscv-inl.h"
+#elif V8_TARGET_ARCH_RISCV32
+#include "src/baseline/riscv/baseline-compiler-riscv-inl.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
 #elif V8_TARGET_ARCH_MIPS
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
-#define V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+#ifndef V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
+#define V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
 
 #include "src/baseline/baseline-assembler.h"
 #include "src/codegen/assembler-inl.h"
@@ -79,8 +79,8 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
 }
 void BaselineAssembler::RegisterFrameAddress(
     interpreter::Register interpreter_register, Register rscratch) {
-  return __ Add64(rscratch, fp,
-                  interpreter_register.ToOperand() * kSystemPointerSize);
+  return __ AddWord(rscratch, fp,
+                    interpreter_register.ToOperand() * kSystemPointerSize);
 }
 MemOperand BaselineAssembler::FeedbackVectorOperand() {
   return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
@@ -163,7 +163,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
     __ GetObjectType(map, type, type);
     __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
   }
-  __ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ LoadWord(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
   __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
 }
 void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
@@ -171,7 +171,7 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
                                       Label::Distance) {
   ScratchRegisterScope temps(this);
   Register temp = temps.AcquireScratch();
-  __ Ld(temp, operand);
+  __ LoadWord(temp, operand);
   __ Branch(target, AsMasmCondition(cc), value, Operand(temp));
 }
 void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
@@ -195,7 +195,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
   // todo: compress pointer
   ScratchRegisterScope temps(this);
   Register scratch = temps.AcquireScratch();
-  __ Ld(scratch, operand);
+  __ LoadWord(scratch, operand);
   __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
 }
 void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
@@ -204,7 +204,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
   // todo: compress pointer
   ScratchRegisterScope temps(this);
   Register scratch = temps.AcquireScratch();
-  __ Ld(scratch, operand);
+  __ LoadWord(scratch, operand);
   __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
 }
 void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
@@ -219,7 +219,7 @@ void BaselineAssembler::Move(Register output, TaggedIndex value) {
   __ li(output, Operand(value.ptr()));
 }
 void BaselineAssembler::Move(MemOperand output, Register source) {
-  __ Sd(source, output);
+  __ StoreWord(source, output);
 }
 void BaselineAssembler::Move(Register output, ExternalReference reference) {
   __ li(output, Operand(reference));
@@ -446,8 +446,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   __ Add32(interrupt_budget, interrupt_budget, weight);
   __ Sw(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
-  if (skip_interrupt_label)
+  if (skip_interrupt_label) {
     __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
+  }
 }
 
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
@@ -510,34 +511,33 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
   if (SmiValuesAre31Bits()) {
     __ Add32(lhs, lhs, Operand(rhs));
   } else {
-    __ Add64(lhs, lhs, Operand(rhs));
+    __ AddWord(lhs, lhs, Operand(rhs));
   }
 }
 
 void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
   __ And(output, lhs, Operand(rhs));
 }
 
 void BaselineAssembler::Switch(Register reg, int case_value_base,
                                Label** labels, int num_labels) {
   ASM_CODE_COMMENT(masm_);
   Label fallthrough;
   if (case_value_base != 0) {
-    __ Sub64(reg, reg, Operand(case_value_base));
+    __ SubWord(reg, reg, Operand(case_value_base));
   }
 
   // Mostly copied from code-generator-riscv64.cc
   ScratchRegisterScope scope(this);
   Label table;
   __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
-            reg, Operand(int64_t(num_labels)));
+            reg, Operand(num_labels));
   int64_t imm64;
   imm64 = __ branch_long_offset(&table);
   CHECK(is_int32(imm64 + 0x800));
   int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
   int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
   __ BlockTrampolinePoolFor(2);
-  __ auipc(t6, Hi20);  // Read PC + Hi20 into t6
+  __ auipc(t6, Hi20);     // Read PC + Hi20 into t6
   __ addi(t6, t6, Lo12);  // jump PC + Hi20 + Lo12
 
   int entry_size_log2 = 3;
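A sanity check of the Hi20/Lo12 split in the Switch code above: auipc adds Hi20 << 12 to the PC, and the + 0x800 rounding compensates for addi sign-extending its 12-bit immediate. A standalone sketch, not part of the commit; it deliberately mirrors the shift expressions used there:

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t imm : {0x12345678, -0x1234, 0x7FF, -0x800}) {
    int32_t hi20 = (imm + 0x800) >> 12;  // immediate for auipc
    int32_t lo12 = (imm << 20) >> 20;    // sign-extended immediate for addi
    assert((hi20 << 12) + lo12 == imm);  // the pair reconstructs the offset
  }
}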
@@ -582,7 +582,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
   __ masm()->SmiUntag(params_size);
 
-    __ Bind(&skip_interrupt_label);
+  __ Bind(&skip_interrupt_label);
 }
 
 BaselineAssembler::ScratchRegisterScope temps(&basm);
@@ -619,4 +619,4 @@ inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+#endif  // V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
-#define V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+#ifndef V8_BASELINE_RISCV_BASELINE_COMPILER_RISCV_INL_H_
+#define V8_BASELINE_RISCV_BASELINE_COMPILER_RISCV_INL_H_
 
 #include "src/baseline/baseline-compiler.h"
 
@@ -39,10 +39,10 @@ void BaselineCompiler::PrologueFillFrame() {
   const bool has_new_target = new_target_index != kMaxInt;
   if (has_new_target) {
     DCHECK_LE(new_target_index, register_count);
-    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
+    __ masm()->AddWord(sp, sp, Operand(-(kPointerSize * new_target_index)));
     for (int i = 0; i < new_target_index; i++) {
-      __ masm()->Sd(kInterpreterAccumulatorRegister,
-                    MemOperand(sp, i * kSystemPointerSize));
+      __ masm()->StoreWord(kInterpreterAccumulatorRegister,
+                           MemOperand(sp, i * kSystemPointerSize));
     }
     // Push new_target_or_generator.
     __ Push(kJavaScriptCallNewTargetRegister);
@@ -50,25 +50,25 @@ void BaselineCompiler::PrologueFillFrame() {
   }
   if (register_count < 2 * kLoopUnrollSize) {
     // If the frame is small enough, just unroll the frame fill completely.
-    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    __ masm()->AddWord(sp, sp, Operand(-(kPointerSize * register_count)));
     for (int i = 0; i < register_count; ++i) {
-      __ masm()->Sd(kInterpreterAccumulatorRegister,
-                    MemOperand(sp, i * kSystemPointerSize));
+      __ masm()->StoreWord(kInterpreterAccumulatorRegister,
+                           MemOperand(sp, i * kSystemPointerSize));
     }
   } else {
-    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    __ masm()->AddWord(sp, sp, Operand(-(kPointerSize * register_count)));
     for (int i = 0; i < register_count; ++i) {
-      __ masm()->Sd(kInterpreterAccumulatorRegister,
-                    MemOperand(sp, i * kSystemPointerSize));
+      __ masm()->StoreWord(kInterpreterAccumulatorRegister,
+                           MemOperand(sp, i * kSystemPointerSize));
     }
   }
 }
 
 void BaselineCompiler::VerifyFrameSize() {
   ASM_CODE_COMMENT(&masm_);
-  __ masm()->Add64(kScratchReg, sp,
-                   Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
-                           bytecode_->frame_size()));
+  __ masm()->AddWord(kScratchReg, sp,
+                     Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+                             bytecode_->frame_size()));
   __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
                     Operand(fp));
 }
@@ -79,4 +79,4 @@ void BaselineCompiler::VerifyFrameSize() {
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+#endif  // V8_BASELINE_RISCV_BASELINE_COMPILER_RISCV_INL_H_
@@ -251,7 +251,7 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u32);
   Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
       AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
-#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
+#if (V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6) || V8_TARGET_ARCH_RISCV32
   BIND(&i64);
   Goto(&u64);
 
@@ -268,7 +268,8 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u64);
   Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
       AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
-#endif
+#endif  //(V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6) ||
+        // V8_TARGET_ARCH_RISCV32
 
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
@@ -523,8 +524,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
   Unreachable();
-#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ||
-        // V8_TARGET_ARCH_RISCV64
+#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
 
   BIND(&detached_or_out_of_bounds);
   {
(File diff suppressed because it is too large)
@@ -25,8 +25,8 @@
 #include "src/codegen/loong64/assembler-loong64.h"
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/assembler-s390.h"
-#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/assembler-riscv64.h"
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv/assembler-riscv.h"
 #else
 #error Unknown architecture.
 #endif
@@ -25,8 +25,8 @@
 #include "src/codegen/loong64/assembler-loong64-inl.h"
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/assembler-s390-inl.h"
-#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/assembler-riscv64-inl.h"
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv/assembler-riscv-inl.h"
 #else
 #error Unknown architecture.
 #endif
@@ -459,7 +459,7 @@ void ConstantPool::MaybeCheck() {
 
 #endif  // defined(V8_TARGET_ARCH_ARM64)
 
-#if defined(V8_TARGET_ARCH_RISCV64)
+#if defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)
 
 // Constant Pool.
 
@@ -706,7 +706,7 @@ void ConstantPool::MaybeCheck() {
   }
 }
 
-#endif  // defined(V8_TARGET_ARCH_RISCV64)
+#endif  // defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)
 
 }  // namespace internal
 }  // namespace v8
@@ -163,7 +163,8 @@ class ConstantPoolBuilder {
 
 #endif  // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
 
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64) || \
+    defined(V8_TARGET_ARCH_RISCV32)
 
 class ConstantPoolKey {
  public:
@@ -345,7 +346,8 @@ class ConstantPool {
   int blocked_nesting_ = 0;
 };
 
-#endif  // defined(V8_TARGET_ARCH_ARM64)
+#endif  // defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64) ||
+        // defined(V8_TARGET_ARCH_RISCV32)
 
 }  // namespace internal
 }  // namespace v8
@@ -23,8 +23,8 @@
 #include "src/codegen/s390/constants-s390.h"
 #elif V8_TARGET_ARCH_X64
 #include "src/codegen/x64/constants-x64.h"
-#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/constants-riscv64.h"
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv/constants-riscv.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -76,6 +76,10 @@ enum CpuFeature {
   FPU,
   FP64FPU,
   RISCV_SIMD,
+#elif V8_TARGET_ARCH_RISCV32
+  FPU,
+  FP64FPU,
+  RISCV_SIMD,
 #endif
 
   NUMBER_OF_CPU_FEATURES
@@ -757,7 +757,7 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
 #define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
 #elif V8_TARGET_ARCH_S390
 #define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
-#elif V8_TARGET_ARCH_RISCV64
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
 #define re_stack_check_func RegExpMacroAssemblerRISCV::CheckStackGuardState
 #else
 UNREACHABLE();
@@ -29,8 +29,8 @@
 #include "src/codegen/mips/interface-descriptors-mips-inl.h"
 #elif V8_TARGET_ARCH_LOONG64
 #include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
-#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv/interface-descriptors-riscv-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -336,7 +336,7 @@ constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM ||       \
     V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 ||      \
     V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \
-    V8_TARGET_ARCH_LOONG64
+    V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
   return RegisterArray(
       kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
       kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
@@ -357,7 +357,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
 #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 ||      \
     V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 ||       \
     V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
-    V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
+    V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
   return RegisterArray(ParamsSizeRegister(), WeightRegister());
 #else
   return DefaultRegisterArray();
@@ -63,9 +63,9 @@ enum class SmiCheck { kOmit, kInline };
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/constants-s390.h"
 #include "src/codegen/s390/macro-assembler-s390.h"
-#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/constants-riscv64.h"
-#include "src/codegen/riscv64/macro-assembler-riscv64.h"
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv/constants-riscv.h"
+#include "src/codegen/riscv/macro-assembler-riscv.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -25,8 +25,8 @@
 #include "src/codegen/loong64/register-loong64.h"
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/register-s390.h"
-#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/register-riscv64.h"
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv/register-riscv.h"
 #else
 #error Unknown architecture.
 #endif
@@ -19,7 +19,7 @@ static const int kMaxAllocatableGeneralRegisterCount =
     ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
 static const int kMaxAllocatableDoubleRegisterCount =
     ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;
-#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
 static const int kMaxAllocatableSIMD128RegisterCount =
     ALLOCATABLE_SIMD128_REGISTERS(REGISTER_COUNT) 0;
 #endif
@@ -38,16 +38,17 @@ static const int kAllocatableNoVFP32DoubleCodes[] = {
 #endif  // V8_TARGET_ARCH_ARM
 #undef REGISTER_CODE
 
-#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
 static const int kAllocatableSIMD128Codes[] = {
-#if V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32
 #define REGISTER_CODE(R) kVRCode_##R,
 #else
 #define REGISTER_CODE(R) kSimd128Code_##R,
 #endif
     ALLOCATABLE_SIMD128_REGISTERS(REGISTER_CODE)};
 #undef REGISTER_CODE
-#endif  // V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#endif  // V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 ||
+        // V8_TARGET_ARCH_PPC64
 
 static_assert(RegisterConfiguration::kMaxGeneralRegisters >=
               Register::kNumRegisters);
@@ -95,6 +96,8 @@ static int get_num_allocatable_double_registers() {
       kMaxAllocatableDoubleRegisterCount;
 #elif V8_TARGET_ARCH_RISCV64
       kMaxAllocatableDoubleRegisterCount;
+#elif V8_TARGET_ARCH_RISCV32
+      kMaxAllocatableDoubleRegisterCount;
 #else
 #error Unsupported target architecture.
 #endif
@@ -104,7 +107,7 @@ static int get_num_allocatable_double_registers() {
 
 static int get_num_allocatable_simd128_registers() {
   return
-#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
       kMaxAllocatableSIMD128RegisterCount;
 #else
       0;
@@ -125,7 +128,7 @@ static const int* get_allocatable_double_codes() {
 
 static const int* get_allocatable_simd128_codes() {
   return
-#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
      kAllocatableSIMD128Codes;
 #else
      kAllocatableDoubleCodes;
@@ -23,8 +23,8 @@
 #include "src/codegen/loong64/reglist-loong64.h"
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/reglist-s390.h"
-#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/reglist-riscv64.h"
+#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv/reglist-riscv.h"
 #else
 #error Unknown architecture.
 #endif
@@ -309,10 +309,11 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
     defined(V8_TARGET_ARCH_X64)
   return false;
-#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) ||   \
-    defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) ||    \
-    defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) ||    \
-    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
+#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) ||     \
+    defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) ||      \
+    defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) ||      \
+    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \
+    defined(V8_TARGET_ARCH_RISCV32)
   return true;
 #endif
 }
@@ -72,7 +72,8 @@ class RelocInfo {
     EXTERNAL_REFERENCE,  // The address of an external C++ function.
     INTERNAL_REFERENCE,  // An address inside the same function.
 
-    // Encoded internal reference, used only on RISCV64, MIPS, MIPS64 and PPC.
+    // Encoded internal reference, used only on RISCV64, RISCV32, MIPS, MIPS64
+    // and PPC.
     INTERNAL_REFERENCE_ENCODED,
 
     // An off-heap instruction stream target. See http://goo.gl/Z2HUiM.
@@ -32,11 +32,11 @@
 // modified significantly by Google Inc.
 // Copyright 2021 the V8 project authors. All rights reserved.
 
-#ifndef V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
-#define V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
+#ifndef V8_CODEGEN_RISCV_ASSEMBLER_RISCV_INL_H_
+#define V8_CODEGEN_RISCV_ASSEMBLER_RISCV_INL_H_
 
 #include "src/codegen/assembler-arch.h"
 #include "src/codegen/assembler.h"
 #include "src/codegen/riscv64/assembler-riscv64.h"
 #include "src/debug/debug.h"
 #include "src/objects/objects-inl.h"
 
@@ -45,15 +45,10 @@ namespace internal {
 
 bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
 
-// -----------------------------------------------------------------------------
-// Operand and MemOperand.
-
-bool Operand::is_reg() const { return rm_.is_valid(); }
-
-int64_t Operand::immediate() const {
-  DCHECK(!is_reg());
-  DCHECK(!IsHeapObjectRequest());
-  return value_.immediate;
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+}
 
 // -----------------------------------------------------------------------------
@@ -91,7 +86,11 @@ Address RelocInfo::target_address_address() {
   // place, ready to be patched with the target. After jump optimization,
   // that is the address of the instruction that follows J/JAL/JR/JALR
   // instruction.
+#ifdef V8_TARGET_ARCH_RISCV64
   return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
+#elif defined(V8_TARGET_ARCH_RISCV32)
+  return pc_ + Assembler::kInstructionsFor32BitConstant * kInstrSize;
+#endif
 }
 
 Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
@@ -142,7 +141,11 @@ int Assembler::deserialization_special_target_size(
 
 void Assembler::set_target_internal_reference_encoded_at(Address pc,
                                                          Address target) {
+#ifdef V8_TARGET_ARCH_RISCV64
   set_target_value_at(pc, static_cast<uint64_t>(target));
+#elif defined(V8_TARGET_ARCH_RISCV32)
+  set_target_value_at(pc, static_cast<uint32_t>(target));
+#endif
 }
 
 void Assembler::deserialization_set_target_internal_reference_at(
@@ -279,49 +282,9 @@ void RelocInfo::WipeOut() {
   }
 }
 
-// -----------------------------------------------------------------------------
-// Assembler.
-
-void Assembler::CheckBuffer() {
-  if (buffer_space() <= kGap) {
-    GrowBuffer();
-  }
-}
-
-template <typename T>
-void Assembler::EmitHelper(T x) {
-  *reinterpret_cast<T*>(pc_) = x;
-  pc_ += sizeof(x);
-}
-
-void Assembler::emit(Instr x) {
-  if (!is_buffer_growth_blocked()) {
-    CheckBuffer();
-  }
-  DEBUG_PRINTF("%p: ", pc_);
-  disassembleInstr(x);
-  EmitHelper(x);
-  CheckTrampolinePoolQuick();
-}
-
-void Assembler::emit(ShortInstr x) {
-  if (!is_buffer_growth_blocked()) {
-    CheckBuffer();
-  }
-  DEBUG_PRINTF("%p: ", pc_);
-  disassembleInstr(x);
-  EmitHelper(x);
-  CheckTrampolinePoolQuick();
-}
-
-void Assembler::emit(uint64_t data) {
-  if (!is_buffer_growth_blocked()) CheckBuffer();
-  EmitHelper(data);
-}
-
 EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
 
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
+#endif  // V8_CODEGEN_RISCV_ASSEMBLER_RISCV_INL_H_
src/codegen/riscv/assembler-riscv.cc (new file, 1927 lines)
(File diff suppressed because it is too large)
src/codegen/riscv/assembler-riscv.h (new file, 848 lines)
@@ -0,0 +1,848 @@
|
||||
// Copyright (c) 1994-2006 Sun Microsystems Inc.
|
||||
// All Rights Reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// - Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// - Redistribution in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution.
|
||||
//
|
||||
// - Neither the name of Sun Microsystems or the names of contributors may
|
||||
// be used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// The original source code covered by the above license above has been
|
||||
// modified significantly by Google Inc.
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
|
||||
#ifndef V8_CODEGEN_RISCV_ASSEMBLER_RISCV_H_
|
||||
#define V8_CODEGEN_RISCV_ASSEMBLER_RISCV_H_
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <memory>
|
||||
#include <set>
|
||||
|
||||
#include "src/codegen/assembler.h"
|
||||
#include "src/codegen/constant-pool.h"
|
||||
#include "src/codegen/constants-arch.h"
|
||||
#include "src/codegen/external-reference.h"
|
||||
#include "src/codegen/flush-instruction-cache.h"
|
||||
#include "src/codegen/label.h"
|
||||
#include "src/codegen/riscv/base-assembler-riscv.h"
|
||||
#include "src/codegen/riscv/base-riscv-i.h"
|
||||
#include "src/codegen/riscv/extension-riscv-a.h"
|
||||
#include "src/codegen/riscv/extension-riscv-c.h"
|
||||
#include "src/codegen/riscv/extension-riscv-d.h"
|
||||
#include "src/codegen/riscv/extension-riscv-f.h"
|
||||
#include "src/codegen/riscv/extension-riscv-m.h"
|
||||
#include "src/codegen/riscv/extension-riscv-v.h"
|
||||
#include "src/codegen/riscv/extension-riscv-zicsr.h"
|
||||
#include "src/codegen/riscv/extension-riscv-zifencei.h"
|
||||
#include "src/codegen/riscv/register-riscv.h"
|
||||
#include "src/objects/contexts.h"
|
||||
#include "src/objects/smi.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
#define DEBUG_PRINTF(...) \
|
||||
if (FLAG_riscv_debug) { \
|
||||
printf(__VA_ARGS__); \
|
||||
}
|
||||
|
||||
class SafepointTableBuilder;
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Machine instruction Operands.
|
||||
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
|
||||
constexpr uintptr_t kSmiShiftMask = (1UL << kSmiShift) - 1;
|
||||
// Class Operand represents a shifter operand in data processing instructions.
|
||||
class Operand {
|
||||
public:
|
||||
// Immediate.
|
||||
V8_INLINE explicit Operand(intptr_t immediate,
|
||||
RelocInfo::Mode rmode = RelocInfo::NO_INFO)
|
||||
: rm_(no_reg), rmode_(rmode) {
|
||||
value_.immediate = immediate;
|
||||
}
|
||||
V8_INLINE explicit Operand(const ExternalReference& f)
|
||||
: rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
|
||||
value_.immediate = static_cast<intptr_t>(f.address());
|
||||
}
|
||||
|
||||
explicit Operand(Handle<HeapObject> handle);
|
||||
V8_INLINE explicit Operand(Smi value)
|
||||
: rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
|
||||
value_.immediate = static_cast<intptr_t>(value.ptr());
|
||||
}
|
||||
|
||||
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
|
||||
static Operand EmbeddedStringConstant(const StringConstantBase* str);
|
||||
|
||||
// Register.
|
||||
V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
|
||||
|
||||
// Return true if this is a register operand.
|
||||
V8_INLINE bool is_reg() const { return rm_.is_valid(); }
|
||||
inline intptr_t immediate() const {
|
||||
DCHECK(!is_reg());
|
||||
DCHECK(!IsHeapObjectRequest());
|
||||
return value_.immediate;
|
||||
}
|
||||
|
||||
bool IsImmediate() const { return !rm_.is_valid(); }
|
||||
|
||||
HeapObjectRequest heap_object_request() const {
|
||||
DCHECK(IsHeapObjectRequest());
|
||||
return value_.heap_object_request;
|
||||
}
|
||||
|
||||
bool IsHeapObjectRequest() const {
|
||||
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
|
||||
DCHECK_IMPLIES(is_heap_object_request_,
|
||||
rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
|
||||
rmode_ == RelocInfo::CODE_TARGET);
|
||||
return is_heap_object_request_;
|
||||
}
|
||||
|
||||
Register rm() const { return rm_; }
|
||||
|
||||
RelocInfo::Mode rmode() const { return rmode_; }
|
||||
|
||||
private:
|
||||
Register rm_;
|
||||
union Value {
|
||||
Value() {}
|
||||
HeapObjectRequest heap_object_request; // if is_heap_object_request_
|
||||
intptr_t immediate; // otherwise
|
||||
} value_; // valid if rm_ == no_reg
|
||||
bool is_heap_object_request_ = false;
|
||||
RelocInfo::Mode rmode_;
|
||||
|
||||
friend class Assembler;
|
||||
friend class MacroAssembler;
|
||||
};
|
||||
|
||||
// On RISC-V we have only one addressing mode with base_reg + offset.
|
||||
// Class MemOperand represents a memory operand in load and store instructions.
|
||||
class V8_EXPORT_PRIVATE MemOperand : public Operand {
|
||||
public:
|
||||
// Immediate value attached to offset.
|
||||
enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
|
||||
|
||||
explicit MemOperand(Register rn, int32_t offset = 0);
|
||||
explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
|
||||
OffsetAddend offset_addend = offset_zero);
|
||||
int32_t offset() const { return offset_; }
|
||||
|
||||
void set_offset(int32_t offset) { offset_ = offset; }
|
||||
|
||||
bool OffsetIsInt12Encodable() const { return is_int12(offset_); }
|
||||
|
||||
private:
|
||||
int32_t offset_;
|
||||
|
||||
friend class Assembler;
|
||||
};
|
||||
|
||||
class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
|
||||
public AssemblerRISCVI,
|
||||
public AssemblerRISCVA,
|
||||
public AssemblerRISCVF,
|
||||
public AssemblerRISCVD,
|
||||
public AssemblerRISCVM,
|
||||
public AssemblerRISCVC,
|
||||
public AssemblerRISCVZifencei,
|
||||
public AssemblerRISCVZicsr,
|
||||
public AssemblerRISCVV {
|
||||
public:
|
||||
// Create an assembler. Instructions and relocation information are emitted
|
||||
// into a buffer, with the instructions starting from the beginning and the
|
||||
// relocation information starting from the end of the buffer. See CodeDesc
|
||||
// for a detailed comment on the layout (globals.h).
|
||||
//
|
||||
// If the provided buffer is nullptr, the assembler allocates and grows its
|
||||
// own buffer. Otherwise it takes ownership of the provided buffer.
|
||||
explicit Assembler(const AssemblerOptions&,
|
||||
std::unique_ptr<AssemblerBuffer> = {});
|
||||
|
||||
virtual ~Assembler();
|
||||
void AbortedCodeGeneration();
|
||||
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
|
||||
static constexpr int kNoHandlerTable = 0;
|
||||
static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
|
||||
void GetCode(Isolate* isolate, CodeDesc* desc,
|
||||
SafepointTableBuilder* safepoint_table_builder,
|
||||
int handler_table_offset);
|
||||
|
||||
// Convenience wrapper for code without safepoint or handler tables.
|
||||
void GetCode(Isolate* isolate, CodeDesc* desc) {
|
||||
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
|
||||
}
|
||||
|
||||
// Unused on this architecture.
|
||||
void MaybeEmitOutOfLineConstantPool() {}
|
||||
|
||||
// Label operations & relative jumps (PPUM Appendix D).
|
||||
//
|
||||
// Takes a branch opcode (cc) and a label (L) and generates
|
||||
// either a backward branch or a forward branch and links it
|
||||
// to the label fixup chain. Usage:
|
||||
//
|
||||
// Label L; // unbound label
|
||||
// j(cc, &L); // forward branch to unbound label
|
||||
// bind(&L); // bind label to the current pc
|
||||
// j(cc, &L); // backward branch to bound label
|
||||
// bind(&L); // illegal: a label may be bound only once
|
||||
//
|
||||
// Note: The same Label can be used for forward and backward branches
|
||||
// but it may be bound only once.
|
||||
  void bind(Label* L);  // Binds an unbound label L to current code position.

  // Determines if Label is bound and near enough so that branch instruction
  // can be used to reach it, instead of jump instruction.
  bool is_near(Label* L);
  bool is_near(Label* L, OffsetSize bits);
  bool is_near_branch(Label* L);

  // Get offset from instr.
  int BranchOffset(Instr instr);
  static int BrachlongOffset(Instr auipc, Instr jalr);
  static int PatchBranchlongOffset(Address pc, Instr auipc, Instr instr_I,
                                   int32_t offset);

  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  // Manages the jump elimination optimization if the second parameter is true.
  virtual int32_t branch_offset_helper(Label* L, OffsetSize bits);
  uintptr_t jump_address(Label* L);
  int32_t branch_long_offset(Label* L);

  // Puts a label's target address at the given position.
  // The high 8 bits are set to zero.
  void label_at_put(Label* L, int at_offset);

  // Read/Modify the code target address in the branch/call instruction at pc.
  // The isolate argument is unused (and may be nullptr) when skipping flushing.
  static Address target_address_at(Address pc);
  V8_INLINE static void set_target_address_at(
      Address pc, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
    set_target_value_at(pc, target, icache_flush_mode);
  }

  static Address target_address_at(Address pc, Address constant_pool);

  static void set_target_address_at(
      Address pc, Address constant_pool, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  // Read/Modify the code target address in the branch/call instruction at pc.
  inline static Tagged_t target_compressed_address_at(Address pc,
                                                      Address constant_pool);
  inline static void set_target_compressed_address_at(
      Address pc, Address constant_pool, Tagged_t target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  inline Handle<Object> code_target_object_handle_at(Address pc,
                                                     Address constant_pool);
  inline Handle<HeapObject> compressed_embedded_object_handle_at(
      Address pc, Address constant_pool);

  static bool IsConstantPoolAt(Instruction* instr);
  static int ConstantPoolSizeAt(Instruction* instr);
  // See Assembler::CheckConstPool for more info.
  void EmitPoolGuard();

  static void set_target_value_at(
      Address pc, uintptr_t target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  static void JumpLabelToJumpRegister(Address pc);

  // This sets the branch destination (which gets loaded at the call address).
  // This is for calls and branches within generated code. The serializer
  // has already deserialized the lui/ori instructions etc.
  inline static void deserialization_set_special_target_at(
      Address instruction_payload, Code code, Address target);

  // Get the size of the special target encoded at 'instruction_payload'.
  inline static int deserialization_special_target_size(
      Address instruction_payload);

  // This sets the internal reference at the pc.
  inline static void deserialization_set_target_internal_reference_at(
      Address pc, Address target,
      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);

  // Here we are patching the address in the LUI/ADDI instruction pair.
  // These values are used in the serialization process and must be zero for
  // the RISC-V platform, as Code, Embedded Object or External-reference
  // pointers are split across two consecutive instructions and don't exist
  // separately in the code, so the serializer should not step forwards in
  // memory after a target is resolved and written.
  static constexpr int kSpecialTargetSize = 0;

  // Number of consecutive instructions used to store 32bit/64bit constant.
  // This constant is used in RelocInfo::target_address_address() to tell the
  // serializer the address of the instruction that follows the LUI/ADDI
  // instruction pair.
  static constexpr int kInstructionsFor32BitConstant = 2;
  static constexpr int kInstructionsFor64BitConstant = 8;

  // Difference between address of current opcode and value read from pc
  // register.
  static constexpr int kPcLoadDelta = 4;

  // Bits available for offset field in branches
  static constexpr int kBranchOffsetBits = 13;

  // Bits available for offset field in jump
  static constexpr int kJumpOffsetBits = 21;

  // Bits available for offset field in compressed jump
  static constexpr int kCJalOffsetBits = 12;

  // Bits available for offset field in compressed branch
  static constexpr int kCBranchOffsetBits = 9;

  // Max offset for b instructions with 12-bit offset field (multiple of 2)
  static constexpr int kMaxBranchOffset = (1 << (13 - 1)) - 1;

  // Max offset for jal instruction with 20-bit offset field (multiple of 2)
  static constexpr int kMaxJumpOffset = (1 << (21 - 1)) - 1;
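  // Worked example: kMaxBranchOffset = (1 << 12) - 1 = 4095, so conditional
  // branches reach roughly +/-4 KiB, while kMaxJumpOffset = (1 << 20) - 1 =
  // 1048575, so jal reaches roughly +/-1 MiB from the current pc.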

  static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;

  RegList* GetScratchRegisterList() { return &scratch_register_list_; }

  // ---------------------------------------------------------------------------
  // Code generation.

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  // Insert the smallest number of zero bytes possible to align the pc offset
  // to a multiple of m. m must be a power of 2 (>= 2).
  void DataAlign(int m);
  // Aligns code to something that's optimal for a jump target for the platform.
  void CodeTargetAlign();
  void LoopHeaderAlign() { CodeTargetAlign(); }

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
  };

  void NOP();
  void EBREAK();

  // Assembler Pseudo Instructions (Tables 25.2, 25.3, RISC-V Unprivileged ISA)
  void nop();
  void RV_li(Register rd, intptr_t imm);
  // Returns the number of instructions required to load the immediate.
  static int li_estimate(intptr_t imm, bool is_get_temp_reg = false);
  // Loads an immediate, always using 8 instructions, regardless of the value,
  // so that it can be modified later.
  void li_constant(Register rd, intptr_t imm);
  void li_ptr(Register rd, intptr_t imm);
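  // A 32-bit constant is typically materialized as lui (upper 20 bits) plus
  // addi (lower 12 bits), matching kInstructionsFor32BitConstant == 2;
  // li_constant instead emits the fixed worst-case sequence so that later
  // patching never changes the code size.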

  void break_(uint32_t code, bool break_as_stop = false);
  void stop(uint32_t code = kMaxStopCode);

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }

  using BlockConstPoolScope = ConstantPool::BlockScope;
  // Scope class for postponing trampoline pool generation.
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem, int margin = 0)
        : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }

    explicit BlockTrampolinePoolScope(Assembler* assem, PoolEmissionCheck check)
        : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }

   private:
    Assembler* assem_;
    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };
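  // Typical use (sketch): wrap a sequence that must stay contiguous, e.g.
  //   {
  //     BlockTrampolinePoolScope block_pools(this);
  //     ...  // emit instructions that a trampoline pool must not interrupt
  //   }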

  // Class for postponing the assembly buffer growth. Typically used for
  // sequences of instructions that must be emitted as a unit, before
  // buffer growth (and relocation) can occur.
  // This blocking scope is not nestable.
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
  };

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
                         SourcePosition position, int id);

  static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
                                       intptr_t pc_delta);
  static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
                                        intptr_t pc_delta);

  // Writes a single byte or word of data in the code stream. Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
  void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
  void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
    dq(data, rmode);
  }
  void dd(Label* label);

  Instruction* pc() const { return reinterpret_cast<Instruction*>(pc_); }

  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);

  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer.
  inline intptr_t available_space() const {
    return reloc_info_writer.pos() - pc_;
  }

  // Read/patch instructions.
  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(Address pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) {
    return *reinterpret_cast<Instr*>(buffer_start_ + pos);
  }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
  }

  void instr_at_put(int pos, ShortInstr instr) {
    *reinterpret_cast<ShortInstr*>(buffer_start_ + pos) = instr;
  }

  Address toAddress(int pos) {
    return reinterpret_cast<Address>(buffer_start_ + pos);
  }

  void CheckTrampolinePool();

  // Get the code target object for a pc-relative call or jump.
  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
      Address pc_) const;

  inline int UnboundLabelsCount() { return unbound_labels_count_; }

  using BlockPoolsScope = BlockTrampolinePoolScope;

  void RecordConstPool(int size);

  void ForceConstantPoolEmissionWithoutJump() {
    constpool_.Check(Emission::kForced, Jump::kOmitted);
  }
  void ForceConstantPoolEmissionWithJump() {
    constpool_.Check(Emission::kForced, Jump::kRequired);
  }
  // Check if the const pool needs to be emitted while pretending that {margin}
  // more bytes of instructions have already been emitted.
  void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
    constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
  }

  void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
    constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
  }

  void RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
    constpool_.RecordEntry(data, rmode);
  }

  void RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
    constpool_.RecordEntry(data, rmode);
  }

  void CheckTrampolinePoolQuick(int extra_instructions = 0) {
    DEBUG_PRINTF("\tpc_offset:%d %d\n", pc_offset(),
                 next_buffer_check_ - extra_instructions * kInstrSize);
    if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
      CheckTrampolinePool();
    }
  }

  friend class VectorUnit;
  class VectorUnit {
   public:
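    // SEW (selected element width) in bits: E8=0 -> 8, E16=1 -> 16,
    // E32=2 -> 32, E64=3 -> 64, i.e. 8 << sew_ == 1 << (sew_ + 3).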
    inline int32_t sew() const { return 1 << (sew_ + 3); }

    inline int32_t vlmax() const {
      if ((lmul_ & 0b100) != 0) {
        return (kRvvVLEN / sew()) >> (lmul_ & 0b11);
      } else {
        return ((kRvvVLEN << lmul_) / sew());
      }
    }
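    // Worked example: with kRvvVLEN = 128, SEW = 32 (E32) and an integer
    // LMUL = 2 (m2, encoding 0b001), vlmax = (128 << 1) / 32 = 8 elements;
    // the 0b100 bit marks the fractional LMUL encodings, which divide
    // instead of multiply.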

    explicit VectorUnit(Assembler* assm) : assm_(assm) {}

    void set(Register rd, VSew sew, Vlmul lmul) {
      if (sew != sew_ || lmul != lmul_ || vl != vlmax()) {
        sew_ = sew;
        lmul_ = lmul;
        vl = vlmax();
        assm_->vsetvlmax(rd, sew_, lmul_);
      }
    }

    void set(Register rd, int8_t sew, int8_t lmul) {
      DCHECK_GE(sew, E8);
      DCHECK_LE(sew, E64);
      DCHECK_GE(lmul, m1);
      DCHECK_LE(lmul, mf2);
      set(rd, VSew(sew), Vlmul(lmul));
    }

    void set(FPURoundingMode mode) {
      if (mode_ != mode) {
        assm_->addi(kScratchReg, zero_reg, mode << kFcsrFrmShift);
        assm_->fscsr(kScratchReg);
        mode_ = mode;
      }
    }
    void set(Register rd, Register rs1, VSew sew, Vlmul lmul) {
      if (sew != sew_ || lmul != lmul_) {
        sew_ = sew;
        lmul_ = lmul;
        vl = 0;
        assm_->vsetvli(rd, rs1, sew_, lmul_);
      }
    }

    void set(VSew sew, Vlmul lmul) {
      if (sew != sew_ || lmul != lmul_) {
        sew_ = sew;
        lmul_ = lmul;
        assm_->vsetvl(sew_, lmul_);
      }
    }

   private:
    VSew sew_ = E8;
    Vlmul lmul_ = m1;
    int32_t vl = 0;
    Assembler* assm_;
    FPURoundingMode mode_ = RNE;
  };

  VectorUnit VU;

 protected:
  // Readable constants for the base and offset adjustment helpers; they
  // indicate whether, aside from the offset itself, another value such as
  // offset + 4 must also fit into int16.
  enum class OffsetAccessType : bool {
    SINGLE_ACCESS = false,
    TWO_ACCESSES = true
  };

  // Determine whether the base and offset of a memory load/store need to be
  // adjusted.
  bool NeedAdjustBaseAndOffset(
      const MemOperand& src, OffsetAccessType = OffsetAccessType::SINGLE_ACCESS,
      int second_access_add_to_offset = 4);

  // Helper function for memory load/store using base register and offset.
  void AdjustBaseAndOffset(
      MemOperand* src, Register scratch,
      OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
      int second_access_add_to_offset = 4);

  inline static void set_target_internal_reference_encoded_at(Address pc,
                                                              Address target);

  intptr_t buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos.
  int target_at(int pos, bool is_internal);

  // Patch branch instruction at pos to branch to given branch target pos.
  void target_at_put(int pos, int target_pos, bool is_internal,
                     bool trampoline = false);

  // Say if we need to relocate with this mode.
  bool MustUseReg(RelocInfo::Mode rmode);

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Block the emission of the trampoline pool before pc_offset.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  void StartBlockTrampolinePool() {
    DEBUG_PRINTF("\tStartBlockTrampolinePool\n");
    trampoline_pool_blocked_nesting_++;
  }

  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
    DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n",
                 trampoline_pool_blocked_nesting_);
    if (trampoline_pool_blocked_nesting_ == 0) {
      CheckTrampolinePoolQuick(1);
    }
  }

  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  bool has_exception() const { return internal_trampoline_exception_; }

  bool is_trampoline_emitted() const { return trampoline_emitted_; }

  // Temporarily block automatic assembly buffer growth.
  void StartBlockGrowBuffer() {
    DCHECK(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }

  void EndBlockGrowBuffer() {
    DCHECK(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const { return block_buffer_growth_; }

 private:
  // Avoid overflows for displacements etc.
  static const int kMaximalBufferSize = 512 * MB;

  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes.
  static constexpr int kBufferCheckInterval = 1 * KB / 2;

  // Code generation.
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static constexpr int kGap = 64;
  static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap);

  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  static constexpr int kCheckConstIntervalInst = 32;
  static constexpr int kCheckConstInterval =
      kCheckConstIntervalInst * kInstrSize;

  int next_buffer_check_;  // pc offset of next buffer check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Code emission.
  inline void CheckBuffer();
  void GrowBuffer();
  void emit(Instr x);
  void emit(ShortInstr x);
  void emit(uint64_t x);
  template <typename T>
  inline void EmitHelper(T x);

  static void disassembleInstr(Instr instr);

  // Labels.
  void print(const Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L, bool is_internal);

  // One trampoline consists of:
  // - space for trampoline slots,
  // - space for labels.
  //
  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
  // Space for trampoline slots precedes space for labels. Each label is of one
  // instruction size, so total amount for labels is equal to
  // label_count * kInstrSize.
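  // For example, with kInstrSize == 4 a trampoline with slot_count == 16
  // reserves 16 * 2 * 4 == 128 bytes of slot space.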
  class Trampoline {
   public:
    Trampoline() {
      start_ = 0;
      next_slot_ = 0;
      free_slot_count_ = 0;
      end_ = 0;
    }
    Trampoline(int start, int slot_count) {
      start_ = start;
      next_slot_ = start;
      free_slot_count_ = slot_count;
      end_ = start + slot_count * kTrampolineSlotsSize;
    }
    int start() { return start_; }
    int end() { return end_; }
    int take_slot() {
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space on trampolines.
        // Make sure we fail in debug mode, so we become aware of each case
        // when this happens.
        DCHECK(0);
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
        next_slot_ += kTrampolineSlotsSize;
      }
      return trampoline_slot;
    }

   private:
    int start_;
    int end_;
    int next_slot_;
    int free_slot_count_;
  };

  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // After the trampoline is emitted, long branches are used in generated code
  // for forward branches whose target offsets could be beyond the reach of a
  // branch instruction. We use this information to trigger a different mode
  // of branch generation, using jump instructions rather than regular branch
  // instructions.
  bool trampoline_emitted_ = false;
  static constexpr int kInvalidSlotPos = -1;

  // Internal reference positions, required for unbound internal reference
  // labels.
  std::set<intptr_t> internal_reference_positions_;
  bool is_internal_reference(Label* L) {
    return internal_reference_positions_.find(L->pos()) !=
           internal_reference_positions_.end();
  }

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  RegList scratch_register_list_;

 private:
  ConstantPool constpool_;

  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);

  int WriteCodeComments();

  friend class RegExpMacroAssemblerRISCV;
  friend class RelocInfo;
  friend class BlockTrampolinePoolScope;
  friend class EnsureSpace;
  friend class ConstantPool;
};

class EnsureSpace {
 public:
  explicit inline EnsureSpace(Assembler* assembler);
};

class V8_EXPORT_PRIVATE UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler* assembler);
  ~UseScratchRegisterScope();

  Register Acquire();
  bool hasAvailable() const;
  void Include(const RegList& list) { *available_ |= list; }
  void Exclude(const RegList& list) {
    *available_ &= RegList::FromBits(~list.bits());
  }
  void Include(const Register& reg1, const Register& reg2 = no_reg) {
    RegList list({reg1, reg2});
    Include(list);
  }
  void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
    RegList list({reg1, reg2});
    Exclude(list);
  }

 private:
  RegList* available_;
  RegList old_available_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_ASSEMBLER_RISCV_H_
492 src/codegen/riscv/base-assembler-riscv.cc Normal file
@ -0,0 +1,492 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2021 the V8 project authors. All rights reserved.

#include "src/codegen/riscv/base-assembler-riscv.h"

#include "src/base/cpu.h"

namespace v8 {
namespace internal {

// ----- Top-level instruction formats match those in the ISA manual
// (R, I, S, B, U, J). These match the formats defined in LLVM's
// RISCVInstrFormats.td.
void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
                                   BaseOpcode opcode, Register rd, Register rs1,
                                   Register rs2) {
  DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
         rs1.is_valid() && rs2.is_valid());
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct7 << kFunct7Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
                                   BaseOpcode opcode, FPURegister rd,
                                   FPURegister rs1, FPURegister rs2) {
  DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
         rs1.is_valid() && rs2.is_valid());
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct7 << kFunct7Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
                                   BaseOpcode opcode, Register rd,
                                   FPURegister rs1, Register rs2) {
  DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
         rs1.is_valid() && rs2.is_valid());
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct7 << kFunct7Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
                                   BaseOpcode opcode, FPURegister rd,
                                   Register rs1, Register rs2) {
  DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
         rs1.is_valid() && rs2.is_valid());
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct7 << kFunct7Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
                                   BaseOpcode opcode, FPURegister rd,
                                   FPURegister rs1, Register rs2) {
  DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
         rs1.is_valid() && rs2.is_valid());
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct7 << kFunct7Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
                                   BaseOpcode opcode, Register rd,
                                   FPURegister rs1, FPURegister rs2) {
  DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
         rs1.is_valid() && rs2.is_valid());
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct7 << kFunct7Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrR4(uint8_t funct2, BaseOpcode opcode,
                                    Register rd, Register rs1, Register rs2,
                                    Register rs3, FPURoundingMode frm) {
  DCHECK(is_uint2(funct2) && rd.is_valid() && rs1.is_valid() &&
         rs2.is_valid() && rs3.is_valid() && is_uint3(frm));
  Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrR4(uint8_t funct2, BaseOpcode opcode,
                                    FPURegister rd, FPURegister rs1,
                                    FPURegister rs2, FPURegister rs3,
                                    FPURoundingMode frm) {
  DCHECK(is_uint2(funct2) && rd.is_valid() && rs1.is_valid() &&
         rs2.is_valid() && rs3.is_valid() && is_uint3(frm));
  Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrRAtomic(uint8_t funct5, bool aq, bool rl,
                                         uint8_t funct3, Register rd,
                                         Register rs1, Register rs2) {
  DCHECK(is_uint5(funct5) && is_uint3(funct3) && rd.is_valid() &&
         rs1.is_valid() && rs2.is_valid());
  Instr instr = AMO | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (rl << kRlShift) | (aq << kAqShift) | (funct5 << kFunct5Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrRFrm(uint8_t funct7, BaseOpcode opcode,
                                      Register rd, Register rs1, Register rs2,
                                      FPURoundingMode frm) {
  DCHECK(rd.is_valid() && rs1.is_valid() && rs2.is_valid() && is_uint3(frm));
  Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
                (funct7 << kFunct7Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrI(uint8_t funct3, BaseOpcode opcode,
                                   Register rd, Register rs1, int16_t imm12) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
         (is_uint12(imm12) || is_int12(imm12)));
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrI(uint8_t funct3, BaseOpcode opcode,
                                   FPURegister rd, Register rs1,
                                   int16_t imm12) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
         (is_uint12(imm12) || is_int12(imm12)));
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrIShift(bool arithshift, uint8_t funct3,
                                        BaseOpcode opcode, Register rd,
                                        Register rs1, uint8_t shamt) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
         is_uint6(shamt));
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (shamt << kShamtShift) |
                (arithshift << kArithShiftShift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrIShiftW(bool arithshift, uint8_t funct3,
                                         BaseOpcode opcode, Register rd,
                                         Register rs1, uint8_t shamt) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
         is_uint5(shamt));
  Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
                (rs1.code() << kRs1Shift) | (shamt << kShamtWShift) |
                (arithshift << kArithShiftShift);
  emit(instr);
}
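// S-type layout: imm[11:5] | rs2 | rs1 | funct3 | imm[4:0] | opcode; store
// offsets are plain signed 12-bit immediates split around the register
// fields.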
void AssemblerRiscvBase::GenInstrS(uint8_t funct3, BaseOpcode opcode,
                                   Register rs1, Register rs2, int16_t imm12) {
  DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() &&
         is_int12(imm12));
  Instr instr = opcode | ((imm12 & 0x1f) << 7) |  // bits 4-0
                (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
                (rs2.code() << kRs2Shift) |
                ((imm12 & 0xfe0) << 20);  // bits 11-5
  emit(instr);
}

void AssemblerRiscvBase::GenInstrS(uint8_t funct3, BaseOpcode opcode,
                                   Register rs1, FPURegister rs2,
                                   int16_t imm12) {
  DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() &&
         is_int12(imm12));
  Instr instr = opcode | ((imm12 & 0x1f) << 7) |  // bits 4-0
                (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
                (rs2.code() << kRs2Shift) |
                ((imm12 & 0xfe0) << 20);  // bits 11-5
  emit(instr);
}
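// B-type layout: imm[12|10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode;
// the immediate is a signed, even byte offset, which is why bit 0 must be
// clear and the encoding below scatters the remaining bits.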
void AssemblerRiscvBase::GenInstrB(uint8_t funct3, BaseOpcode opcode,
                                   Register rs1, Register rs2, int16_t imm13) {
  DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() &&
         is_int13(imm13) && ((imm13 & 1) == 0));
  Instr instr = opcode | ((imm13 & 0x800) >> 4) |  // bit  11
                ((imm13 & 0x1e) << 7) |            // bits 4-1
                (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
                (rs2.code() << kRs2Shift) |
                ((imm13 & 0x7e0) << 20) |  // bits 10-5
                ((imm13 & 0x1000) << 19);  // bit  12
  emit(instr);
}

void AssemblerRiscvBase::GenInstrU(BaseOpcode opcode, Register rd,
                                   int32_t imm20) {
  DCHECK(rd.is_valid() && (is_int20(imm20) || is_uint20(imm20)));
  Instr instr = opcode | (rd.code() << kRdShift) | (imm20 << kImm20Shift);
  emit(instr);
}
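// J-type layout: imm[20|10:1|11|19:12] | rd | opcode; like branches, the
// 21-bit immediate is a signed, even byte offset.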
void AssemblerRiscvBase::GenInstrJ(BaseOpcode opcode, Register rd,
                                   int32_t imm21) {
  DCHECK(rd.is_valid() && is_int21(imm21) && ((imm21 & 1) == 0));
  Instr instr = opcode | (rd.code() << kRdShift) |
                (imm21 & 0xff000) |          // bits 19-12
                ((imm21 & 0x800) << 9) |     // bit  11
                ((imm21 & 0x7fe) << 20) |    // bits 10-1
                ((imm21 & 0x100000) << 11);  // bit  20
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCR(uint8_t funct4, BaseOpcode opcode,
                                    Register rd, Register rs2) {
  DCHECK(is_uint4(funct4) && rd.is_valid() && rs2.is_valid());
  ShortInstr instr = opcode | (rs2.code() << kRvcRs2Shift) |
                     (rd.code() << kRvcRdShift) | (funct4 << kRvcFunct4Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCA(uint8_t funct6, BaseOpcode opcode,
                                    Register rd, uint8_t funct, Register rs2) {
  DCHECK(is_uint6(funct6) && rd.is_valid() && rs2.is_valid() &&
         is_uint2(funct));
  ShortInstr instr = opcode | ((rs2.code() & 0x7) << kRvcRs2sShift) |
                     ((rd.code() & 0x7) << kRvcRs1sShift) |
                     (funct6 << kRvcFunct6Shift) | (funct << kRvcFunct2Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCI(uint8_t funct3, BaseOpcode opcode,
                                    Register rd, int8_t imm6) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && is_int6(imm6));
  ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) |
                     (rd.code() << kRvcRdShift) | ((imm6 & 0x20) << 7) |
                     (funct3 << kRvcFunct3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCIU(uint8_t funct3, BaseOpcode opcode,
                                     Register rd, uint8_t uimm6) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint6(uimm6));
  ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
                     (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
                     (funct3 << kRvcFunct3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCIU(uint8_t funct3, BaseOpcode opcode,
                                     FPURegister rd, uint8_t uimm6) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint6(uimm6));
  ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
                     (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
                     (funct3 << kRvcFunct3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCIW(uint8_t funct3, BaseOpcode opcode,
                                     Register rd, uint8_t uimm8) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint8(uimm8));
  ShortInstr instr = opcode | (uimm8 << 5) |
                     ((rd.code() & 0x7) << kRvcRs2sShift) |
                     (funct3 << kRvcFunct3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCSS(uint8_t funct3, BaseOpcode opcode,
                                     Register rs2, uint8_t uimm6) {
  DCHECK(is_uint3(funct3) && rs2.is_valid() && is_uint6(uimm6));
  ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
                     (funct3 << kRvcFunct3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCSS(uint8_t funct3, BaseOpcode opcode,
                                     FPURegister rs2, uint8_t uimm6) {
  DCHECK(is_uint3(funct3) && rs2.is_valid() && is_uint6(uimm6));
  ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
                     (funct3 << kRvcFunct3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCL(uint8_t funct3, BaseOpcode opcode,
                                    Register rd, Register rs1, uint8_t uimm5) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
         is_uint5(uimm5));
  ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
                     ((rd.code() & 0x7) << kRvcRs2sShift) |
                     ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
                     ((rs1.code() & 0x7) << kRvcRs1sShift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCL(uint8_t funct3, BaseOpcode opcode,
                                    FPURegister rd, Register rs1,
                                    uint8_t uimm5) {
  DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
         is_uint5(uimm5));
  ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
                     ((rd.code() & 0x7) << kRvcRs2sShift) |
                     ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
                     ((rs1.code() & 0x7) << kRvcRs1sShift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCJ(uint8_t funct3, BaseOpcode opcode,
                                    uint16_t uint11) {
  DCHECK(is_uint11(uint11));
  ShortInstr instr = opcode | (funct3 << kRvcFunct3Shift) | (uint11 << 2);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCS(uint8_t funct3, BaseOpcode opcode,
                                    Register rs2, Register rs1, uint8_t uimm5) {
  DCHECK(is_uint3(funct3) && rs2.is_valid() && rs1.is_valid() &&
         is_uint5(uimm5));
  ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
                     ((rs2.code() & 0x7) << kRvcRs2sShift) |
                     ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
                     ((rs1.code() & 0x7) << kRvcRs1sShift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCS(uint8_t funct3, BaseOpcode opcode,
                                    FPURegister rs2, Register rs1,
                                    uint8_t uimm5) {
  DCHECK(is_uint3(funct3) && rs2.is_valid() && rs1.is_valid() &&
         is_uint5(uimm5));
  ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
                     ((rs2.code() & 0x7) << kRvcRs2sShift) |
                     ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
                     ((rs1.code() & 0x7) << kRvcRs1sShift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCB(uint8_t funct3, BaseOpcode opcode,
                                    Register rs1, uint8_t uimm8) {
  DCHECK(is_uint3(funct3) && is_uint8(uimm8));
  ShortInstr instr = opcode | ((uimm8 & 0x1f) << 2) | ((uimm8 & 0xe0) << 5) |
                     ((rs1.code() & 0x7) << kRvcRs1sShift) |
                     (funct3 << kRvcFunct3Shift);
  emit(instr);
}

void AssemblerRiscvBase::GenInstrCBA(uint8_t funct3, uint8_t funct2,
                                     BaseOpcode opcode, Register rs1,
                                     int8_t imm6) {
  DCHECK(is_uint3(funct3) && is_uint2(funct2) && is_int6(imm6));
  ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) | ((imm6 & 0x20) << 7) |
                     ((rs1.code() & 0x7) << kRvcRs1sShift) |
                     (funct3 << kRvcFunct3Shift) | (funct2 << 10);
  emit(instr);
}

// ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td

void AssemblerRiscvBase::GenInstrBranchCC_rri(uint8_t funct3, Register rs1,
                                              Register rs2, int16_t imm13) {
  GenInstrB(funct3, BRANCH, rs1, rs2, imm13);
}

void AssemblerRiscvBase::GenInstrLoad_ri(uint8_t funct3, Register rd,
                                         Register rs1, int16_t imm12) {
  GenInstrI(funct3, LOAD, rd, rs1, imm12);
}

void AssemblerRiscvBase::GenInstrStore_rri(uint8_t funct3, Register rs1,
                                           Register rs2, int16_t imm12) {
  GenInstrS(funct3, STORE, rs1, rs2, imm12);
}

void AssemblerRiscvBase::GenInstrALU_ri(uint8_t funct3, Register rd,
                                        Register rs1, int16_t imm12) {
  GenInstrI(funct3, OP_IMM, rd, rs1, imm12);
}

void AssemblerRiscvBase::GenInstrShift_ri(bool arithshift, uint8_t funct3,
                                          Register rd, Register rs1,
                                          uint8_t shamt) {
  DCHECK(is_uint6(shamt));
  GenInstrI(funct3, OP_IMM, rd, rs1, (arithshift << 10) | shamt);
}

void AssemblerRiscvBase::GenInstrALU_rr(uint8_t funct7, uint8_t funct3,
                                        Register rd, Register rs1,
                                        Register rs2) {
  GenInstrR(funct7, funct3, OP, rd, rs1, rs2);
}

void AssemblerRiscvBase::GenInstrCSR_ir(uint8_t funct3, Register rd,
                                        ControlStatusReg csr, Register rs1) {
  GenInstrI(funct3, SYSTEM, rd, rs1, csr);
}

void AssemblerRiscvBase::GenInstrCSR_ii(uint8_t funct3, Register rd,
                                        ControlStatusReg csr, uint8_t imm5) {
  GenInstrI(funct3, SYSTEM, rd, ToRegister(imm5), csr);
}

void AssemblerRiscvBase::GenInstrShiftW_ri(bool arithshift, uint8_t funct3,
                                           Register rd, Register rs1,
                                           uint8_t shamt) {
  GenInstrIShiftW(arithshift, funct3, OP_IMM_32, rd, rs1, shamt);
}

void AssemblerRiscvBase::GenInstrALUW_rr(uint8_t funct7, uint8_t funct3,
                                         Register rd, Register rs1,
                                         Register rs2) {
  GenInstrR(funct7, funct3, OP_32, rd, rs1, rs2);
}

void AssemblerRiscvBase::GenInstrPriv(uint8_t funct7, Register rs1,
                                      Register rs2) {
  GenInstrR(funct7, 0b000, SYSTEM, ToRegister(0), rs1, rs2);
}

void AssemblerRiscvBase::GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd,
                                           Register rs1, int16_t imm12) {
  GenInstrI(funct3, LOAD_FP, rd, rs1, imm12);
}

void AssemblerRiscvBase::GenInstrStoreFP_rri(uint8_t funct3, Register rs1,
                                             FPURegister rs2, int16_t imm12) {
  GenInstrS(funct3, STORE_FP, rs1, rs2, imm12);
}

void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
                                          FPURegister rd, FPURegister rs1,
                                          FPURegister rs2) {
  GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
}

void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
                                          FPURegister rd, Register rs1,
                                          Register rs2) {
  GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
}

void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
                                          FPURegister rd, FPURegister rs1,
                                          Register rs2) {
  GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
}

void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
                                          Register rd, FPURegister rs1,
                                          Register rs2) {
  GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
}

void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
                                          Register rd, FPURegister rs1,
                                          FPURegister rs2) {
  GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
}

}  // namespace internal
}  // namespace v8
192 src/codegen/riscv/base-assembler-riscv.h Normal file
@ -0,0 +1,192 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2021 the V8 project authors. All rights reserved.

#ifndef V8_CODEGEN_RISCV_BASE_ASSEMBLER_RISCV_H_
#define V8_CODEGEN_RISCV_BASE_ASSEMBLER_RISCV_H_

#include <stdio.h>

#include <memory>
#include <set>

#include "src/codegen/assembler.h"
#include "src/codegen/constant-pool.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
#include "src/codegen/riscv/constants-riscv.h"
#include "src/codegen/riscv/register-riscv.h"
#include "src/objects/contexts.h"
#include "src/objects/smi.h"

namespace v8 {
namespace internal {

#define DEBUG_PRINTF(...)  \
  if (FLAG_riscv_debug) {  \
    printf(__VA_ARGS__);   \
  }

class SafepointTableBuilder;

class AssemblerRiscvBase {
 protected:
  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  // Manages the jump elimination optimization if the second parameter is true.
  enum OffsetSize : int {
    kOffset21 = 21,  // RISCV jal
    kOffset12 = 12,  // RISCV imm12
    kOffset20 = 20,  // RISCV imm20
    kOffset13 = 13,  // RISCV branch
    kOffset32 = 32,  // RISCV auipc + instr_I
    kOffset11 = 11,  // RISCV C_J
    kOffset9 = 9     // RISCV compressed branch
  };
  virtual int32_t branch_offset_helper(Label* L, OffsetSize bits) = 0;

  virtual void emit(Instr x) = 0;
  virtual void emit(ShortInstr x) = 0;
  virtual void emit(uint64_t x) = 0;
  // Instruction generation.

  // ----- Top-level instruction formats match those in the ISA manual
  // (R, I, S, B, U, J). These match the formats defined in LLVM's
  // RISCVInstrFormats.td.
  void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
                 Register rs1, Register rs2);
  void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
                 FPURegister rd, FPURegister rs1, FPURegister rs2);
  void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
                 FPURegister rs1, Register rs2);
  void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
                 FPURegister rd, Register rs1, Register rs2);
  void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
                 FPURegister rd, FPURegister rs1, Register rs2);
  void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
                 FPURegister rs1, FPURegister rs2);
  void GenInstrR4(uint8_t funct2, BaseOpcode opcode, Register rd, Register rs1,
                  Register rs2, Register rs3, FPURoundingMode frm);
  void GenInstrR4(uint8_t funct2, BaseOpcode opcode, FPURegister rd,
                  FPURegister rs1, FPURegister rs2, FPURegister rs3,
                  FPURoundingMode frm);
  void GenInstrRAtomic(uint8_t funct5, bool aq, bool rl, uint8_t funct3,
                       Register rd, Register rs1, Register rs2);
  void GenInstrRFrm(uint8_t funct7, BaseOpcode opcode, Register rd,
                    Register rs1, Register rs2, FPURoundingMode frm);
  void GenInstrI(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
                 int16_t imm12);
  void GenInstrI(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
                 Register rs1, int16_t imm12);
  void GenInstrIShift(bool arithshift, uint8_t funct3, BaseOpcode opcode,
                      Register rd, Register rs1, uint8_t shamt);
  void GenInstrIShiftW(bool arithshift, uint8_t funct3, BaseOpcode opcode,
                       Register rd, Register rs1, uint8_t shamt);
  void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
                 int16_t imm12);
  void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1,
                 FPURegister rs2, int16_t imm12);
  void GenInstrB(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
                 int16_t imm13);
  void GenInstrU(BaseOpcode opcode, Register rd, int32_t imm20);
  void GenInstrJ(BaseOpcode opcode, Register rd, int32_t imm21);
  void GenInstrCR(uint8_t funct4, BaseOpcode opcode, Register rd, Register rs2);
  void GenInstrCA(uint8_t funct6, BaseOpcode opcode, Register rd, uint8_t funct,
                  Register rs2);
  void GenInstrCI(uint8_t funct3, BaseOpcode opcode, Register rd, int8_t imm6);
  void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, Register rd,
                   uint8_t uimm6);
  void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
                   uint8_t uimm6);
  void GenInstrCIW(uint8_t funct3, BaseOpcode opcode, Register rd,
                   uint8_t uimm8);
  void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
                   uint8_t uimm6);
  void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, Register rs2,
                   uint8_t uimm6);
  void GenInstrCL(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
                  uint8_t uimm5);
  void GenInstrCL(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
                  Register rs1, uint8_t uimm5);
  void GenInstrCS(uint8_t funct3, BaseOpcode opcode, Register rs2, Register rs1,
                  uint8_t uimm5);
  void GenInstrCS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
                  Register rs1, uint8_t uimm5);
  void GenInstrCJ(uint8_t funct3, BaseOpcode opcode, uint16_t uint11);
  void GenInstrCB(uint8_t funct3, BaseOpcode opcode, Register rs1,
                  uint8_t uimm8);
  void GenInstrCBA(uint8_t funct3, uint8_t funct2, BaseOpcode opcode,
                   Register rs1, int8_t imm6);

  // ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
  void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
                            int16_t imm13);
  void GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1,
                       int16_t imm12);
  void GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2,
                         int16_t imm12);
  void GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1, int16_t imm12);
  void GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd,
                        Register rs1, uint8_t shamt);
  void GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd, Register rs1,
                      Register rs2);
  void GenInstrCSR_ir(uint8_t funct3, Register rd, ControlStatusReg csr,
                      Register rs1);
  void GenInstrCSR_ii(uint8_t funct3, Register rd, ControlStatusReg csr,
                      uint8_t imm5);
  void GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd,
                         Register rs1, uint8_t shamt);
  void GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd,
                       Register rs1, Register rs2);
  void GenInstrPriv(uint8_t funct7, Register rs1, Register rs2);
  void GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1,
                         int16_t imm12);
  void GenInstrStoreFP_rri(uint8_t funct3, Register rs1, FPURegister rs2,
                           int16_t imm12);
  void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
                        FPURegister rs1, FPURegister rs2);
  void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
                        Register rs1, Register rs2);
  void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
                        FPURegister rs1, Register rs2);
  void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
                        FPURegister rs1, Register rs2);
  void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
                        FPURegister rs1, FPURegister rs2);
  virtual void BlockTrampolinePoolFor(int instructions) = 0;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_BASE_ASSEMBLER_RISCV_H_
@ -1,10 +1,8 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_RISCV64

#include "src/codegen/riscv64/constants-riscv64.h"
#include "src/codegen/riscv/constants-riscv.h"
#include "src/execution/simulator.h"

namespace v8 {
namespace internal {
@ -144,6 +142,95 @@ int VRegisters::Number(const char* name) {
  return kInvalidVRegister;
}
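// RVC note: standard 32-bit instructions always have their two low opcode
// bits set to 0b11; any other value (compressed quadrants C0, C1, C2) marks
// a 16-bit instruction, which is what the check below relies on.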
bool InstructionBase::IsShortInstruction() const {
  uint8_t FirstByte = *reinterpret_cast<const uint8_t*>(this);
  return (FirstByte & 0x03) <= C2;
}

template <class T>
int InstructionGetters<T>::RvcRdValue() const {
  DCHECK(this->IsShortInstruction());
  return this->Bits(kRvcRdShift + kRvcRdBits - 1, kRvcRdShift);
}

template <class T>
int InstructionGetters<T>::RvcRs2Value() const {
  DCHECK(this->IsShortInstruction());
  return this->Bits(kRvcRs2Shift + kRvcRs2Bits - 1, kRvcRs2Shift);
}

template <class T>
int InstructionGetters<T>::RvcRs1sValue() const {
  DCHECK(this->IsShortInstruction());
  return 0b1000 + this->Bits(kRvcRs1sShift + kRvcRs1sBits - 1, kRvcRs1sShift);
}

template <class T>
int InstructionGetters<T>::RvcRs2sValue() const {
  DCHECK(this->IsShortInstruction());
  return 0b1000 + this->Bits(kRvcRs2sShift + kRvcRs2sBits - 1, kRvcRs2sShift);
}

template <class T>
inline int InstructionGetters<T>::RvcFunct6Value() const {
  DCHECK(this->IsShortInstruction());
  return this->Bits(kRvcFunct6Shift + kRvcFunct6Bits - 1, kRvcFunct6Shift);
}

template <class T>
inline int InstructionGetters<T>::RvcFunct4Value() const {
  DCHECK(this->IsShortInstruction());
  return this->Bits(kRvcFunct4Shift + kRvcFunct4Bits - 1, kRvcFunct4Shift);
}

template <class T>
inline int InstructionGetters<T>::RvcFunct3Value() const {
  DCHECK(this->IsShortInstruction());
  return this->Bits(kRvcFunct3Shift + kRvcFunct3Bits - 1, kRvcFunct3Shift);
}

template <class T>
inline int InstructionGetters<T>::RvcFunct2Value() const {
  DCHECK(this->IsShortInstruction());
  return this->Bits(kRvcFunct2Shift + kRvcFunct2Bits - 1, kRvcFunct2Shift);
}

template <class T>
inline int InstructionGetters<T>::RvcFunct2BValue() const {
  DCHECK(this->IsShortInstruction());
  return this->Bits(kRvcFunct2BShift + kRvcFunct2Bits - 1, kRvcFunct2BShift);
}
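// vsetvli keeps zimm[10:0] in bits 30:20 with bit 31 clear, while vsetivli
// sets bits 31:30 to 0b11 and carries only a 10-bit zimm; the masks below
// tell the two encodings apart.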
template <class T>
uint32_t InstructionGetters<T>::Rvvzimm() const {
  if ((this->InstructionBits() &
       (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) == RO_V_VSETVLI) {
    uint32_t Bits = this->InstructionBits();
    uint32_t zimm = Bits & kRvvZimmMask;
    return zimm >> kRvvZimmShift;
  } else {
    DCHECK_EQ(
        this->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
        RO_V_VSETIVLI);
    uint32_t Bits = this->InstructionBits();
    uint32_t zimm = Bits & kRvvZimmMask;
    return (zimm >> kRvvZimmShift) & 0x3FF;
  }
}

template <class T>
uint32_t InstructionGetters<T>::Rvvuimm() const {
  DCHECK_EQ(
      this->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
      RO_V_VSETIVLI);
  uint32_t Bits = this->InstructionBits();
  uint32_t uimm = Bits & kRvvUimmMask;
  return uimm >> kRvvUimmShift;
}

template class InstructionGetters<InstructionBase>;
template class InstructionGetters<SimInstructionBase>;

InstructionBase::Type InstructionBase::InstructionType() const {
  if (IsIllegalInstruction()) {
    return kUnsupported;
@ -155,15 +242,21 @@ InstructionBase::Type InstructionBase::InstructionType() const {
      return kCIWType;
    case RO_C_FLD:
    case RO_C_LW:
#ifdef V8_TARGET_ARCH_RISCV64
    case RO_C_LD:
#endif
      return kCLType;
    case RO_C_FSD:
    case RO_C_SW:
#ifdef V8_TARGET_ARCH_RISCV64
    case RO_C_SD:
#endif
      return kCSType;
    case RO_C_NOP_ADDI:
    case RO_C_ADDIW:
    case RO_C_LI:
#ifdef V8_TARGET_ARCH_RISCV64
    case RO_C_ADDIW:
#endif
    case RO_C_LUI_ADD:
      return kCIType;
    case RO_C_MISC_ALU:
@ -179,13 +272,17 @@ InstructionBase::Type InstructionBase::InstructionType() const {
    case RO_C_SLLI:
    case RO_C_FLDSP:
    case RO_C_LWSP:
#ifdef V8_TARGET_ARCH_RISCV64
    case RO_C_LDSP:
#endif
      return kCIType;
    case RO_C_JR_MV_ADD:
      return kCRType;
    case RO_C_FSDSP:
    case RO_C_SWSP:
#ifdef V8_TARGET_ARCH_RISCV64
    case RO_C_SDSP:
#endif
      return kCSSType;
    default:
      break;
@ -241,5 +338,3 @@

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_RISCV64
File diff suppressed because it is too large

330 src/codegen/riscv/base-riscv-i.cc Normal file
@ -0,0 +1,330 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/base-riscv-i.h"

namespace v8 {
namespace internal {

void AssemblerRISCVI::lui(Register rd, int32_t imm20) {
  GenInstrU(LUI, rd, imm20);
}

void AssemblerRISCVI::auipc(Register rd, int32_t imm20) {
  GenInstrU(AUIPC, rd, imm20);
}

// Jumps

void AssemblerRISCVI::jal(Register rd, int32_t imm21) {
  GenInstrJ(JAL, rd, imm21);
  BlockTrampolinePoolFor(1);
}

void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
  GenInstrI(0b000, JALR, rd, rs1, imm12);
  BlockTrampolinePoolFor(1);
}

// Branches

void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);
}

void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);
}

void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);
}

void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);
}

void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);
}

void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);
}

// Loads

void AssemblerRISCVI::lb(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b000, rd, rs1, imm12);
}

void AssemblerRISCVI::lh(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b001, rd, rs1, imm12);
}

void AssemblerRISCVI::lw(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b010, rd, rs1, imm12);
}

void AssemblerRISCVI::lbu(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b100, rd, rs1, imm12);
}

void AssemblerRISCVI::lhu(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b101, rd, rs1, imm12);
}

// Stores

void AssemblerRISCVI::sb(Register source, Register base, int16_t imm12) {
  GenInstrStore_rri(0b000, base, source, imm12);
}

void AssemblerRISCVI::sh(Register source, Register base, int16_t imm12) {
  GenInstrStore_rri(0b001, base, source, imm12);
}

void AssemblerRISCVI::sw(Register source, Register base, int16_t imm12) {
  GenInstrStore_rri(0b010, base, source, imm12);
}

// Arithmetic with immediate

void AssemblerRISCVI::addi(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b000, rd, rs1, imm12);
}

void AssemblerRISCVI::slti(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b010, rd, rs1, imm12);
}

void AssemblerRISCVI::sltiu(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b011, rd, rs1, imm12);
}

void AssemblerRISCVI::xori(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b100, rd, rs1, imm12);
}

void AssemblerRISCVI::ori(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b110, rd, rs1, imm12);
}

void AssemblerRISCVI::andi(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b111, rd, rs1, imm12);
}

void AssemblerRISCVI::slli(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f);
}

void AssemblerRISCVI::srli(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f);
}

void AssemblerRISCVI::srai(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f);
}

// Arithmetic

void AssemblerRISCVI::add(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVI::sub(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVI::sll(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVI::slt(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVI::sltu(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVI::xor_(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2);
}

void AssemblerRISCVI::srl(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2);
}

void AssemblerRISCVI::sra(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2);
}

void AssemblerRISCVI::or_(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2);
}

void AssemblerRISCVI::and_(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2);
}

// Memory fences

void AssemblerRISCVI::fence(uint8_t pred, uint8_t succ) {
  DCHECK(is_uint4(pred) && is_uint4(succ));
  uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8);
  GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12);
}

void AssemblerRISCVI::fence_tso() {
  uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8);
  GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12);
}
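
In the FENCE encoding above, pred and succ are each a four-bit {i, o, r, w} set (i in the high bit), and fence_tso sets fm = 0b1000 in the top nibble of imm12. A small illustrative helper for building the nibbles (not part of this change):

constexpr uint8_t FenceBits(bool i, bool o, bool r, bool w) {
  // Pack the ordering set: i -> bit 3, o -> bit 2, r -> bit 1, w -> bit 0.
  return static_cast<uint8_t>((i << 3) | (o << 2) | (r << 1) | (w << 0));
}
// fence(FenceBits(false, false, true, true),
//       FenceBits(false, false, true, true));  // i.e. fence rw,rw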

// Environment call / break

void AssemblerRISCVI::ecall() {
  GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 0);
}

void AssemblerRISCVI::ebreak() {
  GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 1);
}

// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
// instruction (i.e., it should always trap, if your implementation has invalid
// instruction traps).
void AssemblerRISCVI::unimp() {
  GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000);
}

bool AssemblerRISCVI::IsBranch(Instr instr) {
  return (instr & kBaseOpcodeMask) == BRANCH;
}

bool AssemblerRISCVI::IsJump(Instr instr) {
  int Op = instr & kBaseOpcodeMask;
  return Op == JAL || Op == JALR;
}

bool AssemblerRISCVI::IsNop(Instr instr) { return instr == kNopByte; }

bool AssemblerRISCVI::IsJal(Instr instr) {
  return (instr & kBaseOpcodeMask) == JAL;
}

bool AssemblerRISCVI::IsJalr(Instr instr) {
  return (instr & kBaseOpcodeMask) == JALR;
}

bool AssemblerRISCVI::IsLui(Instr instr) {
  return (instr & kBaseOpcodeMask) == LUI;
}
bool AssemblerRISCVI::IsAuipc(Instr instr) {
  return (instr & kBaseOpcodeMask) == AUIPC;
}
bool AssemblerRISCVI::IsAddi(Instr instr) {
  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI;
}
bool AssemblerRISCVI::IsOri(Instr instr) {
  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI;
}
bool AssemblerRISCVI::IsSlli(Instr instr) {
  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI;
}

int AssemblerRISCVI::JumpOffset(Instr instr) {
  int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
                  (instr & 0xff000) | ((instr & 0x80000000) >> 11);
  imm21 = imm21 << 11 >> 11;
  return imm21;
}
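
JumpOffset inverts the J-type immediate layout imm[20|10:1|11|19:12] held in instruction bits 31:12, then sign-extends the 21-bit result. A self-contained round-trip check of that layout (the encoder below is an illustration only; V8 itself encodes through GenInstrJ):

#include <cassert>
#include <cstdint>

uint32_t EncodeJImm(int32_t imm21) {  // even offset in [-2^20, 2^20)
  uint32_t i = static_cast<uint32_t>(imm21);
  return ((i & 0x100000) << 11) |  // imm[20]    -> bit 31
         ((i & 0x7fe) << 20) |     // imm[10:1]  -> bits 30:21
         ((i & 0x800) << 9) |      // imm[11]    -> bit 20
         (i & 0xff000);            // imm[19:12] -> bits 19:12
}

int32_t DecodeJImm(uint32_t instr) {  // mirrors JumpOffset above
  uint32_t raw = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
                 (instr & 0xff000) | ((instr & 0x80000000) >> 11);
  return static_cast<int32_t>(raw << 11) >> 11;  // sign-extend 21 bits
}

int main() {
  for (int32_t off : {0, 2, 0x7fffe, -2, -0x100000}) {
    assert(DecodeJImm(EncodeJImm(off)) == off);
  }
}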

int AssemblerRISCVI::JalrOffset(Instr instr) {
  DCHECK(IsJalr(instr));
  int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
  return imm12;
}

int AssemblerRISCVI::AuipcOffset(Instr instr) {
  DCHECK(IsAuipc(instr));
  int32_t imm20 = static_cast<int32_t>(instr & kImm20Mask);
  return imm20;
}

bool AssemblerRISCVI::IsLw(Instr instr) {
  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LW;
}

int AssemblerRISCVI::LoadOffset(Instr instr) {
#if V8_TARGET_ARCH_RISCV64
  DCHECK(IsLd(instr));
#elif V8_TARGET_ARCH_RISCV32
  DCHECK(IsLw(instr));
#endif
  int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
  return imm12;
}

#ifdef V8_TARGET_ARCH_RISCV64

bool AssemblerRISCVI::IsAddiw(Instr instr) {
  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW;
}

bool AssemblerRISCVI::IsLd(Instr instr) {
  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD;
}

void AssemblerRISCVI::lwu(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b110, rd, rs1, imm12);
}

void AssemblerRISCVI::ld(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b011, rd, rs1, imm12);
}

void AssemblerRISCVI::sd(Register source, Register base, int16_t imm12) {
  GenInstrStore_rri(0b011, base, source, imm12);
}

void AssemblerRISCVI::addiw(Register rd, Register rs1, int16_t imm12) {
  GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12);
}

void AssemblerRISCVI::slliw(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f);
}

void AssemblerRISCVI::srliw(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f);
}

void AssemblerRISCVI::sraiw(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f);
}

void AssemblerRISCVI::addw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVI::subw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVI::sllw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVI::srlw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2);
}

void AssemblerRISCVI::sraw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2);
}

#endif

}  // namespace internal
}  // namespace v8
212 src/codegen/riscv/base-riscv-i.h Normal file
@ -0,0 +1,212 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV_BASE_RISCV_I_H_
#define V8_CODEGEN_RISCV_BASE_RISCV_I_H_

#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-i.h"
#include "src/codegen/riscv/register-riscv.h"

namespace v8 {
namespace internal {
class AssemblerRISCVI : public AssemblerRiscvBase {
 public:
  void lui(Register rd, int32_t imm20);
  void auipc(Register rd, int32_t imm20);

  // Jumps
  void jal(Register rd, int32_t imm21);
  void jalr(Register rd, Register rs1, int16_t imm12);

  // Branches
  void beq(Register rs1, Register rs2, int16_t imm13);
  void bne(Register rs1, Register rs2, int16_t imm13);
  void blt(Register rs1, Register rs2, int16_t imm13);
  void bge(Register rs1, Register rs2, int16_t imm13);
  void bltu(Register rs1, Register rs2, int16_t imm13);
  void bgeu(Register rs1, Register rs2, int16_t imm13);
  // Loads
  void lb(Register rd, Register rs1, int16_t imm12);
  void lh(Register rd, Register rs1, int16_t imm12);
  void lw(Register rd, Register rs1, int16_t imm12);
  void lbu(Register rd, Register rs1, int16_t imm12);
  void lhu(Register rd, Register rs1, int16_t imm12);

  // Stores
  void sb(Register source, Register base, int16_t imm12);
  void sh(Register source, Register base, int16_t imm12);
  void sw(Register source, Register base, int16_t imm12);

  // Arithmetic with immediate
  void addi(Register rd, Register rs1, int16_t imm12);
  void slti(Register rd, Register rs1, int16_t imm12);
  void sltiu(Register rd, Register rs1, int16_t imm12);
  void xori(Register rd, Register rs1, int16_t imm12);
  void ori(Register rd, Register rs1, int16_t imm12);
  void andi(Register rd, Register rs1, int16_t imm12);
  void slli(Register rd, Register rs1, uint8_t shamt);
  void srli(Register rd, Register rs1, uint8_t shamt);
  void srai(Register rd, Register rs1, uint8_t shamt);

  // Arithmetic
  void add(Register rd, Register rs1, Register rs2);
  void sub(Register rd, Register rs1, Register rs2);
  void sll(Register rd, Register rs1, Register rs2);
  void slt(Register rd, Register rs1, Register rs2);
  void sltu(Register rd, Register rs1, Register rs2);
  void xor_(Register rd, Register rs1, Register rs2);
  void srl(Register rd, Register rs1, Register rs2);
  void sra(Register rd, Register rs1, Register rs2);
  void or_(Register rd, Register rs1, Register rs2);
  void and_(Register rd, Register rs1, Register rs2);

  // Other pseudo instructions that are not part of RISC-V pseudo assembly
  void nor(Register rd, Register rs, Register rt) {
    or_(rd, rs, rt);
    not_(rd, rd);
  }

  // Memory fences
  void fence(uint8_t pred, uint8_t succ);
  void fence_tso();

  // Environment call / break
  void ecall();
  void ebreak();

  void sync() { fence(0b1111, 0b1111); }

  // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
  // instruction (i.e., it should always trap, if your implementation has
  // invalid instruction traps).
  void unimp();

  static int JumpOffset(Instr instr);
  static int AuipcOffset(Instr instr);
  static int JalrOffset(Instr instr);
  static int LoadOffset(Instr instr);

  // Check if an instruction is a branch of some kind.
  static bool IsBranch(Instr instr);
  static bool IsNop(Instr instr);
  static bool IsJump(Instr instr);
  static bool IsJal(Instr instr);
  static bool IsJalr(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsAuipc(Instr instr);
  static bool IsAddi(Instr instr);
  static bool IsOri(Instr instr);
  static bool IsSlli(Instr instr);
  static bool IsLw(Instr instr);

  inline int32_t branch_offset(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset13);
  }
  inline int32_t jump_offset(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset21);
  }

  // Branches
  void beq(Register rs1, Register rs2, Label* L) {
    beq(rs1, rs2, branch_offset(L));
  }
  void bne(Register rs1, Register rs2, Label* L) {
    bne(rs1, rs2, branch_offset(L));
  }
  void blt(Register rs1, Register rs2, Label* L) {
    blt(rs1, rs2, branch_offset(L));
  }
  void bge(Register rs1, Register rs2, Label* L) {
    bge(rs1, rs2, branch_offset(L));
  }
  void bltu(Register rs1, Register rs2, Label* L) {
    bltu(rs1, rs2, branch_offset(L));
  }
  void bgeu(Register rs1, Register rs2, Label* L) {
    bgeu(rs1, rs2, branch_offset(L));
  }

  void beqz(Register rs, int16_t imm13) { beq(rs, zero_reg, imm13); }
  void beqz(Register rs1, Label* L) { beqz(rs1, branch_offset(L)); }
  void bnez(Register rs, int16_t imm13) { bne(rs, zero_reg, imm13); }
  void bnez(Register rs1, Label* L) { bnez(rs1, branch_offset(L)); }
  void blez(Register rs, int16_t imm13) { bge(zero_reg, rs, imm13); }
  void blez(Register rs1, Label* L) { blez(rs1, branch_offset(L)); }
  void bgez(Register rs, int16_t imm13) { bge(rs, zero_reg, imm13); }
  void bgez(Register rs1, Label* L) { bgez(rs1, branch_offset(L)); }
  void bltz(Register rs, int16_t imm13) { blt(rs, zero_reg, imm13); }
  void bltz(Register rs1, Label* L) { bltz(rs1, branch_offset(L)); }
  void bgtz(Register rs, int16_t imm13) { blt(zero_reg, rs, imm13); }
  void bgtz(Register rs1, Label* L) { bgtz(rs1, branch_offset(L)); }
  void bgt(Register rs1, Register rs2, int16_t imm13) { blt(rs2, rs1, imm13); }
  void bgt(Register rs1, Register rs2, Label* L) {
    bgt(rs1, rs2, branch_offset(L));
  }
  void ble(Register rs1, Register rs2, int16_t imm13) { bge(rs2, rs1, imm13); }
  void ble(Register rs1, Register rs2, Label* L) {
    ble(rs1, rs2, branch_offset(L));
  }
  void bgtu(Register rs1, Register rs2, int16_t imm13) {
    bltu(rs2, rs1, imm13);
  }
  void bgtu(Register rs1, Register rs2, Label* L) {
    bgtu(rs1, rs2, branch_offset(L));
  }
  void bleu(Register rs1, Register rs2, int16_t imm13) {
    bgeu(rs2, rs1, imm13);
  }
  void bleu(Register rs1, Register rs2, Label* L) {
    bleu(rs1, rs2, branch_offset(L));
  }

  void j(int32_t imm21) { jal(zero_reg, imm21); }
  void j(Label* L) { j(jump_offset(L)); }
  void b(Label* L) { j(L); }
  void jal(int32_t imm21) { jal(ra, imm21); }
  void jal(Label* L) { jal(jump_offset(L)); }
  void jr(Register rs) { jalr(zero_reg, rs, 0); }
  void jr(Register rs, int32_t imm12) { jalr(zero_reg, rs, imm12); }
  void jalr(Register rs, int32_t imm12) { jalr(ra, rs, imm12); }
  void jalr(Register rs) { jalr(ra, rs, 0); }
  void ret() { jalr(zero_reg, ra, 0); }
  void call(int32_t offset) {
    auipc(ra, (offset >> 12) + ((offset & 0x800) >> 11));
    jalr(ra, ra, offset << 20 >> 20);
  }
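
  // The (offset & 0x800) >> 11 term in call() compensates for jalr
  // sign-extending its 12-bit immediate: when bit 11 of the offset is set,
  // the low part goes negative and the auipc page must be bumped by one.
  // A standalone sanity check of the split (illustrative only; assumes
  // arithmetic right shift of negative values, as the code above does):
  //
  //   #include <cassert>
  //   int main() {
  //     for (int offset : {0x12345, -0x12345, 0x7ff, 0x800, -0x800, 0}) {
  //       int hi = (offset >> 12) + ((offset & 0x800) >> 11);
  //       int lo = (offset & 0xfff) - ((offset & 0x800) << 1);  // signed low 12
  //       assert(hi * 4096 + lo == offset);
  //     }
  //   }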

  void mv(Register rd, Register rs) { addi(rd, rs, 0); }
  void not_(Register rd, Register rs) { xori(rd, rs, -1); }
  void neg(Register rd, Register rs) { sub(rd, zero_reg, rs); }
  void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); }
  void snez(Register rd, Register rs) { sltu(rd, zero_reg, rs); }
  void sltz(Register rd, Register rs) { slt(rd, rs, zero_reg); }
  void sgtz(Register rd, Register rs) { slt(rd, zero_reg, rs); }

#if V8_TARGET_ARCH_RISCV64
  void lwu(Register rd, Register rs1, int16_t imm12);
  void ld(Register rd, Register rs1, int16_t imm12);
  void sd(Register source, Register base, int16_t imm12);
  void addiw(Register rd, Register rs1, int16_t imm12);
  void slliw(Register rd, Register rs1, uint8_t shamt);
  void srliw(Register rd, Register rs1, uint8_t shamt);
  void sraiw(Register rd, Register rs1, uint8_t shamt);
  void addw(Register rd, Register rs1, Register rs2);
  void subw(Register rd, Register rs1, Register rs2);
  void sllw(Register rd, Register rs1, Register rs2);
  void srlw(Register rd, Register rs1, Register rs2);
  void sraw(Register rd, Register rs1, Register rs2);
  void negw(Register rd, Register rs) { subw(rd, zero_reg, rs); }
  void sext_w(Register rd, Register rs) { addiw(rd, rs, 0); }

  static bool IsAddiw(Instr instr);
  static bool IsLd(Instr instr);
#endif
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_BASE_RISCV_I_H_
43 src/codegen/riscv/constant-riscv-a.h Normal file
@ -0,0 +1,43 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_A_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_A_H_

#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {

enum OpcodeRISCVA : uint32_t {
  // RV32A Standard Extension
  RO_LR_W = AMO | (0b010 << kFunct3Shift) | (0b00010 << kFunct5Shift),
  RO_SC_W = AMO | (0b010 << kFunct3Shift) | (0b00011 << kFunct5Shift),
  RO_AMOSWAP_W = AMO | (0b010 << kFunct3Shift) | (0b00001 << kFunct5Shift),
  RO_AMOADD_W = AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift),
  RO_AMOXOR_W = AMO | (0b010 << kFunct3Shift) | (0b00100 << kFunct5Shift),
  RO_AMOAND_W = AMO | (0b010 << kFunct3Shift) | (0b01100 << kFunct5Shift),
  RO_AMOOR_W = AMO | (0b010 << kFunct3Shift) | (0b01000 << kFunct5Shift),
  RO_AMOMIN_W = AMO | (0b010 << kFunct3Shift) | (0b10000 << kFunct5Shift),
  RO_AMOMAX_W = AMO | (0b010 << kFunct3Shift) | (0b10100 << kFunct5Shift),
  RO_AMOMINU_W = AMO | (0b010 << kFunct3Shift) | (0b11000 << kFunct5Shift),
  RO_AMOMAXU_W = AMO | (0b010 << kFunct3Shift) | (0b11100 << kFunct5Shift),

#ifdef V8_TARGET_ARCH_RISCV64
  // RV64A Standard Extension (in addition to RV32A)
  RO_LR_D = AMO | (0b011 << kFunct3Shift) | (0b00010 << kFunct5Shift),
  RO_SC_D = AMO | (0b011 << kFunct3Shift) | (0b00011 << kFunct5Shift),
  RO_AMOSWAP_D = AMO | (0b011 << kFunct3Shift) | (0b00001 << kFunct5Shift),
  RO_AMOADD_D = AMO | (0b011 << kFunct3Shift) | (0b00000 << kFunct5Shift),
  RO_AMOXOR_D = AMO | (0b011 << kFunct3Shift) | (0b00100 << kFunct5Shift),
  RO_AMOAND_D = AMO | (0b011 << kFunct3Shift) | (0b01100 << kFunct5Shift),
  RO_AMOOR_D = AMO | (0b011 << kFunct3Shift) | (0b01000 << kFunct5Shift),
  RO_AMOMIN_D = AMO | (0b011 << kFunct3Shift) | (0b10000 << kFunct5Shift),
  RO_AMOMAX_D = AMO | (0b011 << kFunct3Shift) | (0b10100 << kFunct5Shift),
  RO_AMOMINU_D = AMO | (0b011 << kFunct3Shift) | (0b11000 << kFunct5Shift),
  RO_AMOMAXU_D = AMO | (0b011 << kFunct3Shift) | (0b11100 << kFunct5Shift),
#endif  // V8_TARGET_ARCH_RISCV64
};
}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_CONSTANT_RISCV_A_H_
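
All entries above share the AMO major opcode: funct3 selects the access width (0b010 = 32-bit, 0b011 = 64-bit) and funct5 selects the operation, with the aq/rl ordering bits sitting between them. A hedged decode sketch (the shift values are assumptions matching the standard RISC-V layout, not taken from this change):

#include <cstdint>
constexpr uint32_t kF3Shift = 12, kF5Shift = 27;  // assumed field positions
inline uint32_t AmoWidth(uint32_t instr) { return (instr >> kF3Shift) & 0x7; }
inline uint32_t AmoOp(uint32_t instr) { return (instr >> kF5Shift) & 0x1f; }
// AmoOp: 0b00000 -> amoadd, 0b00001 -> amoswap, 0b00010 -> lr, 0b00011 -> sc, ...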
62 src/codegen/riscv/constant-riscv-c.h Normal file
@ -0,0 +1,62 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_C_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_C_H_

#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {

enum OpcodeRISCVC : uint32_t {
  RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift),
  RO_C_ADDI16SP = C1 | (0b011 << kRvcFunct3Shift),
  RO_C_LW = C0 | (0b010 << kRvcFunct3Shift),
  RO_C_SW = C0 | (0b110 << kRvcFunct3Shift),
  RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift),
  RO_C_LI = C1 | (0b010 << kRvcFunct3Shift),
  RO_C_SUB = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
  RO_C_XOR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
  RO_C_OR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_2 << kRvcFunct2Shift),
  RO_C_AND = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_3 << kRvcFunct2Shift),
  RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift),
  RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift),
  RO_C_J = C1 | (0b101 << kRvcFunct3Shift),
  RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift),
  RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift),
  RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift),
  RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift),
  RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift),
  RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift),
  RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift),
  RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift),
  RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift),
  RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift),
  RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),

  RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift),
  RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift),
  RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift),
  RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
#ifdef V8_TARGET_ARCH_RISCV64
  RO_C_LD = C0 | (0b011 << kRvcFunct3Shift),
  RO_C_SD = C0 | (0b111 << kRvcFunct3Shift),
  RO_C_LDSP = C2 | (0b011 << kRvcFunct3Shift),
  RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
  RO_C_ADDIW = C1 | (0b001 << kRvcFunct3Shift),
  RO_C_SUBW =
      C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
  RO_C_ADDW =
      C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
#endif
#ifdef V8_TARGET_ARCH_RISCV32
  RO_C_FLWSP = C2 | (0b011 << kRvcFunct3Shift),
  RO_C_FSWSP = C2 | (0b111 << kRvcFunct3Shift),
  RO_C_FLW = C0 | (0b011 << kRvcFunct3Shift),
  RO_C_FSW = C0 | (0b111 << kRvcFunct3Shift),
#endif
};
}  // namespace internal
}  // namespace v8
#endif  // V8_CODEGEN_RISCV_CONSTANT_RISCV_C_H_
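
RO_C_JR/RO_C_MV and RO_C_EBREAK/RO_C_JALR/RO_C_ADD above intentionally alias: funct4 alone is ambiguous in the C2 quadrant, and a decoder must inspect the operand fields to tell them apart. A hedged sketch of that disambiguation (field positions per the C-extension spec; this is not V8's decoder):

#include <cstdint>
const char* DecodeCrC2(uint16_t instr) {
  uint32_t funct4 = (instr >> 12) & 0xf;   // bits 15:12
  uint32_t rd_rs1 = (instr >> 7) & 0x1f;   // bits 11:7
  uint32_t rs2 = (instr >> 2) & 0x1f;      // bits 6:2
  if (funct4 == 0b1000) return rs2 == 0 ? "c.jr" : "c.mv";
  if (funct4 == 0b1001) {
    if (rs2 == 0) return rd_rs1 == 0 ? "c.ebreak" : "c.jalr";
    return "c.add";
  }
  return "?";
}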
55 src/codegen/riscv/constant-riscv-d.h Normal file
@ -0,0 +1,55 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_D_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_D_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {

enum OpcodeRISCVD : uint32_t {
  // RV32D Standard Extension
  RO_FLD = LOAD_FP | (0b011 << kFunct3Shift),
  RO_FSD = STORE_FP | (0b011 << kFunct3Shift),
  RO_FMADD_D = MADD | (0b01 << kFunct2Shift),
  RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift),
  RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift),
  RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift),
  RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift),
  RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift),
  RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift),
  RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift),
  RO_FSQRT_D = OP_FP | (0b0101101 << kFunct7Shift) | (0b00000 << kRs2Shift),
  RO_FSGNJ_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
  RO_FSGNJN_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
  RO_FSQNJX_D = OP_FP | (0b010 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
  RO_FMIN_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
  RO_FMAX_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
  RO_FCVT_S_D = OP_FP | (0b0100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
  RO_FCVT_D_S = OP_FP | (0b0100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
  RO_FEQ_D = OP_FP | (0b010 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
  RO_FLT_D = OP_FP | (0b001 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
  RO_FLE_D = OP_FP | (0b000 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
  RO_FCLASS_D = OP_FP | (0b001 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
                (0b00000 << kRs2Shift),
  RO_FCVT_W_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
  RO_FCVT_WU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00001 << kRs2Shift),
  RO_FCVT_D_W = OP_FP | (0b1101001 << kFunct7Shift) | (0b00000 << kRs2Shift),
  RO_FCVT_D_WU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00001 << kRs2Shift),

#ifdef V8_TARGET_ARCH_RISCV64
  // RV64D Standard Extension (in addition to RV32D)
  RO_FCVT_L_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00010 << kRs2Shift),
  RO_FCVT_LU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00011 << kRs2Shift),
  RO_FMV_X_D = OP_FP | (0b000 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
               (0b00000 << kRs2Shift),
  RO_FCVT_D_L = OP_FP | (0b1101001 << kFunct7Shift) | (0b00010 << kRs2Shift),
  RO_FCVT_D_LU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00011 << kRs2Shift),
  RO_FMV_D_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111001 << kFunct7Shift) |
               (0b00000 << kRs2Shift),
#endif
};
}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_CONSTANT_RISCV_D_H_
51 src/codegen/riscv/constant-riscv-f.h Normal file
@ -0,0 +1,51 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_F_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_F_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {

enum OpcodeRISCVF : uint32_t {
  // RV32F Standard Extension
  RO_FLW = LOAD_FP | (0b010 << kFunct3Shift),
  RO_FSW = STORE_FP | (0b010 << kFunct3Shift),
  RO_FMADD_S = MADD | (0b00 << kFunct2Shift),
  RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift),
  RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift),
  RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift),
  RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift),
  RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift),
  RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift),
  RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift),
  RO_FSQRT_S = OP_FP | (0b0101100 << kFunct7Shift) | (0b00000 << kRs2Shift),
  RO_FSGNJ_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
  RO_FSGNJN_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
  RO_FSQNJX_S = OP_FP | (0b010 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
  RO_FMIN_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
  RO_FMAX_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
  RO_FCVT_W_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00000 << kRs2Shift),
  RO_FCVT_WU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
  RO_FMV = OP_FP | (0b1110000 << kFunct7Shift) | (0b000 << kFunct3Shift) |
           (0b00000 << kRs2Shift),
  RO_FEQ_S = OP_FP | (0b010 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
  RO_FLT_S = OP_FP | (0b001 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
  RO_FLE_S = OP_FP | (0b000 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
  RO_FCLASS_S = OP_FP | (0b001 << kFunct3Shift) | (0b1110000 << kFunct7Shift),
  RO_FCVT_S_W = OP_FP | (0b1101000 << kFunct7Shift) | (0b00000 << kRs2Shift),
  RO_FCVT_S_WU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00001 << kRs2Shift),
  RO_FMV_W_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111000 << kFunct7Shift),

#ifdef V8_TARGET_ARCH_RISCV64
  // RV64F Standard Extension (in addition to RV32F)
  RO_FCVT_L_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00010 << kRs2Shift),
  RO_FCVT_LU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00011 << kRs2Shift),
  RO_FCVT_S_L = OP_FP | (0b1101000 << kFunct7Shift) | (0b00010 << kRs2Shift),
  RO_FCVT_S_LU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00011 << kRs2Shift),
#endif  // V8_TARGET_ARCH_RISCV64
};
}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_CONSTANT_RISCV_F_H_
73 src/codegen/riscv/constant-riscv-i.h Normal file
@ -0,0 +1,73 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_I_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_I_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {

enum OpcodeRISCV32I : uint32_t {
  // Note: the RO_ prefix stands for "RISC-V Opcode".
  // RV32I Base Instruction Set
  RO_LUI = LUI,
  RO_AUIPC = AUIPC,
  RO_JAL = JAL,
  RO_JALR = JALR | (0b000 << kFunct3Shift),
  RO_BEQ = BRANCH | (0b000 << kFunct3Shift),
  RO_BNE = BRANCH | (0b001 << kFunct3Shift),
  RO_BLT = BRANCH | (0b100 << kFunct3Shift),
  RO_BGE = BRANCH | (0b101 << kFunct3Shift),
  RO_BLTU = BRANCH | (0b110 << kFunct3Shift),
  RO_BGEU = BRANCH | (0b111 << kFunct3Shift),
  RO_LB = LOAD | (0b000 << kFunct3Shift),
  RO_LH = LOAD | (0b001 << kFunct3Shift),
  RO_LW = LOAD | (0b010 << kFunct3Shift),
  RO_LBU = LOAD | (0b100 << kFunct3Shift),
  RO_LHU = LOAD | (0b101 << kFunct3Shift),
  RO_SB = STORE | (0b000 << kFunct3Shift),
  RO_SH = STORE | (0b001 << kFunct3Shift),
  RO_SW = STORE | (0b010 << kFunct3Shift),
  RO_ADDI = OP_IMM | (0b000 << kFunct3Shift),
  RO_SLTI = OP_IMM | (0b010 << kFunct3Shift),
  RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift),
  RO_XORI = OP_IMM | (0b100 << kFunct3Shift),
  RO_ORI = OP_IMM | (0b110 << kFunct3Shift),
  RO_ANDI = OP_IMM | (0b111 << kFunct3Shift),
  RO_SLLI = OP_IMM | (0b001 << kFunct3Shift),
  RO_SRLI = OP_IMM | (0b101 << kFunct3Shift),
  // RO_SRAI = OP_IMM | (0b101 << kFunct3Shift), // Same as SRLI, use funct7
  RO_ADD = OP | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_SUB = OP | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
  RO_SLL = OP | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_SLT = OP | (0b010 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_SLTU = OP | (0b011 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_XOR = OP | (0b100 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_SRL = OP | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_SRA = OP | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
  RO_OR = OP | (0b110 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_AND = OP | (0b111 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift),
  RO_ECALL = SYSTEM | (0b000 << kFunct3Shift),
  // RO_EBREAK = SYSTEM | (0b000 << kFunct3Shift), // Same as ECALL, use imm12

#if V8_TARGET_ARCH_RISCV64
  // RV64I Base Instruction Set (in addition to RV32I)
  RO_LWU = LOAD | (0b110 << kFunct3Shift),
  RO_LD = LOAD | (0b011 << kFunct3Shift),
  RO_SD = STORE | (0b011 << kFunct3Shift),
  RO_ADDIW = OP_IMM_32 | (0b000 << kFunct3Shift),
  RO_SLLIW = OP_IMM_32 | (0b001 << kFunct3Shift),
  RO_SRLIW = OP_IMM_32 | (0b101 << kFunct3Shift),
  // RO_SRAIW = OP_IMM_32 | (0b101 << kFunct3Shift), // Same as SRLIW, use funct7
  RO_ADDW = OP_32 | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_SUBW = OP_32 | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
  RO_SLLW = OP_32 | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_SRLW = OP_32 | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
  RO_SRAW = OP_32 | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
#endif
};
}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_CONSTANT_RISCV_I_H_
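
These RO_* values carry only the opcode/funct bits; the assembler ORs the operand fields in at fixed positions. A hedged sketch of how RO_ADDI becomes a full instruction word (the shift values are assumptions matching the standard layout; addi x0, x0, 0 is the canonical NOP):

#include <cstdint>
constexpr uint32_t kRdShift = 7, kRs1Shift = 15, kImm12Shift = 20;  // assumed
constexpr uint32_t kAddiBits = 0b0010011;  // OP_IMM | (0b000 << kFunct3Shift)
constexpr uint32_t EncodeAddi(uint32_t rd, uint32_t rs1, int32_t imm12) {
  return kAddiBits | (rd << kRdShift) | (rs1 << kRs1Shift) |
         ((static_cast<uint32_t>(imm12) & 0xfff) << kImm12Shift);
}
static_assert(EncodeAddi(0, 0, 0) == 0x00000013, "addi x0, x0, 0 is nop");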
34 src/codegen/riscv/constant-riscv-m.h Normal file
@ -0,0 +1,34 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_M_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_M_H_

#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {

enum OpcodeRISCVM : uint32_t {
  // RV32M Standard Extension
  RO_MUL = OP | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_MULH = OP | (0b001 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_MULHSU = OP | (0b010 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_MULHU = OP | (0b011 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_DIV = OP | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_DIVU = OP | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_REM = OP | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_REMU = OP | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),

#ifdef V8_TARGET_ARCH_RISCV64
  // RV64M Standard Extension (in addition to RV32M)
  RO_MULW = OP_32 | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_DIVW = OP_32 | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_DIVUW = OP_32 | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_REMW = OP_32 | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
  RO_REMUW = OP_32 | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
#endif
};
}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_CONSTANT_RISCV_M_H_
493 src/codegen/riscv/constant-riscv-v.h Normal file
@ -0,0 +1,493 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_V_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_V_H_

#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {

enum OpcodeRISCVV : uint32_t {
  // RVV Extension
  OP_IVV = OP_V | (0b000 << kFunct3Shift),
  OP_FVV = OP_V | (0b001 << kFunct3Shift),
  OP_MVV = OP_V | (0b010 << kFunct3Shift),
  OP_IVI = OP_V | (0b011 << kFunct3Shift),
  OP_IVX = OP_V | (0b100 << kFunct3Shift),
  OP_FVF = OP_V | (0b101 << kFunct3Shift),
  OP_MVX = OP_V | (0b110 << kFunct3Shift),

  RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31,
  RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30,
  RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31,

  // RVV LOAD/STORE
  RO_V_VL = LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
  RO_V_VLS = LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
  RO_V_VLX = LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),

  RO_V_VS = STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
  RO_V_VSS = STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
  RO_V_VSX = STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
  RO_V_VSU = STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift),
  // Note: in the segment load/store encodings below, the mop field occupies
  // the bits at kFunct6Shift.
  RO_V_VLSEG2 = LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
  RO_V_VLSEG3 = LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
  RO_V_VLSEG4 = LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
  RO_V_VLSEG5 = LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
  RO_V_VLSEG6 = LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
  RO_V_VLSEG7 = LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
  RO_V_VLSEG8 = LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),

  RO_V_VSSEG2 = STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
  RO_V_VSSEG3 = STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
  RO_V_VSSEG4 = STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
  RO_V_VSSEG5 = STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
  RO_V_VSSEG6 = STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
  RO_V_VSSEG7 = STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
  RO_V_VSSEG8 = STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),

  RO_V_VLSSEG2 = LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
  RO_V_VLSSEG3 = LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
  RO_V_VLSSEG4 = LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
  RO_V_VLSSEG5 = LOAD_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
  RO_V_VLSSEG6 = LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
  RO_V_VLSSEG7 = LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
  RO_V_VLSSEG8 = LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),

  RO_V_VSSSEG2 = STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
  RO_V_VSSSEG3 = STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
  RO_V_VSSSEG4 = STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
  RO_V_VSSSEG5 = STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
  RO_V_VSSSEG6 = STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
  RO_V_VSSSEG7 = STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
  RO_V_VSSSEG8 = STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),

  RO_V_VLXSEG2 = LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
  RO_V_VLXSEG3 = LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
  RO_V_VLXSEG4 = LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
  RO_V_VLXSEG5 = LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
  RO_V_VLXSEG6 = LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
  RO_V_VLXSEG7 = LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
  RO_V_VLXSEG8 = LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),

  RO_V_VSXSEG2 = STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
  RO_V_VSXSEG3 = STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
  RO_V_VSXSEG4 = STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
  RO_V_VSXSEG5 = STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
  RO_V_VSXSEG6 = STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
  RO_V_VSXSEG7 = STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
  RO_V_VSXSEG8 = STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),

  // RVV Vector Arithmetic Instruction
  VADD_FUNCT6 = 0b000000,
  RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift),

  VSUB_FUNCT6 = 0b000010,
  RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),

  VDIVU_FUNCT6 = 0b100000,
  RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift),

  VDIV_FUNCT6 = 0b100001,
  RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift),
  RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift),

  VREMU_FUNCT6 = 0b100010,
  RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift),

  VREM_FUNCT6 = 0b100011,
  RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift),
  RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift),

  VMULHU_FUNCT6 = 0b100100,
  RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift),

  VMUL_FUNCT6 = 0b100101,
  RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift),

  VWMUL_FUNCT6 = 0b111011,
  RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift),

  VWMULU_FUNCT6 = 0b111000,
  RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift),

  VMULHSU_FUNCT6 = 0b100110,
  RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift),

  VMULH_FUNCT6 = 0b100111,
  RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift),

  VWADD_FUNCT6 = 0b110001,
  RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift),

  VWADDU_FUNCT6 = 0b110000,
  RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift),

  VWADDUW_FUNCT6 = 0b110101,
  RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
  RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift),

  VCOMPRESS_FUNCT6 = 0b010111,
  RO_V_VCOMPRESS_VV = OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift),

  VSADDU_FUNCT6 = 0b100000,
  RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift),

  VSADD_FUNCT6 = 0b100001,
  RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift),

  VSSUB_FUNCT6 = 0b100011,
  RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift),

  VSSUBU_FUNCT6 = 0b100010,
  RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift),

  VRSUB_FUNCT6 = 0b000011,
  RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift),
  RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift),

  VMINU_FUNCT6 = 0b000100,
  RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift),

  VMIN_FUNCT6 = 0b000101,
  RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift),

  VMAXU_FUNCT6 = 0b000110,
  RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift),

  VMAX_FUNCT6 = 0b000111,
  RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift),

  VAND_FUNCT6 = 0b001001,
  RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift),
  RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift),
  RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift),

  VOR_FUNCT6 = 0b001010,
  RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift),
  RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift),
  RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift),

  VXOR_FUNCT6 = 0b001011,
  RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift),
  RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift),
  RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift),

  VRGATHER_FUNCT6 = 0b001100,
  RO_V_VRGATHER_VI = OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
  RO_V_VRGATHER_VV = OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
  RO_V_VRGATHER_VX = OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift),

  VMV_FUNCT6 = 0b010111,
  RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFMV_VF = OP_FVF | (VMV_FUNCT6 << kRvvFunct6Shift),

  RO_V_VMERGE_VI = RO_V_VMV_VI,
  RO_V_VMERGE_VV = RO_V_VMV_VV,
  RO_V_VMERGE_VX = RO_V_VMV_VX,

  VMSEQ_FUNCT6 = 0b011000,
  RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift),

  VMSNE_FUNCT6 = 0b011001,
  RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift),

  VMSLTU_FUNCT6 = 0b011010,
  RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift),

  VMSLT_FUNCT6 = 0b011011,
  RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift),

  VMSLE_FUNCT6 = 0b011101,
  RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift),

  VMSLEU_FUNCT6 = 0b011100,
  RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift),

  VMSGTU_FUNCT6 = 0b011110,
  RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift),

  VMSGT_FUNCT6 = 0b011111,
  RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift),

  VSLIDEUP_FUNCT6 = 0b001110,
  RO_V_VSLIDEUP_VI = OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSLIDEUP_VX = OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),

  VSLIDEDOWN_FUNCT6 = 0b001111,
  RO_V_VSLIDEDOWN_VI = OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSLIDEDOWN_VX = OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),

  VSRL_FUNCT6 = 0b101000,
  RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),

  VSRA_FUNCT6 = 0b101001,
  RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift),

  VSLL_FUNCT6 = 0b100101,
  RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),

  VSMUL_FUNCT6 = 0b100111,
  RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift),

  VADC_FUNCT6 = 0b010000,
  RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift),

  VMADC_FUNCT6 = 0b010001,
  RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift),

  VWXUNARY0_FUNCT6 = 0b010000,
  VRXUNARY0_FUNCT6 = 0b010000,
  VMUNARY0_FUNCT6 = 0b010100,

  RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
  RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift),

  VID_V = 0b10001,

  VXUNARY0_FUNCT6 = 0b010010,
  RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift),

  VWFUNARY0_FUNCT6 = 0b010000,
  RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift),

  VRFUNARY0_FUNCT6 = 0b010000,
  RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift),

  VREDMAXU_FUNCT6 = 0b000110,
  RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
  VREDMAX_FUNCT6 = 0b000111,
  RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift),

  VREDMINU_FUNCT6 = 0b000100,
  RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift),
  VREDMIN_FUNCT6 = 0b000101,
  RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift),

  VFUNARY0_FUNCT6 = 0b010010,
  RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift),
  VFUNARY1_FUNCT6 = 0b010011,
  RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift),

  VFCVT_XU_F_V = 0b00000,
  VFCVT_X_F_V = 0b00001,
  VFCVT_F_XU_V = 0b00010,
  VFCVT_F_X_V = 0b00011,
  VFWCVT_XU_F_V = 0b01000,
  VFWCVT_X_F_V = 0b01001,
  VFWCVT_F_XU_V = 0b01010,
  VFWCVT_F_X_V = 0b01011,
  VFWCVT_F_F_V = 0b01100,
  VFNCVT_F_F_W = 0b10100,
  VFNCVT_X_F_W = 0b10001,
  VFNCVT_XU_F_W = 0b10000,

  VFCLASS_V = 0b10000,
  VFSQRT_V = 0b00000,
  VFRSQRT7_V = 0b00100,
  VFREC7_V = 0b00101,

  VFADD_FUNCT6 = 0b000000,
  RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift),

  VFSUB_FUNCT6 = 0b000010,
  RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift),

  VFDIV_FUNCT6 = 0b100000,
  RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift),

  VFMUL_FUNCT6 = 0b100100,
  RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift),

  // Vector Widening Floating-Point Add/Subtract Instructions
  VFWADD_FUNCT6 = 0b110000,
  RO_V_VFWADD_VV = OP_FVV | (VFWADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWADD_VF = OP_FVF | (VFWADD_FUNCT6 << kRvvFunct6Shift),

  VFWSUB_FUNCT6 = 0b110010,
  RO_V_VFWSUB_VV = OP_FVV | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWSUB_VF = OP_FVF | (VFWSUB_FUNCT6 << kRvvFunct6Shift),

  VFWADD_W_FUNCT6 = 0b110100,
  RO_V_VFWADD_W_VV = OP_FVV | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWADD_W_VF = OP_FVF | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),

  VFWSUB_W_FUNCT6 = 0b110110,
  RO_V_VFWSUB_W_VV = OP_FVV | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWSUB_W_VF = OP_FVF | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),

  // Vector Widening Floating-Point Reduction Instructions
  VFWREDUSUM_FUNCT6 = 0b110001,
  RO_V_VFWREDUSUM_VV = OP_FVV | (VFWREDUSUM_FUNCT6 << kRvvFunct6Shift),

  VFWREDOSUM_FUNCT6 = 0b110011,
  RO_V_VFWREDOSUM_VV = OP_FVV | (VFWREDOSUM_FUNCT6 << kRvvFunct6Shift),

  // Vector Widening Floating-Point Multiply
  VFWMUL_FUNCT6 = 0b111000,
  RO_V_VFWMUL_VV = OP_FVV | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWMUL_VF = OP_FVF | (VFWMUL_FUNCT6 << kRvvFunct6Shift),

  VMFEQ_FUNCT6 = 0b011000,
  RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift),

  VMFNE_FUNCT6 = 0b011100,
  RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift),

  VMFLT_FUNCT6 = 0b011011,
  RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift),

  VMFLE_FUNCT6 = 0b011001,
  RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift),
  RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift),

  VMFGE_FUNCT6 = 0b011111,
  RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 << kRvvFunct6Shift),

  VMFGT_FUNCT6 = 0b011101,
  RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift),

  VFMAX_FUNCT6 = 0b000110,
  RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),

  VFREDMAX_FUNCT6 = 0b000111,
  RO_V_VFREDMAX_VV = OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift),

  VFMIN_FUNCT6 = 0b000100,
  RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),

  VFSGNJ_FUNCT6 = 0b001000,
  RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),

  VFSGNJN_FUNCT6 = 0b001001,
  RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),

  VFSGNJX_FUNCT6 = 0b001010,
  RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),

  VFMADD_FUNCT6 = 0b101000,
  RO_V_VFMADD_VV = OP_FVV | (VFMADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFMADD_VF = OP_FVF | (VFMADD_FUNCT6 << kRvvFunct6Shift),

  VFNMADD_FUNCT6 = 0b101001,
  RO_V_VFNMADD_VV = OP_FVV | (VFNMADD_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFNMADD_VF = OP_FVF | (VFNMADD_FUNCT6 << kRvvFunct6Shift),

  VFMSUB_FUNCT6 = 0b101010,
  RO_V_VFMSUB_VV = OP_FVV | (VFMSUB_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFMSUB_VF = OP_FVF | (VFMSUB_FUNCT6 << kRvvFunct6Shift),

  VFNMSUB_FUNCT6 = 0b101011,
  RO_V_VFNMSUB_VV = OP_FVV | (VFNMSUB_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFNMSUB_VF = OP_FVF | (VFNMSUB_FUNCT6 << kRvvFunct6Shift),

  VFMACC_FUNCT6 = 0b101100,
  RO_V_VFMACC_VV = OP_FVV | (VFMACC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFMACC_VF = OP_FVF | (VFMACC_FUNCT6 << kRvvFunct6Shift),

  VFNMACC_FUNCT6 = 0b101101,
  RO_V_VFNMACC_VV = OP_FVV | (VFNMACC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFNMACC_VF = OP_FVF | (VFNMACC_FUNCT6 << kRvvFunct6Shift),

  VFMSAC_FUNCT6 = 0b101110,
  RO_V_VFMSAC_VV = OP_FVV | (VFMSAC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFMSAC_VF = OP_FVF | (VFMSAC_FUNCT6 << kRvvFunct6Shift),

  VFNMSAC_FUNCT6 = 0b101111,
  RO_V_VFNMSAC_VV = OP_FVV | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFNMSAC_VF = OP_FVF | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),

  // Vector Widening Floating-Point Fused Multiply-Add Instructions
  VFWMACC_FUNCT6 = 0b111100,
  RO_V_VFWMACC_VV = OP_FVV | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWMACC_VF = OP_FVF | (VFWMACC_FUNCT6 << kRvvFunct6Shift),

  VFWNMACC_FUNCT6 = 0b111101,
  RO_V_VFWNMACC_VV = OP_FVV | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWNMACC_VF = OP_FVF | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),

  VFWMSAC_FUNCT6 = 0b111110,
  RO_V_VFWMSAC_VV = OP_FVV | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWMSAC_VF = OP_FVF | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),

  VFWNMSAC_FUNCT6 = 0b111111,
  RO_V_VFWNMSAC_VV = OP_FVV | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
  RO_V_VFWNMSAC_VF = OP_FVF | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),

  VNCLIP_FUNCT6 = 0b101111,
  RO_V_VNCLIP_WV = OP_IVV | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
  RO_V_VNCLIP_WX = OP_IVX | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
  RO_V_VNCLIP_WI = OP_IVI | (VNCLIP_FUNCT6 << kRvvFunct6Shift),

  VNCLIPU_FUNCT6 = 0b101110,
  RO_V_VNCLIPU_WV = OP_IVV | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VNCLIPU_WX = OP_IVX | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
  RO_V_VNCLIPU_WI = OP_IVI | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
};
} // namespace internal
} // namespace v8

#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_V_H_
30
src/codegen/riscv/constant-riscv-zicsr.h
Normal file
@ -0,0 +1,30 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_ZICSR_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_ZICSR_H_

#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
// RISCV CSR related bit mask and shift
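// fcsr layout: the accrued exception flags (fflags) occupy bits [4:0] and
// the dynamic rounding mode (frm) occupies bits [7:5]; the masks and shifts
// below encode exactly that split.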
const int kFcsrFlagsBits = 5;
const uint32_t kFcsrFlagsMask = (1 << kFcsrFlagsBits) - 1;
const int kFcsrFrmBits = 3;
const int kFcsrFrmShift = kFcsrFlagsBits;
const uint32_t kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift;
const int kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits;
const uint32_t kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask;

enum OpcodeRISCVZICSR : uint32_t {
  // RV32/RV64 Zicsr Standard Extension
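  // Note: csrrs/csrrc with rs1 == zero_reg read a CSR without writing it,
  // which is the conventional way to inspect fcsr.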
  RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift),
  RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift),
  RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift),
  RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift),
  RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift),
  RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift),
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_ZICSR_H_
15
src/codegen/riscv/constant-riscv-zifencei.h
Normal file
@ -0,0 +1,15 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_ZIFENCEI_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_ZIFENCEI_H_

#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
enum OpcodeRISCVIFENCEI : uint32_t {
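  // fence.i synchronizes the instruction and data streams on the local hart.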
  RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift),
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_ZIFENCEI_H_
20
src/codegen/riscv/constants-riscv.h
Normal file
@ -0,0 +1,20 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV_CONSTANTS_RISCV_H_
#define V8_CODEGEN_RISCV_CONSTANTS_RISCV_H_
#include "src/codegen/riscv/base-constants-riscv.h"
#include "src/codegen/riscv/constant-riscv-a.h"
#include "src/codegen/riscv/constant-riscv-c.h"
#include "src/codegen/riscv/constant-riscv-d.h"
#include "src/codegen/riscv/constant-riscv-f.h"
#include "src/codegen/riscv/constant-riscv-i.h"
#include "src/codegen/riscv/constant-riscv-m.h"
#include "src/codegen/riscv/constant-riscv-v.h"
#include "src/codegen/riscv/constant-riscv-zicsr.h"
#include "src/codegen/riscv/constant-riscv-zifencei.h"
namespace v8 {
namespace internal {} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANTS_RISCV_H_
@ -7,8 +7,6 @@
#include <sys/syscall.h>
#include <unistd.h>

#if V8_TARGET_ARCH_RISCV64

#include "src/codegen/cpu-features.h"

namespace v8 {
@ -28,5 +26,3 @@ void CpuFeatures::FlushICache(void* start, size_t size) {

} // namespace internal
} // namespace v8

#endif // V8_TARGET_ARCH_RISCV64
121
src/codegen/riscv/extension-riscv-a.cc
Normal file
@ -0,0 +1,121 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-a.h"

namespace v8 {
namespace internal {

// RV32A Standard Extension
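// In each emitter below, the leading 5-bit literal is the funct5 field that
// selects the AMO operation, funct3 gives the access width (0b010 = word,
// 0b011 = doubleword in the RV64A section), and aq/rl set the
// acquire/release ordering bits.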
void AssemblerRISCVA::lr_w(bool aq, bool rl, Register rd, Register rs1) {
  GenInstrRAtomic(0b00010, aq, rl, 0b010, rd, rs1, zero_reg);
}

void AssemblerRISCVA::sc_w(bool aq, bool rl, Register rd, Register rs1,
                           Register rs2) {
  GenInstrRAtomic(0b00011, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoswap_w(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b00001, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoadd_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b00000, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoxor_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b00100, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoand_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b01100, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoor_w(bool aq, bool rl, Register rd, Register rs1,
                              Register rs2) {
  GenInstrRAtomic(0b01000, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amomin_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b10000, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amomax_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b10100, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amominu_w(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b11000, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amomaxu_w(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b11100, aq, rl, 0b010, rd, rs1, rs2);
}

// RV64A Standard Extension (in addition to RV32A)
#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVA::lr_d(bool aq, bool rl, Register rd, Register rs1) {
  GenInstrRAtomic(0b00010, aq, rl, 0b011, rd, rs1, zero_reg);
}

void AssemblerRISCVA::sc_d(bool aq, bool rl, Register rd, Register rs1,
                           Register rs2) {
  GenInstrRAtomic(0b00011, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoswap_d(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b00001, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoadd_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b00000, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoxor_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b00100, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoand_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b01100, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoor_d(bool aq, bool rl, Register rd, Register rs1,
                              Register rs2) {
  GenInstrRAtomic(0b01000, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amomin_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b10000, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amomax_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b10100, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amominu_d(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b11000, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amomaxu_d(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b11100, aq, rl, 0b011, rd, rs1, rs2);
}
#endif
} // namespace internal
} // namespace v8
45
src/codegen/riscv/extension-riscv-a.h
Normal file
@ -0,0 +1,45 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-a.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_A_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_A_H_

namespace v8 {
namespace internal {
class AssemblerRISCVA : public AssemblerRiscvBase {
  // RV32A Standard Extension
 public:
  void lr_w(bool aq, bool rl, Register rd, Register rs1);
  void sc_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoswap_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoadd_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoxor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoand_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomin_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomax_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amominu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomaxu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);

#ifdef V8_TARGET_ARCH_RISCV64
  // RV64A Standard Extension (in addition to RV32A)
  void lr_d(bool aq, bool rl, Register rd, Register rs1);
  void sc_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoswap_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoadd_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoxor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoand_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomin_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomax_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amominu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomaxu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_A_H_
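Aside, for orientation only (not part of this change): with the RV32A emitters above, a compare-and-swap loop can be sketched roughly as follows. The `masm` object, the register choices, and the Label-taking branch helpers are illustrative assumptions, not APIs shown in this diff.

  // Hypothetical sketch: store new_v to (addr) iff *addr still equals old_v.
  Label retry, done;
  masm.bind(&retry);
  masm.lr_w(true, true, t0, addr);         // load-reserved (acquire)
  masm.bne(t0, old_v, &done);              // value changed: give up
  masm.sc_w(true, true, t1, addr, new_v);  // store-conditional (release)
  masm.bnez(t1, &retry);                   // nonzero = sc failed: retry
  masm.bind(&done);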
276
src/codegen/riscv/extension-riscv-c.cc
Normal file
@ -0,0 +1,276 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-c.h"

namespace v8 {
namespace internal {
// RV64C Standard Extension
void AssemblerRISCVC::c_nop() { GenInstrCI(0b000, C1, zero_reg, 0); }

void AssemblerRISCVC::c_addi(Register rd, int8_t imm6) {
  DCHECK(rd != zero_reg && imm6 != 0);
  GenInstrCI(0b000, C1, rd, imm6);
}

#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_addiw(Register rd, int8_t imm6) {
  DCHECK(rd != zero_reg);
  GenInstrCI(0b001, C1, rd, imm6);
}
#endif
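
// RVC immediates are not encoded contiguously: each helper below first
// permutes the immediate's bits into the scrambled layout that the
// C-extension formats expect, then hands the result to a GenInstrC* encoder.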

void AssemblerRISCVC::c_addi16sp(int16_t imm10) {
  DCHECK(is_int10(imm10) && (imm10 & 0xf) == 0);
  uint8_t uimm6 = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) |
                  ((imm10 & 0x40) >> 3) | ((imm10 & 0x180) >> 6) |
                  ((imm10 & 0x20) >> 5);
  GenInstrCIU(0b011, C1, sp, uimm6);
}

void AssemblerRISCVC::c_addi4spn(Register rd, int16_t uimm10) {
  DCHECK(is_uint10(uimm10) && (uimm10 != 0));
  uint8_t uimm8 = ((uimm10 & 0x4) >> 1) | ((uimm10 & 0x8) >> 3) |
                  ((uimm10 & 0x30) << 2) | ((uimm10 & 0x3c0) >> 4);
  GenInstrCIW(0b000, C0, rd, uimm8);
}

void AssemblerRISCVC::c_li(Register rd, int8_t imm6) {
  DCHECK(rd != zero_reg);
  GenInstrCI(0b010, C1, rd, imm6);
}

void AssemblerRISCVC::c_lui(Register rd, int8_t imm6) {
  DCHECK(rd != zero_reg && rd != sp && imm6 != 0);
  GenInstrCI(0b011, C1, rd, imm6);
}

void AssemblerRISCVC::c_slli(Register rd, uint8_t shamt6) {
  DCHECK(rd != zero_reg && shamt6 != 0);
  GenInstrCIU(0b000, C2, rd, shamt6);
}

void AssemblerRISCVC::c_fldsp(FPURegister rd, uint16_t uimm9) {
  DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
  uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
  GenInstrCIU(0b001, C2, rd, uimm6);
}

#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_ldsp(Register rd, uint16_t uimm9) {
  DCHECK(rd != zero_reg && is_uint9(uimm9) && (uimm9 & 0x7) == 0);
  uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
  GenInstrCIU(0b011, C2, rd, uimm6);
}
#endif

void AssemblerRISCVC::c_lwsp(Register rd, uint16_t uimm8) {
  DCHECK(rd != zero_reg && is_uint8(uimm8) && (uimm8 & 0x3) == 0);
  uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
  GenInstrCIU(0b010, C2, rd, uimm6);
}

void AssemblerRISCVC::c_jr(Register rs1) {
  DCHECK(rs1 != zero_reg);
  GenInstrCR(0b1000, C2, rs1, zero_reg);
  BlockTrampolinePoolFor(1);
}

void AssemblerRISCVC::c_mv(Register rd, Register rs2) {
  DCHECK(rd != zero_reg && rs2 != zero_reg);
  GenInstrCR(0b1000, C2, rd, rs2);
}

void AssemblerRISCVC::c_ebreak() { GenInstrCR(0b1001, C2, zero_reg, zero_reg); }

void AssemblerRISCVC::c_jalr(Register rs1) {
  DCHECK(rs1 != zero_reg);
  GenInstrCR(0b1001, C2, rs1, zero_reg);
  BlockTrampolinePoolFor(1);
}

void AssemblerRISCVC::c_add(Register rd, Register rs2) {
  DCHECK(rd != zero_reg && rs2 != zero_reg);
  GenInstrCR(0b1001, C2, rd, rs2);
}

// CA Instructions
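// The compressed two-register (CA) and related formats use 3-bit register
// fields that can only name x8..x15, which is what the
// (code & 0b11000) == 0b01000 checks below assert.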
void AssemblerRISCVC::c_sub(Register rd, Register rs2) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs2.code() & 0b11000) == 0b01000));
  GenInstrCA(0b100011, C1, rd, 0b00, rs2);
}

void AssemblerRISCVC::c_xor(Register rd, Register rs2) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs2.code() & 0b11000) == 0b01000));
  GenInstrCA(0b100011, C1, rd, 0b01, rs2);
}

void AssemblerRISCVC::c_or(Register rd, Register rs2) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs2.code() & 0b11000) == 0b01000));
  GenInstrCA(0b100011, C1, rd, 0b10, rs2);
}

void AssemblerRISCVC::c_and(Register rd, Register rs2) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs2.code() & 0b11000) == 0b01000));
  GenInstrCA(0b100011, C1, rd, 0b11, rs2);
}

#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_subw(Register rd, Register rs2) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs2.code() & 0b11000) == 0b01000));
  GenInstrCA(0b100111, C1, rd, 0b00, rs2);
}

void AssemblerRISCVC::c_addw(Register rd, Register rs2) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs2.code() & 0b11000) == 0b01000));
  GenInstrCA(0b100111, C1, rd, 0b01, rs2);
}
#endif

void AssemblerRISCVC::c_swsp(Register rs2, uint16_t uimm8) {
  DCHECK(is_uint8(uimm8) && (uimm8 & 0x3) == 0);
  uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
  GenInstrCSS(0b110, C2, rs2, uimm6);
}

#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_sdsp(Register rs2, uint16_t uimm9) {
  DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
  uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
  GenInstrCSS(0b111, C2, rs2, uimm6);
}
#endif

void AssemblerRISCVC::c_fsdsp(FPURegister rs2, uint16_t uimm9) {
  DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
  uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
  GenInstrCSS(0b101, C2, rs2, uimm6);
}

// CL Instructions

void AssemblerRISCVC::c_lw(Register rd, Register rs1, uint16_t uimm7) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
         ((uimm7 & 0x3) == 0));
  uint8_t uimm5 =
      ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
  GenInstrCL(0b010, C0, rd, rs1, uimm5);
}

#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_ld(Register rd, Register rs1, uint16_t uimm8) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
         ((uimm8 & 0x7) == 0));
  uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
  GenInstrCL(0b011, C0, rd, rs1, uimm5);
}
#endif

void AssemblerRISCVC::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
  DCHECK(((rd.code() & 0b11000) == 0b01000) &&
         ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
         ((uimm8 & 0x7) == 0));
  uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
  GenInstrCL(0b001, C0, rd, rs1, uimm5);
}

// CS Instructions

void AssemblerRISCVC::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
  DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
         ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
         ((uimm7 & 0x3) == 0));
  uint8_t uimm5 =
      ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
  GenInstrCS(0b110, C0, rs2, rs1, uimm5);
}

#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_sd(Register rs2, Register rs1, uint16_t uimm8) {
  DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
         ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
         ((uimm8 & 0x7) == 0));
  uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
  GenInstrCS(0b111, C0, rs2, rs1, uimm5);
}
#endif

void AssemblerRISCVC::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) {
  DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
         ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
         ((uimm8 & 0x7) == 0));
  uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
  GenInstrCS(0b101, C0, rs2, rs1, uimm5);
}

// CJ Instructions

void AssemblerRISCVC::c_j(int16_t imm12) {
  DCHECK(is_int12(imm12));
  int16_t uimm11 = ((imm12 & 0x800) >> 1) | ((imm12 & 0x400) >> 4) |
                   ((imm12 & 0x300) >> 1) | ((imm12 & 0x80) >> 3) |
                   ((imm12 & 0x40) >> 1) | ((imm12 & 0x20) >> 5) |
                   ((imm12 & 0x10) << 5) | (imm12 & 0xe);
  GenInstrCJ(0b101, C1, uimm11);
  BlockTrampolinePoolFor(1);
}

// CB Instructions

void AssemblerRISCVC::c_bnez(Register rs1, int16_t imm9) {
  DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
  uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
                  ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
  GenInstrCB(0b111, C1, rs1, uimm8);
}

void AssemblerRISCVC::c_beqz(Register rs1, int16_t imm9) {
  DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
  uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
                  ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
  GenInstrCB(0b110, C1, rs1, uimm8);
}

void AssemblerRISCVC::c_srli(Register rs1, int8_t shamt6) {
  DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
  GenInstrCBA(0b100, 0b00, C1, rs1, shamt6);
}

void AssemblerRISCVC::c_srai(Register rs1, int8_t shamt6) {
  DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
  GenInstrCBA(0b100, 0b01, C1, rs1, shamt6);
}

void AssemblerRISCVC::c_andi(Register rs1, int8_t imm6) {
  DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
  GenInstrCBA(0b100, 0b10, C1, rs1, imm6);
}

bool AssemblerRISCVC::IsCJal(Instr instr) {
  return (instr & kRvcOpcodeMask) == RO_C_J;
}

bool AssemblerRISCVC::IsCBranch(Instr instr) {
  int Op = instr & kRvcOpcodeMask;
  return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
}

int AssemblerRISCVC::CJumpOffset(Instr instr) {
  int32_t imm12 = ((instr & 0x4) << 3) | ((instr & 0x38) >> 2) |
                  ((instr & 0x40) << 1) | ((instr & 0x80) >> 1) |
                  ((instr & 0x100) << 2) | ((instr & 0x600) >> 1) |
                  ((instr & 0x800) >> 7) | ((instr & 0x1000) >> 1);
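  // Sign-extend the reassembled 12-bit offset (arithmetic shift pair).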
  imm12 = imm12 << 20 >> 20;
  return imm12;
}

} // namespace internal
} // namespace v8
76
src/codegen/riscv/extension-riscv-c.h
Normal file
@ -0,0 +1,76 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-c.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_C_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_C_H_

namespace v8 {
namespace internal {
class AssemblerRISCVC : public AssemblerRiscvBase {
  // RV64C Standard Extension
 public:
  void c_nop();
  void c_addi(Register rd, int8_t imm6);

  void c_addi16sp(int16_t imm10);
  void c_addi4spn(Register rd, int16_t uimm10);
  void c_li(Register rd, int8_t imm6);
  void c_lui(Register rd, int8_t imm6);
  void c_slli(Register rd, uint8_t shamt6);
  void c_lwsp(Register rd, uint16_t uimm8);
  void c_jr(Register rs1);
  void c_mv(Register rd, Register rs2);
  void c_ebreak();
  void c_jalr(Register rs1);
  void c_j(int16_t imm12);
  void c_add(Register rd, Register rs2);
  void c_sub(Register rd, Register rs2);
  void c_and(Register rd, Register rs2);
  void c_xor(Register rd, Register rs2);
  void c_or(Register rd, Register rs2);
  void c_swsp(Register rs2, uint16_t uimm8);
  void c_lw(Register rd, Register rs1, uint16_t uimm7);
  void c_sw(Register rs2, Register rs1, uint16_t uimm7);
  void c_bnez(Register rs1, int16_t imm9);
  void c_beqz(Register rs1, int16_t imm9);
  void c_srli(Register rs1, int8_t shamt6);
  void c_srai(Register rs1, int8_t shamt6);
  void c_andi(Register rs1, int8_t imm6);

  void c_fld(FPURegister rd, Register rs1, uint16_t uimm8);
  void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8);
  void c_fldsp(FPURegister rd, uint16_t uimm9);
  void c_fsdsp(FPURegister rs2, uint16_t uimm9);
#ifdef V8_TARGET_ARCH_RISCV64
  void c_ld(Register rd, Register rs1, uint16_t uimm8);
  void c_sd(Register rs2, Register rs1, uint16_t uimm8);
  void c_subw(Register rd, Register rs2);
  void c_addw(Register rd, Register rs2);
  void c_addiw(Register rd, int8_t imm6);
  void c_ldsp(Register rd, uint16_t uimm9);
  void c_sdsp(Register rs2, uint16_t uimm9);
#endif

  int CJumpOffset(Instr instr);

  static bool IsCBranch(Instr instr);
  static bool IsCJal(Instr instr);

  inline int16_t cjump_offset(Label* L) {
    return (int16_t)branch_offset_helper(L, OffsetSize::kOffset11);
  }
  inline int32_t cbranch_offset(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset9);
  }

  void c_j(Label* L) { c_j(cjump_offset(L)); }
  void c_bnez(Register rs1, Label* L) { c_bnez(rs1, cbranch_offset(L)); }
  void c_beqz(Register rs1, Label* L) { c_beqz(rs1, cbranch_offset(L)); }
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_C_H_
165
src/codegen/riscv/extension-riscv-d.cc
Normal file
@ -0,0 +1,165 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-d.h"

namespace v8 {
namespace internal {
// RV32D Standard Extension

void AssemblerRISCVD::fld(FPURegister rd, Register rs1, int16_t imm12) {
  GenInstrLoadFP_ri(0b011, rd, rs1, imm12);
}

void AssemblerRISCVD::fsd(FPURegister source, Register base, int16_t imm12) {
  GenInstrStoreFP_rri(0b011, base, source, imm12);
}

void AssemblerRISCVD::fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                              FPURegister rs3, FPURoundingMode frm) {
  GenInstrR4(0b01, MADD, rd, rs1, rs2, rs3, frm);
}

void AssemblerRISCVD::fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                              FPURegister rs3, FPURoundingMode frm) {
  GenInstrR4(0b01, MSUB, rd, rs1, rs2, rs3, frm);
}

void AssemblerRISCVD::fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                               FPURegister rs3, FPURoundingMode frm) {
  GenInstrR4(0b01, NMSUB, rd, rs1, rs2, rs3, frm);
}

void AssemblerRISCVD::fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                               FPURegister rs3, FPURoundingMode frm) {
  GenInstrR4(0b01, NMADD, rd, rs1, rs2, rs3, frm);
}

void AssemblerRISCVD::fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                             FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0000001, frm, rd, rs1, rs2);
}

void AssemblerRISCVD::fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                             FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0000101, frm, rd, rs1, rs2);
}

void AssemblerRISCVD::fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                             FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0001001, frm, rd, rs1, rs2);
}

void AssemblerRISCVD::fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                             FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0001101, frm, rd, rs1, rs2);
}

void AssemblerRISCVD::fsqrt_d(FPURegister rd, FPURegister rs1,
                              FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0101101, frm, rd, rs1, zero_reg);
}

void AssemblerRISCVD::fsgnj_d(FPURegister rd, FPURegister rs1,
                              FPURegister rs2) {
  GenInstrALUFP_rr(0b0010001, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVD::fsgnjn_d(FPURegister rd, FPURegister rs1,
                               FPURegister rs2) {
  GenInstrALUFP_rr(0b0010001, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVD::fsgnjx_d(FPURegister rd, FPURegister rs1,
                               FPURegister rs2) {
  GenInstrALUFP_rr(0b0010001, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVD::fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b0010101, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVD::fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b0010101, 0b001, rd, rs1, rs2);
}
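
// In the FCVT emitters below, the rs2 position is not a source register: it
// encodes the conversion variant (0 = W, 1 = WU, 2 = L, 3 = LU, or the
// source FP format), hence the ToRegister(n) arguments.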

void AssemblerRISCVD::fcvt_s_d(FPURegister rd, FPURegister rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(1));
}

void AssemblerRISCVD::fcvt_d_s(FPURegister rd, FPURegister rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0100001, frm, rd, rs1, zero_reg);
}

void AssemblerRISCVD::feq_d(Register rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b1010001, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVD::flt_d(Register rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b1010001, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVD::fle_d(Register rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b1010001, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVD::fclass_d(Register rd, FPURegister rs1) {
  GenInstrALUFP_rr(0b1110001, 0b001, rd, rs1, zero_reg);
}

void AssemblerRISCVD::fcvt_w_d(Register rd, FPURegister rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1100001, frm, rd, rs1, zero_reg);
}

void AssemblerRISCVD::fcvt_wu_d(Register rd, FPURegister rs1,
                                FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(1));
}

void AssemblerRISCVD::fcvt_d_w(FPURegister rd, Register rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1101001, frm, rd, rs1, zero_reg);
}

void AssemblerRISCVD::fcvt_d_wu(FPURegister rd, Register rs1,
                                FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(1));
}

#ifdef V8_TARGET_ARCH_RISCV64
// RV64D Standard Extension (in addition to RV32D)

void AssemblerRISCVD::fcvt_l_d(Register rd, FPURegister rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(2));
}

void AssemblerRISCVD::fcvt_lu_d(Register rd, FPURegister rs1,
                                FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(3));
}

void AssemblerRISCVD::fmv_x_d(Register rd, FPURegister rs1) {
  GenInstrALUFP_rr(0b1110001, 0b000, rd, rs1, zero_reg);
}

void AssemblerRISCVD::fcvt_d_l(FPURegister rd, Register rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(2));
}

void AssemblerRISCVD::fcvt_d_lu(FPURegister rd, Register rs1,
                                FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(3));
}

void AssemblerRISCVD::fmv_d_x(FPURegister rd, Register rs1) {
  GenInstrALUFP_rr(0b1111001, 0b000, rd, rs1, zero_reg);
}
#endif

} // namespace internal
} // namespace v8
67
src/codegen/riscv/extension-riscv-d.h
Normal file
@ -0,0 +1,67 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-d.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_D_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_D_H_

namespace v8 {
namespace internal {
class AssemblerRISCVD : public AssemblerRiscvBase {
  // RV32D Standard Extension
 public:
  void fld(FPURegister rd, Register rs1, int16_t imm12);
  void fsd(FPURegister source, Register base, int16_t imm12);
  void fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
               FPURegister rs3, FPURoundingMode frm = RNE);
  void fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
               FPURegister rs3, FPURoundingMode frm = RNE);
  void fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                FPURegister rs3, FPURoundingMode frm = RNE);
  void fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
                FPURegister rs3, FPURoundingMode frm = RNE);
  void fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
              FPURoundingMode frm = RNE);
  void fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
              FPURoundingMode frm = RNE);
  void fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
              FPURoundingMode frm = RNE);
  void fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
              FPURoundingMode frm = RNE);
  void fsqrt_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fsgnjx_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fcvt_s_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fcvt_d_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void feq_d(Register rd, FPURegister rs1, FPURegister rs2);
  void flt_d(Register rd, FPURegister rs1, FPURegister rs2);
  void fle_d(Register rd, FPURegister rs1, FPURegister rs2);
  void fclass_d(Register rd, FPURegister rs1);
  void fcvt_w_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fcvt_wu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fcvt_d_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
  void fcvt_d_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);

#ifdef V8_TARGET_ARCH_RISCV64
  // RV64D Standard Extension (in addition to RV32D)
  void fcvt_l_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fcvt_lu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fmv_x_d(Register rd, FPURegister rs1);
  void fcvt_d_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
  void fcvt_d_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
  void fmv_d_x(FPURegister rd, Register rs1);
#endif
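
  // Standard pseudo-instruction expansions: register moves, absolute value
  // and negation are sign-injection ops with rs repeated.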
  void fmv_d(FPURegister rd, FPURegister rs) { fsgnj_d(rd, rs, rs); }
  void fabs_d(FPURegister rd, FPURegister rs) { fsgnjx_d(rd, rs, rs); }
  void fneg_d(FPURegister rd, FPURegister rs) { fsgnjn_d(rd, rs, rs); }
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_D_H_
156
src/codegen/riscv/extension-riscv-f.cc
Normal file
@ -0,0 +1,156 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-f.h"

namespace v8 {
namespace internal {

// RV32F Standard Extension

void AssemblerRISCVF::flw(FPURegister rd, Register rs1, int16_t imm12) {
  GenInstrLoadFP_ri(0b010, rd, rs1, imm12);
}

void AssemblerRISCVF::fsw(FPURegister source, Register base, int16_t imm12) {
  GenInstrStoreFP_rri(0b010, base, source, imm12);
}

void AssemblerRISCVF::fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                              FPURegister rs3, FPURoundingMode frm) {
  GenInstrR4(0b00, MADD, rd, rs1, rs2, rs3, frm);
}

void AssemblerRISCVF::fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                              FPURegister rs3, FPURoundingMode frm) {
  GenInstrR4(0b00, MSUB, rd, rs1, rs2, rs3, frm);
}

void AssemblerRISCVF::fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                               FPURegister rs3, FPURoundingMode frm) {
  GenInstrR4(0b00, NMSUB, rd, rs1, rs2, rs3, frm);
}

void AssemblerRISCVF::fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                               FPURegister rs3, FPURoundingMode frm) {
  GenInstrR4(0b00, NMADD, rd, rs1, rs2, rs3, frm);
}

void AssemblerRISCVF::fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                             FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0000000, frm, rd, rs1, rs2);
}

void AssemblerRISCVF::fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                             FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0000100, frm, rd, rs1, rs2);
}

void AssemblerRISCVF::fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                             FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0001000, frm, rd, rs1, rs2);
}

void AssemblerRISCVF::fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                             FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0001100, frm, rd, rs1, rs2);
}

void AssemblerRISCVF::fsqrt_s(FPURegister rd, FPURegister rs1,
                              FPURoundingMode frm) {
  GenInstrALUFP_rr(0b0101100, frm, rd, rs1, zero_reg);
}

void AssemblerRISCVF::fsgnj_s(FPURegister rd, FPURegister rs1,
                              FPURegister rs2) {
  GenInstrALUFP_rr(0b0010000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVF::fsgnjn_s(FPURegister rd, FPURegister rs1,
                               FPURegister rs2) {
  GenInstrALUFP_rr(0b0010000, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVF::fsgnjx_s(FPURegister rd, FPURegister rs1,
                               FPURegister rs2) {
  GenInstrALUFP_rr(0b0010000, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVF::fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b0010100, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVF::fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b0010100, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVF::fcvt_w_s(Register rd, FPURegister rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1100000, frm, rd, rs1, zero_reg);
}

void AssemblerRISCVF::fcvt_wu_s(Register rd, FPURegister rs1,
                                FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(1));
}

void AssemblerRISCVF::fmv_x_w(Register rd, FPURegister rs1) {
  GenInstrALUFP_rr(0b1110000, 0b000, rd, rs1, zero_reg);
}

void AssemblerRISCVF::feq_s(Register rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b1010000, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVF::flt_s(Register rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b1010000, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVF::fle_s(Register rd, FPURegister rs1, FPURegister rs2) {
  GenInstrALUFP_rr(0b1010000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVF::fclass_s(Register rd, FPURegister rs1) {
  GenInstrALUFP_rr(0b1110000, 0b001, rd, rs1, zero_reg);
}

void AssemblerRISCVF::fcvt_s_w(FPURegister rd, Register rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1101000, frm, rd, rs1, zero_reg);
}

void AssemblerRISCVF::fcvt_s_wu(FPURegister rd, Register rs1,
                                FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(1));
}

void AssemblerRISCVF::fmv_w_x(FPURegister rd, Register rs1) {
  GenInstrALUFP_rr(0b1111000, 0b000, rd, rs1, zero_reg);
}

#ifdef V8_TARGET_ARCH_RISCV64
// RV64F Standard Extension (in addition to RV32F)

void AssemblerRISCVF::fcvt_l_s(Register rd, FPURegister rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(2));
}

void AssemblerRISCVF::fcvt_lu_s(Register rd, FPURegister rs1,
                                FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(3));
}

void AssemblerRISCVF::fcvt_s_l(FPURegister rd, Register rs1,
                               FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(2));
}

void AssemblerRISCVF::fcvt_s_lu(FPURegister rd, Register rs1,
                                FPURoundingMode frm) {
  GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(3));
}
#endif

} // namespace internal
} // namespace v8
65
src/codegen/riscv/extension-riscv-f.h
Normal file
@ -0,0 +1,65 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-f.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_F_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_F_H_

namespace v8 {
namespace internal {
class AssemblerRISCVF : public AssemblerRiscvBase {
  // RV32F Standard Extension
 public:
  void flw(FPURegister rd, Register rs1, int16_t imm12);
  void fsw(FPURegister source, Register base, int16_t imm12);
  void fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
               FPURegister rs3, FPURoundingMode frm = RNE);
  void fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
               FPURegister rs3, FPURoundingMode frm = RNE);
  void fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                FPURegister rs3, FPURoundingMode frm = RNE);
  void fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
                FPURegister rs3, FPURoundingMode frm = RNE);
  void fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
              FPURoundingMode frm = RNE);
  void fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
              FPURoundingMode frm = RNE);
  void fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
              FPURoundingMode frm = RNE);
  void fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
              FPURoundingMode frm = RNE);
  void fsqrt_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fsgnjn_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fsgnjx_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
  void fcvt_w_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fcvt_wu_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fmv_x_w(Register rd, FPURegister rs1);
  void feq_s(Register rd, FPURegister rs1, FPURegister rs2);
  void flt_s(Register rd, FPURegister rs1, FPURegister rs2);
  void fle_s(Register rd, FPURegister rs1, FPURegister rs2);
  void fclass_s(Register rd, FPURegister rs1);
  void fcvt_s_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
  void fcvt_s_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
  void fmv_w_x(FPURegister rd, Register rs1);

#ifdef V8_TARGET_ARCH_RISCV64
  // RV64F Standard Extension (in addition to RV32F)
  void fcvt_l_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fcvt_lu_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
  void fcvt_s_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
  void fcvt_s_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
#endif

  void fmv_s(FPURegister rd, FPURegister rs) { fsgnj_s(rd, rs, rs); }
  void fabs_s(FPURegister rd, FPURegister rs) { fsgnjx_s(rd, rs, rs); }
  void fneg_s(FPURegister rd, FPURegister rs) { fsgnjn_s(rd, rs, rs); }
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_F_H_
66
src/codegen/riscv/extension-riscv-m.cc
Normal file
@ -0,0 +1,66 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-m.h"

namespace v8 {
namespace internal {
// RV32M Standard Extension
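// All M ops live under the OP major opcode with funct7 = 0b0000001; funct3
// picks the operation. mulh/mulhsu/mulhu return the upper XLEN bits of the
// full product.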

void AssemblerRISCVM::mul(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000001, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVM::mulh(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000001, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVM::mulhsu(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000001, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVM::mulhu(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000001, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVM::div(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000001, 0b100, rd, rs1, rs2);
}

void AssemblerRISCVM::divu(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000001, 0b101, rd, rs1, rs2);
}

void AssemblerRISCVM::rem(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000001, 0b110, rd, rs1, rs2);
}

void AssemblerRISCVM::remu(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000001, 0b111, rd, rs1, rs2);
}

#ifdef V8_TARGET_ARCH_RISCV64
// RV64M Standard Extension (in addition to RV32M)

void AssemblerRISCVM::mulw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000001, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVM::divw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000001, 0b100, rd, rs1, rs2);
}

void AssemblerRISCVM::divuw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000001, 0b101, rd, rs1, rs2);
}

void AssemblerRISCVM::remw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000001, 0b110, rd, rs1, rs2);
}

void AssemblerRISCVM::remuw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000001, 0b111, rd, rs1, rs2);
}
#endif
} // namespace internal
} // namespace v8
36
src/codegen/riscv/extension-riscv-m.h
Normal file
@ -0,0 +1,36 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-m.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_M_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_M_H_

namespace v8 {
namespace internal {
class AssemblerRISCVM : public AssemblerRiscvBase {
  // RV32M Standard Extension
 public:
  void mul(Register rd, Register rs1, Register rs2);
  void mulh(Register rd, Register rs1, Register rs2);
  void mulhsu(Register rd, Register rs1, Register rs2);
  void mulhu(Register rd, Register rs1, Register rs2);
  void div(Register rd, Register rs1, Register rs2);
  void divu(Register rd, Register rs1, Register rs2);
  void rem(Register rd, Register rs1, Register rs2);
  void remu(Register rd, Register rs1, Register rs2);
#ifdef V8_TARGET_ARCH_RISCV64
  // RV64M Standard Extension (in addition to RV32M)
  void mulw(Register rd, Register rs1, Register rs2);
  void divw(Register rd, Register rs1, Register rs2);
  void divuw(Register rd, Register rs1, Register rs2);
  void remw(Register rd, Register rs1, Register rs2);
  void remuw(Register rd, Register rs1, Register rs2);
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_M_H_
889
src/codegen/riscv/extension-riscv-v.cc
Normal file
@ -0,0 +1,889 @@

// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/riscv/extension-riscv-v.h"

#include "src/codegen/assembler.h"
#include "src/codegen/riscv/constant-riscv-v.h"
#include "src/codegen/riscv/register-riscv.h"

namespace v8 {
namespace internal {

// RVV
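// Vector single-width reductions (v*red*_vs): vd[0] = op(vs1[0], vs2[*])
// over all active elements of vs2.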

void AssemblerRISCVV::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                 MaskType mask) {
  GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                 MaskType mask) {
  GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vmv_vv(VRegister vd, VRegister vs1) {
  GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
}

void AssemblerRISCVV::vmv_vx(VRegister vd, Register rs1) {
  GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
}

void AssemblerRISCVV::vmv_vi(VRegister vd, uint8_t simm5) {
  GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
}

void AssemblerRISCVV::vmv_xs(Register rd, VRegister vs2) {
  GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b00000, vs2, NoMask);
}

void AssemblerRISCVV::vmv_sx(VRegister vd, Register rs1) {
  GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
}

void AssemblerRISCVV::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
}

void AssemblerRISCVV::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
}

void AssemblerRISCVV::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  DCHECK_NE(vd, vs1);
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5,
                                  MaskType mask) {
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask);
}

void AssemblerRISCVV::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
                                  MaskType mask) {
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
}

void AssemblerRISCVV::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
                                MaskType mask) {
  GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask);
}

void AssemblerRISCVV::vid_v(VRegister vd, MaskType mask) {
  GenInstrV(VMUNARY0_FUNCT6, OP_MVV, vd, VID_V, v0, mask);
}
|
||||
|
||||
#define DEFINE_OPIVV(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPFVV(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPFWV(name, funct6) \
|
||||
void AssemblerRISCVV::name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPFRED(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPIVX(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPIVI(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, vd, imm5, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPMVV(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
|
||||
}
|
||||
|
||||
// void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, Register
|
||||
// rs1,
|
||||
// VRegister vs2, MaskType mask = NoMask);
|
||||
#define DEFINE_OPMVX(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPFVF(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vf(VRegister vd, VRegister vs2, \
|
||||
FPURegister fs1, MaskType mask) { \
|
||||
GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPFWF(name, funct6) \
|
||||
void AssemblerRISCVV::name##_wf(VRegister vd, VRegister vs2, \
|
||||
FPURegister fs1, MaskType mask) { \
|
||||
GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPFVV_FMA(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
|
||||
MaskType mask) { \
|
||||
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
|
||||
}
|
||||
|
||||
#define DEFINE_OPFVF_FMA(name, funct6) \
|
||||
void AssemblerRISCVV::name##_vf(VRegister vd, FPURegister fs1, \
|
||||
VRegister vs2, MaskType mask) { \
|
||||
GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
|
||||
}
|
||||
|
||||
// vector integer extension
|
||||
#define DEFINE_OPMVV_VIE(name, vs1) \
|
||||
void AssemblerRISCVV::name(VRegister vd, VRegister vs2, MaskType mask) { \
|
||||
GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask); \
|
||||
}
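
// Illustration (not part of the original change): DEFINE_OPIVV(vadd,
// VADD_FUNCT6) below expands to
//   void AssemblerRISCVV::vadd_vv(VRegister vd, VRegister vs2, VRegister vs1,
//                                 MaskType mask) {
//     GenInstrV(VADD_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
//   }
// i.e. one OP-V arithmetic encoder per (mnemonic, operand-form) pair.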

void AssemblerRISCVV::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) {
  GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask);
}

void AssemblerRISCVV::vfmv_fs(FPURegister fd, VRegister vs2) {
  GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask);
}

void AssemblerRISCVV::vfmv_sf(VRegister vd, FPURegister fs) {
  GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask);
}

DEFINE_OPIVV(vadd, VADD_FUNCT6)
DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
DEFINE_OPIVV(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsub, VSUB_FUNCT6)
DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
DEFINE_OPMVX(vmul, VMUL_FUNCT6)
DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
DEFINE_OPMVV(vmul, VMUL_FUNCT6)
DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
DEFINE_OPIVV(vminu, VMINU_FUNCT6)
DEFINE_OPIVX(vminu, VMINU_FUNCT6)
DEFINE_OPIVV(vmin, VMIN_FUNCT6)
DEFINE_OPIVX(vmin, VMIN_FUNCT6)
DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
DEFINE_OPIVV(vmax, VMAX_FUNCT6)
DEFINE_OPIVX(vmax, VMAX_FUNCT6)
DEFINE_OPIVV(vand, VAND_FUNCT6)
DEFINE_OPIVX(vand, VAND_FUNCT6)
DEFINE_OPIVI(vand, VAND_FUNCT6)
DEFINE_OPIVV(vor, VOR_FUNCT6)
DEFINE_OPIVX(vor, VOR_FUNCT6)
DEFINE_OPIVI(vor, VOR_FUNCT6)
DEFINE_OPIVV(vxor, VXOR_FUNCT6)
DEFINE_OPIVX(vxor, VXOR_FUNCT6)
DEFINE_OPIVI(vxor, VXOR_FUNCT6)

DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)

DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)

DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)

DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)

DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)

DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)

DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)

DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)

DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)

DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
DEFINE_OPIVI(vsrl, VSRL_FUNCT6)

DEFINE_OPIVV(vsra, VSRA_FUNCT6)
DEFINE_OPIVX(vsra, VSRA_FUNCT6)
DEFINE_OPIVI(vsra, VSRA_FUNCT6)

DEFINE_OPIVV(vsll, VSLL_FUNCT6)
DEFINE_OPIVX(vsll, VSLL_FUNCT6)
DEFINE_OPIVI(vsll, VSLL_FUNCT6)

DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)

DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)

// Vector Widening Floating-Point Add/Subtract Instructions
DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)

// Vector Widening Floating-Point Reduction Instructions
DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)

// Vector Widening Floating-Point Multiply
DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)

DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)

DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)

// Vector Widening Floating-Point Fused Multiply-Add Instructions
DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)

// Vector Narrowing Fixed-Point Clip Instructions
DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)

// Vector Integer Extension
DEFINE_OPMVV_VIE(vzext_vf8, 0b00010)
DEFINE_OPMVV_VIE(vsext_vf8, 0b00011)
DEFINE_OPMVV_VIE(vzext_vf4, 0b00100)
DEFINE_OPMVV_VIE(vsext_vf4, 0b00101)
DEFINE_OPMVV_VIE(vzext_vf2, 0b00110)
DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)

#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
#undef DEFINE_OPFVV
#undef DEFINE_OPFWV
#undef DEFINE_OPFVF
#undef DEFINE_OPFWF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
#undef DEFINE_OPMVV_VIE

void AssemblerRISCVV::vsetvli(Register rd, Register rs1, VSew vsew,
                              Vlmul vlmul, TailAgnosticType tail,
                              MaskAgnosticType mask) {
  int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
  emit(instr);
}

void AssemblerRISCVV::vsetivli(Register rd, uint8_t uimm, VSew vsew,
                               Vlmul vlmul, TailAgnosticType tail,
                               MaskAgnosticType mask) {
  DCHECK(is_uint5(uimm));
  int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((uimm & 0x1F) << kRvvUimmShift) |
                (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
  emit(instr);
}

void AssemblerRISCVV::vsetvl(Register rd, Register rs1, Register rs2) {
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
  emit(instr);
}
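
// Usage sketch (illustrative only; the register choices are hypothetical):
//   vsetvli(t0, a0, E32, m1);  // t0 = min(a0, VLMAX) for 32-bit elements
// assuming m1 is the Vlmul encoding for LMUL=1 in constant-riscv-v.h.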

uint8_t vsew_switch(VSew vsew) {
  uint8_t width;
  switch (vsew) {
    case E8:
      width = 0b000;
      break;
    case E16:
      width = 0b101;
      break;
    case E32:
      width = 0b110;
      break;
    default:
      width = 0b111;
      break;
  }
  return width;
}
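
// Note: these width values follow the RVV memory-instruction encoding
// (0b000 = 8-bit, 0b101 = 16-bit, 0b110 = 32-bit elements); E64 and any
// unexpected SEW fall through to the 64-bit encoding 0b111.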

// OPIVV OPFVV OPMVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                VRegister vd, VRegister vs1, VRegister vs2,
                                MaskType mask) {
  DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                VRegister vd, int8_t vs1, VRegister vs2,
                                MaskType mask) {
  DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((vs1 & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPMVV OPFVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                Register rd, VRegister vs1, VRegister vs2,
                                MaskType mask) {
  DCHECK(opcode == OP_MVV || opcode == OP_FVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPFVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                FPURegister fd, VRegister vs1, VRegister vs2,
                                MaskType mask) {
  DCHECK(opcode == OP_FVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((fd.code() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPIVX OPMVX
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                VRegister vd, Register rs1, VRegister vs2,
                                MaskType mask) {
  DCHECK(opcode == OP_IVX || opcode == OP_MVX);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPFVF
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                VRegister vd, FPURegister fs1, VRegister vs2,
                                MaskType mask) {
  DCHECK(opcode == OP_FVF);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((fs1.code() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPMVX
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Register rd, Register rs1,
                                VRegister vs2, MaskType mask) {
  Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPIVI
void AssemblerRISCVV::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
                                VRegister vs2, MaskType mask) {
  DCHECK(is_uint5(imm5) || is_int5(imm5));
  Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// VL VS
void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, uint8_t umop, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}

void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, Register rs2, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}

// VL VS AMO
void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, VRegister vs2, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  DCHECK(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}

// vmv_xs vcpop_m vfirst_m
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                Register rd, uint8_t vs1, VRegister vs2,
                                MaskType mask) {
  DCHECK(opcode == OP_MVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((vs1 & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

void AssemblerRISCVV::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
                         MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000);
}

void AssemblerRISCVV::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000);
}

void AssemblerRISCVV::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0);
}

void AssemblerRISCVV::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
                         MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000);
}

void AssemblerRISCVV::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000);
}

void AssemblerRISCVV::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000);
}

void AssemblerRISCVV::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000);
}

void AssemblerRISCVV::vlseg2(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001);
}

void AssemblerRISCVV::vlseg3(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010);
}

void AssemblerRISCVV::vlseg4(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011);
}

void AssemblerRISCVV::vlseg5(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100);
}

void AssemblerRISCVV::vlseg6(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101);
}

void AssemblerRISCVV::vlseg7(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110);
}

void AssemblerRISCVV::vlseg8(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111);
}

void AssemblerRISCVV::vsseg2(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001);
}

void AssemblerRISCVV::vsseg3(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010);
}

void AssemblerRISCVV::vsseg4(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011);
}

void AssemblerRISCVV::vsseg5(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100);
}

void AssemblerRISCVV::vsseg6(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101);
}

void AssemblerRISCVV::vsseg7(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110);
}

void AssemblerRISCVV::vsseg8(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111);
}

void AssemblerRISCVV::vlsseg2(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}

void AssemblerRISCVV::vlsseg3(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}

void AssemblerRISCVV::vlsseg4(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}

void AssemblerRISCVV::vlsseg5(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}

void AssemblerRISCVV::vlsseg6(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}

void AssemblerRISCVV::vlsseg7(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}

void AssemblerRISCVV::vlsseg8(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}

void AssemblerRISCVV::vssseg2(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}

void AssemblerRISCVV::vssseg3(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}

void AssemblerRISCVV::vssseg4(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}

void AssemblerRISCVV::vssseg5(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}

void AssemblerRISCVV::vssseg6(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}

void AssemblerRISCVV::vssseg7(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}

void AssemblerRISCVV::vssseg8(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}

void AssemblerRISCVV::vlxseg2(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}

void AssemblerRISCVV::vlxseg3(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}

void AssemblerRISCVV::vlxseg4(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}

void AssemblerRISCVV::vlxseg5(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}

void AssemblerRISCVV::vlxseg6(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}

void AssemblerRISCVV::vlxseg7(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}

void AssemblerRISCVV::vlxseg8(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}

void AssemblerRISCVV::vsxseg2(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}

void AssemblerRISCVV::vsxseg3(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}

void AssemblerRISCVV::vsxseg4(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}

void AssemblerRISCVV::vsxseg5(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}

void AssemblerRISCVV::vsxseg6(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}

void AssemblerRISCVV::vsxseg7(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}

void AssemblerRISCVV::vsxseg8(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}

void AssemblerRISCVV::vfirst_m(Register rd, VRegister vs2, MaskType mask) {
  GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10001, vs2, mask);
}

void AssemblerRISCVV::vcpop_m(Register rd, VRegister vs2, MaskType mask) {
  GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10000, vs2, mask);
}

LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
                                         uint8_t laneidx) {
  switch (rep) {
    case MachineRepresentation::kWord8:
      *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
      break;
    case MachineRepresentation::kWord16:
      *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8);
      break;
    case MachineRepresentation::kWord32:
      *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4);
      break;
    case MachineRepresentation::kWord64:
      *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2);
      break;
    default:
      UNREACHABLE();
  }
}

}  // namespace internal
}  // namespace v8
src/codegen/riscv/extension-riscv-v.h (new file, 485 lines)
@@ -0,0 +1,485 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_V_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_V_H_

#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-v.h"
#include "src/codegen/riscv/register-riscv.h"

namespace v8 {
namespace internal {

class AssemblerRISCVV : public AssemblerRiscvBase {
 public:
  // RVV
  static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
                         MaskAgnosticType mask = mu) {
    return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
  }
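
  // Illustration (not part of the original change): the packed zimm mirrors
  // the vtype CSR layout, i.e. bit 7 = vma, bit 6 = vta, bits 5:3 = vsew,
  // bits 2:0 = vlmul, so GenZimm(E32, m1, tu, mu) selects SEW=32 with
  // tail- and mask-undisturbed behavior (m1 assumed to encode LMUL=1).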

  void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
          MaskType mask = NoMask);
  void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
           MaskType mask = NoMask);
  void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
           MaskType mask = NoMask);

  void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
          MaskType mask = NoMask);
  void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
           MaskType mask = NoMask);
  void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
           MaskType mask = NoMask);

  void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
           MaskType mask = NoMask);

#define SegInstr(OP)  \
  void OP##seg2(ARG); \
  void OP##seg3(ARG); \
  void OP##seg4(ARG); \
  void OP##seg5(ARG); \
  void OP##seg6(ARG); \
  void OP##seg7(ARG); \
  void OP##seg8(ARG);

#define ARG \
  VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask

  SegInstr(vl) SegInstr(vs)
#undef ARG

#define ARG \
  VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask

  SegInstr(vls) SegInstr(vss)
#undef ARG

#define ARG \
  VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask

  SegInstr(vsx) SegInstr(vlx)
#undef ARG
#undef SegInstr
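
  // Illustration: each SegInstr expansion declares the seg2..seg8 forms of a
  // segment load/store, e.g. SegInstr(vl) declares vlseg2(vd, rs1, lumop,
  // vsew, mask) through vlseg8(...), matching the definitions in
  // extension-riscv-v.cc.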

  // RVV Vector Arithmetic Instruction

  void vmv_vv(VRegister vd, VRegister vs1);
  void vmv_vx(VRegister vd, Register rs1);
  void vmv_vi(VRegister vd, uint8_t simm5);
  void vmv_xs(Register rd, VRegister vs2);
  void vmv_sx(VRegister vd, Register rs1);
  void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2);
  void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
  void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);

  void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
                   MaskType mask = NoMask);
  void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
                  MaskType mask = NoMask);
  void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
                  MaskType mask = NoMask);
  void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
                   MaskType mask = NoMask);

  void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
  void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
  void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);

  void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
  void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
  void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);

  void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask);
  void vfmv_fs(FPURegister fd, VRegister vs2);
  void vfmv_sf(VRegister vd, FPURegister fs);

  void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
                 MaskType mask = NoMask);
  void vid_v(VRegister vd, MaskType mask = Mask);

#define DEFINE_OPIVV(name, funct6)                           \
  void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPIVX(name, funct6)                          \
  void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPIVI(name, funct6)                         \
  void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
                 MaskType mask = NoMask);

#define DEFINE_OPMVV(name, funct6)                           \
  void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPMVX(name, funct6)                          \
  void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPFVV(name, funct6)                           \
  void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPFWV(name, funct6)                           \
  void name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPFRED(name, funct6)                          \
  void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPFVF(name, funct6)                             \
  void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPFWF(name, funct6)                             \
  void name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \
                 MaskType mask = NoMask);

#define DEFINE_OPFVV_FMA(name, funct6)                       \
  void name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
                 MaskType mask = NoMask);

#define DEFINE_OPFVF_FMA(name, funct6)                         \
  void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \
                 MaskType mask = NoMask);

#define DEFINE_OPMVV_VIE(name) \
  void name(VRegister vd, VRegister vs2, MaskType mask = NoMask);

  DEFINE_OPIVV(vadd, VADD_FUNCT6)
  DEFINE_OPIVX(vadd, VADD_FUNCT6)
  DEFINE_OPIVI(vadd, VADD_FUNCT6)
  DEFINE_OPIVV(vsub, VSUB_FUNCT6)
  DEFINE_OPIVX(vsub, VSUB_FUNCT6)
  DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
  DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
  DEFINE_OPMVX(vmul, VMUL_FUNCT6)
  DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
  DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
  DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
  DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
  DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
  DEFINE_OPMVV(vmul, VMUL_FUNCT6)
  DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
  DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
  DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
  DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
  DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
  DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
  DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
  DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
  DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
  DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
  DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
  DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
  DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
  DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
  DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
  DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
  DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
  DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
  DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
  DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
  DEFINE_OPIVV(vminu, VMINU_FUNCT6)
  DEFINE_OPIVX(vminu, VMINU_FUNCT6)
  DEFINE_OPIVV(vmin, VMIN_FUNCT6)
  DEFINE_OPIVX(vmin, VMIN_FUNCT6)
  DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
  DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
  DEFINE_OPIVV(vmax, VMAX_FUNCT6)
  DEFINE_OPIVX(vmax, VMAX_FUNCT6)
  DEFINE_OPIVV(vand, VAND_FUNCT6)
  DEFINE_OPIVX(vand, VAND_FUNCT6)
  DEFINE_OPIVI(vand, VAND_FUNCT6)
  DEFINE_OPIVV(vor, VOR_FUNCT6)
  DEFINE_OPIVX(vor, VOR_FUNCT6)
  DEFINE_OPIVI(vor, VOR_FUNCT6)
  DEFINE_OPIVV(vxor, VXOR_FUNCT6)
  DEFINE_OPIVX(vxor, VXOR_FUNCT6)
  DEFINE_OPIVI(vxor, VXOR_FUNCT6)
  DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
  DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
  DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)

  DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
  DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
  DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
  DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)

  DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
  DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
  DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)

  DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
  DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
  DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)

  DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
  DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)

  DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
  DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)

  DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
  DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
  DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)

  DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
  DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
  DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)

  DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
  DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)

  DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
  DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)

  DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
  DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
  DEFINE_OPIVI(vsrl, VSRL_FUNCT6)

  DEFINE_OPIVV(vsra, VSRA_FUNCT6)
  DEFINE_OPIVX(vsra, VSRA_FUNCT6)
  DEFINE_OPIVI(vsra, VSRA_FUNCT6)

  DEFINE_OPIVV(vsll, VSLL_FUNCT6)
  DEFINE_OPIVX(vsll, VSLL_FUNCT6)
  DEFINE_OPIVI(vsll, VSLL_FUNCT6)

  DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
  DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)

  DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
  DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
  DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
  DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
  DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
  DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
  DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
  DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)

  // Vector Widening Floating-Point Add/Subtract Instructions
  DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
  DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
  DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
  DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
  DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
  DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
  DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
  DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)

  // Vector Widening Floating-Point Reduction Instructions
  DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
  DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)

  // Vector Widening Floating-Point Multiply
  DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
  DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)

  DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
  DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
  DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
  DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
  DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
  DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
  DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)

  DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
  DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
  DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
  DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
  DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
  DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)

  // Vector Single-Width Floating-Point Fused Multiply-Add Instructions
  DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
  DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
  DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
  DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
  DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
  DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
  DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
  DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
  DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
  DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
  DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
  DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
  DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
  DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
  DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
  DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)

  // Vector Widening Floating-Point Fused Multiply-Add Instructions
  DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
  DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
  DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
  DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
  DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
  DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
  DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
  DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)

  // Vector Narrowing Fixed-Point Clip Instructions
  DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
  DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
  DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
  DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
  DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
  DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)

  // Vector Integer Extension
  DEFINE_OPMVV_VIE(vzext_vf8)
  DEFINE_OPMVV_VIE(vsext_vf8)
  DEFINE_OPMVV_VIE(vzext_vf4)
  DEFINE_OPMVV_VIE(vsext_vf4)
  DEFINE_OPMVV_VIE(vzext_vf2)
  DEFINE_OPMVV_VIE(vsext_vf2)

#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
#undef DEFINE_OPMVV
#undef DEFINE_OPMVX
#undef DEFINE_OPFVV
#undef DEFINE_OPFWV
#undef DEFINE_OPFVF
#undef DEFINE_OPFWF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
#undef DEFINE_OPMVV_VIE
#undef DEFINE_OPFRED

#define DEFINE_VFUNARY(name, funct6, vs1)                             \
  void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) {    \
    GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                    \
  }

  DEFINE_VFUNARY(vfcvt_xu_f_v, VFUNARY0_FUNCT6, VFCVT_XU_F_V)
  DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
  DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
  DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
  DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V)
  DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V)
  DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V)
  DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V)
  DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V)

  DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
  DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W)
  DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W)

  DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
  DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
  DEFINE_VFUNARY(vfrsqrt7_v, VFUNARY1_FUNCT6, VFRSQRT7_V)
  DEFINE_VFUNARY(vfrec7_v, VFUNARY1_FUNCT6, VFREC7_V)
#undef DEFINE_VFUNARY

  void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
    vxor_vi(dst, src, -1, mask);
  }

  void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
    vrsub_vx(dst, src, zero_reg, mask);
  }

  void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
    vfsngjn_vv(dst, src, src, mask);
  }

  void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
    vfsngjx_vv(dst, src, src, mask);
  }
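
  // These four helpers follow the standard RVV pseudo-instruction idioms:
  // vnot.v is vxor.vi with -1, vneg.v is vrsub.vx against x0, and
  // vfneg.v/vfabs.v are the sign-injection forms vfsgnjn/vfsgnjx applied to
  // the same source operand.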

  void vfirst_m(Register rd, VRegister vs2, MaskType mask = NoMask);

  void vcpop_m(Register rd, VRegister vs2, MaskType mask = NoMask);

 protected:
  void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
               TailAgnosticType tail = tu, MaskAgnosticType mask = mu);

  void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
                TailAgnosticType tail = tu, MaskAgnosticType mask = mu);

  inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
                        TailAgnosticType tail = tu,
                        MaskAgnosticType mask = mu) {
    // Forward the caller's tail/mask policy instead of hard-coding tu/mu.
    vsetvli(rd, zero_reg, vsew, vlmul, tail, mask);
  }

  inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
                     MaskAgnosticType mask = mu) {
    vsetvli(zero_reg, zero_reg, vsew, vlmul, tail, mask);
  }
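
  // Per the RVV spec: rs1 = x0 with rd != x0 requests AVL = VLMAX
  // (vsetvlmax above), while rd = rs1 = x0 keeps the current vl and only
  // updates vtype (the inline vsetvl above).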

  void vsetvl(Register rd, Register rs1, Register rs2);

  // ----------------------------RVV------------------------------------------
  // vsetvl
  void GenInstrV(Register rd, Register rs1, Register rs2);
  // vsetvli
  void GenInstrV(Register rd, Register rs1, uint32_t zimm);
  // OPIVV OPFVV OPMVV
  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
                 VRegister vs1, VRegister vs2, MaskType mask = NoMask);
  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, int8_t vs1,
                 VRegister vs2, MaskType mask = NoMask);
  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
                 VRegister vs2, MaskType mask = NoMask);
  // OPMVV OPFVV
  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd,
                 VRegister vs1, VRegister vs2, MaskType mask = NoMask);
  // OPFVV
  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, FPURegister fd,
                 VRegister vs1, VRegister vs2, MaskType mask = NoMask);

  // OPIVX OPMVX
  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
                 Register rs1, VRegister vs2, MaskType mask = NoMask);
  // OPFVF
  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
                 FPURegister fs1, VRegister vs2, MaskType mask = NoMask);
  // OPMVX
  void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
                 MaskType mask = NoMask);
  // OPIVI
  void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
                 MaskType mask = NoMask);

  // VL VS
  void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
                 uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
                 uint8_t Nf);

  void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
                 Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
                 uint8_t Nf);
  // VL VS AMO
  void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
                 VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
                 uint8_t Nf);
  // vmv_xs vcpop_m vfirst_m
  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd, uint8_t vs1,
                 VRegister vs2, MaskType mask);
};

class LoadStoreLaneParams {
 public:
  int sz;
  uint8_t laneidx;

  LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);

 private:
  LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
      : sz(sz), laneidx(laneidx % lanes) {}
};
}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_RISCV_EXTENSION_RISCV_V_H_
src/codegen/riscv/extension-riscv-zicsr.cc (new file, 44 lines)
@@ -0,0 +1,44 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-zicsr.h"

#include "src/codegen/assembler.h"
#include "src/codegen/riscv/constant-riscv-zicsr.h"
#include "src/codegen/riscv/register-riscv.h"

namespace v8 {
namespace internal {

void AssemblerRISCVZicsr::csrrw(Register rd, ControlStatusReg csr,
                                Register rs1) {
  GenInstrCSR_ir(0b001, rd, csr, rs1);
}

void AssemblerRISCVZicsr::csrrs(Register rd, ControlStatusReg csr,
                                Register rs1) {
  GenInstrCSR_ir(0b010, rd, csr, rs1);
}

void AssemblerRISCVZicsr::csrrc(Register rd, ControlStatusReg csr,
                                Register rs1) {
  GenInstrCSR_ir(0b011, rd, csr, rs1);
}

void AssemblerRISCVZicsr::csrrwi(Register rd, ControlStatusReg csr,
                                 uint8_t imm5) {
  GenInstrCSR_ii(0b101, rd, csr, imm5);
}

void AssemblerRISCVZicsr::csrrsi(Register rd, ControlStatusReg csr,
                                 uint8_t imm5) {
  GenInstrCSR_ii(0b110, rd, csr, imm5);
}

void AssemblerRISCVZicsr::csrrci(Register rd, ControlStatusReg csr,
                                 uint8_t imm5) {
  GenInstrCSR_ii(0b111, rd, csr, imm5);
}
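
// The funct3 values above are the Zicsr encodings: CSRRW = 0b001,
// CSRRS = 0b010, CSRRC = 0b011, with the immediate forms CSRRWI/CSRRSI/
// CSRRCI at 0b101/0b110/0b111 taking a 5-bit zero-extended immediate in
// place of rs1.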

}  // namespace internal
}  // namespace v8
src/codegen/riscv/extension-riscv-zicsr.h (new file, 56 lines)
@@ -0,0 +1,56 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_ZICSR_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_ZICSR_H_
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-zicsr.h"
#include "src/codegen/riscv/register-riscv.h"

namespace v8 {
namespace internal {

class AssemblerRISCVZicsr : public AssemblerRiscvBase {
 public:
  // CSR
  void csrrw(Register rd, ControlStatusReg csr, Register rs1);
  void csrrs(Register rd, ControlStatusReg csr, Register rs1);
  void csrrc(Register rd, ControlStatusReg csr, Register rs1);
  void csrrwi(Register rd, ControlStatusReg csr, uint8_t imm5);
  void csrrsi(Register rd, ControlStatusReg csr, uint8_t imm5);
  void csrrci(Register rd, ControlStatusReg csr, uint8_t imm5);

  // Read instructions-retired counter
  void rdinstret(Register rd) { csrrs(rd, csr_instret, zero_reg); }
  void rdinstreth(Register rd) { csrrs(rd, csr_instreth, zero_reg); }
  void rdcycle(Register rd) { csrrs(rd, csr_cycle, zero_reg); }
  void rdcycleh(Register rd) { csrrs(rd, csr_cycleh, zero_reg); }
  void rdtime(Register rd) { csrrs(rd, csr_time, zero_reg); }
  void rdtimeh(Register rd) { csrrs(rd, csr_timeh, zero_reg); }

  void csrr(Register rd, ControlStatusReg csr) { csrrs(rd, csr, zero_reg); }
  void csrw(ControlStatusReg csr, Register rs) { csrrw(zero_reg, csr, rs); }
  void csrs(ControlStatusReg csr, Register rs) { csrrs(zero_reg, csr, rs); }
  void csrc(ControlStatusReg csr, Register rs) { csrrc(zero_reg, csr, rs); }

  void csrwi(ControlStatusReg csr, uint8_t imm) { csrrwi(zero_reg, csr, imm); }
  void csrsi(ControlStatusReg csr, uint8_t imm) { csrrsi(zero_reg, csr, imm); }
  void csrci(ControlStatusReg csr, uint8_t imm) { csrrci(zero_reg, csr, imm); }
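
  // The wrappers above mirror the standard RISC-V CSR pseudo-instructions
  // (csrr, csrw, csrs, csrc and the *i immediate forms), passing x0 as the
  // unused source or destination register.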
|
||||
|
||||
void frcsr(Register rd) { csrrs(rd, csr_fcsr, zero_reg); }
|
||||
void fscsr(Register rd, Register rs) { csrrw(rd, csr_fcsr, rs); }
|
||||
void fscsr(Register rs) { csrrw(zero_reg, csr_fcsr, rs); }
|
||||
|
||||
void frrm(Register rd) { csrrs(rd, csr_frm, zero_reg); }
|
||||
void fsrm(Register rd, Register rs) { csrrw(rd, csr_frm, rs); }
|
||||
void fsrm(Register rs) { csrrw(zero_reg, csr_frm, rs); }
|
||||
|
||||
void frflags(Register rd) { csrrs(rd, csr_fflags, zero_reg); }
|
||||
void fsflags(Register rd, Register rs) { csrrw(rd, csr_fflags, rs); }
|
||||
void fsflags(Register rs) { csrrw(zero_reg, csr_fflags, rs); }
|
||||
};
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_ZICSR_H_
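Everything from csrr down to fsflags mirrors the standard RISC-V assembler pseudo-instructions: each one is a csrrw/csrrs/csrrc with zero_reg (x0) in the slot whose effect should be a no-op, since reading into x0 discards the result and csrrs with rs1 = x0 sets no bits. A hedged usage sketch (assumes an assembler context and that t0 is free to clobber; not taken from the patch):

  frcsr(t0);  // t0 = fcsr; expands to csrrs t0, fcsr, zero_reg
  // ... emit code that may disturb the rounding mode or accrued flags ...
  fscsr(t0);  // fcsr = t0; expands to csrrw zero_reg, fcsr, t0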
src/codegen/riscv/extension-riscv-zifencei.cc (new file, 16 lines)
@ -0,0 +1,16 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-zifencei.h"

#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-zifencei.h"

namespace v8 {
namespace internal {

void AssemblerRISCVZifencei::fence_i() {
  GenInstrI(0b001, MISC_MEM, ToRegister(0), ToRegister(0), 0);
}
} // namespace internal
} // namespace v8
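fence.i takes no operands, so with rd = rs1 = x0 and a zero immediate the emitted word is always the same: funct3 0b001 inside the MISC-MEM opcode 0b0001111 gives 0x0000100F. A one-line check of that arithmetic (standalone, not part of the patch):

#include <cstdint>
static_assert(((0b001u << 12) | 0b0001111u) == 0x0000100Fu,
              "fence.i encodes to a fixed instruction word");

A JIT such as V8 needs this barrier (or an OS-provided equivalent) after writing generated code so the hart's instruction fetches observe the new bytes.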
src/codegen/riscv/extension-riscv-zifencei.h (new file, 19 lines)
@ -0,0 +1,19 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_ZIFENCEI_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_ZIFENCEI_H_
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/register-riscv.h"

namespace v8 {
namespace internal {
class AssemblerRISCVZifencei : public AssemblerRiscvBase {
 public:
  void fence_i();
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_ZIFENCEI_H_
@ -2,10 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
#define V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_

#if V8_TARGET_ARCH_RISCV64
#ifndef V8_CODEGEN_RISCV_INTERFACE_DESCRIPTORS_RISCV_INL_H_
#define V8_CODEGEN_RISCV_INTERFACE_DESCRIPTORS_RISCV_INL_H_

#include "src/base/template-utils.h"
#include "src/codegen/interface-descriptors.h"
@ -321,6 +319,4 @@ constexpr auto RunMicrotasksEntryDescriptor::registers() {
} // namespace internal
} // namespace v8

#endif // V8_TARGET_ARCH_RISCV64

#endif // V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
#endif // V8_CODEGEN_RISCV_INTERFACE_DESCRIPTORS_RISCV_INL_H_
File diff suppressed because it is too large
@ -6,11 +6,11 @@
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
#define V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
#ifndef V8_CODEGEN_RISCV_MACRO_ASSEMBLER_RISCV_H_
#define V8_CODEGEN_RISCV_MACRO_ASSEMBLER_RISCV_H_

#include "src/codegen/assembler-arch.h"
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/assembler-riscv64.h"
#include "src/common/globals.h"
#include "src/execution/isolate-data.h"
#include "src/objects/tagged-index.h"
@ -18,6 +18,7 @@
namespace v8 {
namespace internal {

#define xlen (uint8_t(sizeof(void*) * 8))
// Forward declarations.
enum class AbortReason : uint8_t;

@ -160,12 +161,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef COND_TYPED_ARGS
#undef COND_ARGS

  void AllocateStackSpace(Register bytes) { Sub64(sp, sp, bytes); }
  void AllocateStackSpace(Register bytes) { SubWord(sp, sp, bytes); }

  void AllocateStackSpace(int bytes) {
    DCHECK_GE(bytes, 0);
    if (bytes == 0) return;
    Sub64(sp, sp, Operand(bytes));
    SubWord(sp, sp, Operand(bytes));
  }

  inline void NegateBool(Register rd, Register rs) { Xor(rd, rs, 1); }
@ -195,11 +196,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  // Load int32 in the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
  inline void li(Register rd, intptr_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }

  inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
  inline void Move(Register output, MemOperand operand) {
    LoadWord(output, operand);
  }
  void li(Register dst, Handle<HeapObject> value,
          RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
  void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
@ -210,18 +213,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
  void LoadRootRelative(Register destination, int32_t offset) final;

  inline void GenPCRelativeJump(Register rd, int64_t imm32) {
  inline void GenPCRelativeJump(Register rd, int32_t imm32) {
    DCHECK(is_int32(imm32 + 0x800));
    int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
    int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
    int32_t Hi20 = ((imm32 + 0x800) >> 12);
    int32_t Lo12 = imm32 << 20 >> 20;
    auipc(rd, Hi20); // Read PC + Hi20 into scratch.
    jr(rd, Lo12);    // jump PC + Hi20 + Lo12
  }

  inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
  inline void GenPCRelativeJumpAndLink(Register rd, int32_t imm32) {
    DCHECK(is_int32(imm32 + 0x800));
    int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
    int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
    int32_t Hi20 = ((imm32 + 0x800) >> 12);
    int32_t Lo12 = imm32 << 20 >> 20;
    auipc(rd, Hi20); // Read PC + Hi20 into scratch.
    jalr(rd, Lo12);  // jump PC + Hi20 + Lo12
  }
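The +0x800 exists because jr/jalr sign-extend their 12-bit immediate: whenever the low 12 bits of imm32 fall in [0x800, 0xFFF], Lo12 comes out negative and Hi20 must be rounded up by one 4 KiB page to compensate. A standalone worked check of the split (illustrative; the casts keep the shifts well-defined):

#include <cassert>
#include <cstdint>
int main() {
  int32_t imm32 = 0x1FFF;  // low 12 bits are 0xFFF, i.e. >= 0x800
  int32_t Hi20 = (imm32 + 0x800) >> 12;                   // 2: rounded up
  int32_t Lo12 = (int32_t)((uint32_t)imm32 << 20) >> 20;  // -1 after sign-extension
  assert((Hi20 << 12) + Lo12 == imm32);  // auipc + jalr land on the target
  return 0;
}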
@ -283,12 +286,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

  void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);

  void Ld(Register rd, const MemOperand& rs);
  void Sd(Register rd, const MemOperand& rs);

  void push(Register src) {
    Add64(sp, sp, Operand(-kSystemPointerSize));
    Sd(src, MemOperand(sp, 0));
    AddWord(sp, sp, Operand(-kSystemPointerSize));
    StoreWord(src, MemOperand(sp, 0));
  }
  void Push(Register src) { push(src); }
  void Push(Handle<HeapObject> handle);
@ -296,44 +296,44 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Sub64(sp, sp, Operand(2 * kSystemPointerSize));
    Sd(src1, MemOperand(sp, 1 * kSystemPointerSize));
    Sd(src2, MemOperand(sp, 0 * kSystemPointerSize));
    SubWord(sp, sp, Operand(2 * kSystemPointerSize));
    StoreWord(src1, MemOperand(sp, 1 * kSystemPointerSize));
    StoreWord(src2, MemOperand(sp, 0 * kSystemPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Sub64(sp, sp, Operand(3 * kSystemPointerSize));
    Sd(src1, MemOperand(sp, 2 * kSystemPointerSize));
    Sd(src2, MemOperand(sp, 1 * kSystemPointerSize));
    Sd(src3, MemOperand(sp, 0 * kSystemPointerSize));
    SubWord(sp, sp, Operand(3 * kSystemPointerSize));
    StoreWord(src1, MemOperand(sp, 2 * kSystemPointerSize));
    StoreWord(src2, MemOperand(sp, 1 * kSystemPointerSize));
    StoreWord(src3, MemOperand(sp, 0 * kSystemPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Sub64(sp, sp, Operand(4 * kSystemPointerSize));
    Sd(src1, MemOperand(sp, 3 * kSystemPointerSize));
    Sd(src2, MemOperand(sp, 2 * kSystemPointerSize));
    Sd(src3, MemOperand(sp, 1 * kSystemPointerSize));
    Sd(src4, MemOperand(sp, 0 * kSystemPointerSize));
    SubWord(sp, sp, Operand(4 * kSystemPointerSize));
    StoreWord(src1, MemOperand(sp, 3 * kSystemPointerSize));
    StoreWord(src2, MemOperand(sp, 2 * kSystemPointerSize));
    StoreWord(src3, MemOperand(sp, 1 * kSystemPointerSize));
    StoreWord(src4, MemOperand(sp, 0 * kSystemPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    Sub64(sp, sp, Operand(5 * kSystemPointerSize));
    Sd(src1, MemOperand(sp, 4 * kSystemPointerSize));
    Sd(src2, MemOperand(sp, 3 * kSystemPointerSize));
    Sd(src3, MemOperand(sp, 2 * kSystemPointerSize));
    Sd(src4, MemOperand(sp, 1 * kSystemPointerSize));
    Sd(src5, MemOperand(sp, 0 * kSystemPointerSize));
    SubWord(sp, sp, Operand(5 * kSystemPointerSize));
    StoreWord(src1, MemOperand(sp, 4 * kSystemPointerSize));
    StoreWord(src2, MemOperand(sp, 3 * kSystemPointerSize));
    StoreWord(src3, MemOperand(sp, 2 * kSystemPointerSize));
    StoreWord(src4, MemOperand(sp, 1 * kSystemPointerSize));
    StoreWord(src5, MemOperand(sp, 0 * kSystemPointerSize));
  }

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Sub64(sp, sp, Operand(kSystemPointerSize));
    Sd(src, MemOperand(sp, 0));
    SubWord(sp, sp, Operand(kSystemPointerSize));
    StoreWord(src, MemOperand(sp, 0));
  }

  enum PushArrayOrder { kNormal, kReverse };
@ -377,29 +377,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                     Register exclusion3 = no_reg);

  void pop(Register dst) {
    Ld(dst, MemOperand(sp, 0));
    Add64(sp, sp, Operand(kSystemPointerSize));
    LoadWord(dst, MemOperand(sp, 0));
    AddWord(sp, sp, Operand(kSystemPointerSize));
  }
  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(src1 != src2);
    Ld(src2, MemOperand(sp, 0 * kSystemPointerSize));
    Ld(src1, MemOperand(sp, 1 * kSystemPointerSize));
    Add64(sp, sp, 2 * kSystemPointerSize);
    LoadWord(src2, MemOperand(sp, 0 * kSystemPointerSize));
    LoadWord(src1, MemOperand(sp, 1 * kSystemPointerSize));
    AddWord(sp, sp, 2 * kSystemPointerSize);
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    Ld(src3, MemOperand(sp, 0 * kSystemPointerSize));
    Ld(src2, MemOperand(sp, 1 * kSystemPointerSize));
    Ld(src1, MemOperand(sp, 2 * kSystemPointerSize));
    Add64(sp, sp, 3 * kSystemPointerSize);
    LoadWord(src3, MemOperand(sp, 0 * kSystemPointerSize));
    LoadWord(src2, MemOperand(sp, 1 * kSystemPointerSize));
    LoadWord(src1, MemOperand(sp, 2 * kSystemPointerSize));
    AddWord(sp, sp, 3 * kSystemPointerSize);
  }

  void Pop(uint32_t count = 1) {
    Add64(sp, sp, Operand(count * kSystemPointerSize));
    AddWord(sp, sp, Operand(count * kSystemPointerSize));
  }

  // Pops multiple values from the stack and load them in the
@ -419,8 +419,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
  void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }

#define DEFINE_INSTRUCTION3(instr) void instr(Register rd, int64_t imm);
#define DEFINE_INSTRUCTION3(instr) void instr(Register rd, intptr_t imm);

  DEFINE_INSTRUCTION(AddWord)
  DEFINE_INSTRUCTION(SubWord)
  DEFINE_INSTRUCTION(SllWord)
  DEFINE_INSTRUCTION(SrlWord)
  DEFINE_INSTRUCTION(SraWord)
#if V8_TARGET_ARCH_RISCV64
  DEFINE_INSTRUCTION(Add32)
  DEFINE_INSTRUCTION(Add64)
  DEFINE_INSTRUCTION(Div32)
@ -441,7 +447,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  DEFINE_INSTRUCTION2(Div64)
  DEFINE_INSTRUCTION2(Divu32)
  DEFINE_INSTRUCTION2(Divu64)

  DEFINE_INSTRUCTION(Sll64)
  DEFINE_INSTRUCTION(Sra64)
  DEFINE_INSTRUCTION(Srl64)
  DEFINE_INSTRUCTION(Dror)
#elif V8_TARGET_ARCH_RISCV32
  DEFINE_INSTRUCTION(Add32)
  DEFINE_INSTRUCTION(Div)
  DEFINE_INSTRUCTION(Divu)
  DEFINE_INSTRUCTION(Mod)
  DEFINE_INSTRUCTION(Modu)
  DEFINE_INSTRUCTION(Sub32)
  DEFINE_INSTRUCTION(Mul)
  DEFINE_INSTRUCTION(Mul32)
  DEFINE_INSTRUCTION(Mulh)
  DEFINE_INSTRUCTION2(Div)
  DEFINE_INSTRUCTION2(Divu)
#endif
  DEFINE_INSTRUCTION(And)
  DEFINE_INSTRUCTION(Or)
  DEFINE_INSTRUCTION(Xor)
@ -458,10 +480,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  DEFINE_INSTRUCTION(Sgeu)
  DEFINE_INSTRUCTION(Seq)
  DEFINE_INSTRUCTION(Sne)

  DEFINE_INSTRUCTION(Sll64)
  DEFINE_INSTRUCTION(Sra64)
  DEFINE_INSTRUCTION(Srl64)
  DEFINE_INSTRUCTION(Sll32)
  DEFINE_INSTRUCTION(Sra32)
  DEFINE_INSTRUCTION(Srl32)
@ -470,7 +488,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  DEFINE_INSTRUCTION2(Snez)

  DEFINE_INSTRUCTION(Ror)
  DEFINE_INSTRUCTION(Dror)

  DEFINE_INSTRUCTION3(Li)
  DEFINE_INSTRUCTION2(Mv)
@ -479,14 +496,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3

  void Amosub_w(bool aq, bool rl, Register rd, Register rs1, Register rs2) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.Acquire();
    sub(temp, zero_reg, rs2);
    amoadd_w(aq, rl, rd, rs1, temp);
  }

  void SmiUntag(Register dst, const MemOperand& src);
  void SmiUntag(Register dst, Register src) {
#if V8_TARGET_ARCH_RISCV64
    DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
    if (COMPRESS_POINTERS_BOOL) {
      sraiw(dst, src, kSmiShift);
    } else {
      srai(dst, src, kSmiShift);
    }
#elif V8_TARGET_ARCH_RISCV32
    DCHECK(SmiValuesAre31Bits());
    srai(dst, src, kSmiShift);
#endif
  }

  void SmiUntag(Register reg) { SmiUntag(reg, reg); }
@ -557,27 +586,72 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void LoadZeroIfConditionZero(Register dest, Register condition);

  void SignExtendByte(Register rd, Register rs) {
    slli(rd, rs, 64 - 8);
    srai(rd, rd, 64 - 8);
    slli(rd, rs, xlen - 8);
    srai(rd, rd, xlen - 8);
  }

  void SignExtendShort(Register rd, Register rs) {
    slli(rd, rs, 64 - 16);
    srai(rd, rd, 64 - 16);
    slli(rd, rs, xlen - 16);
    srai(rd, rd, xlen - 16);
  }

  void Clz32(Register rd, Register rs);
  void Ctz32(Register rd, Register rs);
  void Popcnt32(Register rd, Register rs, Register scratch);

#if V8_TARGET_ARCH_RISCV64
  void SignExtendWord(Register rd, Register rs) { sext_w(rd, rs); }
  void ZeroExtendWord(Register rd, Register rs) {
    slli(rd, rs, 32);
    srli(rd, rd, 32);
  }

  void Clz32(Register rd, Register rs);
  void Clz64(Register rd, Register rs);
  void Ctz32(Register rd, Register rs);
  void Ctz64(Register rd, Register rs);
  void Popcnt32(Register rd, Register rs, Register scratch);
  void Popcnt64(Register rd, Register rs, Register scratch);
  void Ctz64(Register rd, Register rs);
  void Clz64(Register rd, Register rs);
#elif V8_TARGET_ARCH_RISCV32
  void AddPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void SubPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void AndPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high);

  void OrPair(Register dst_low, Register dst_high, Register left_low,
              Register left_high, Register right_low, Register right_high);

  void XorPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high);

  void MulPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void ShlPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);
  void ShlPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, int32_t shift, Register scratch1,
               Register scratch2);

  void ShrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);

  void ShrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, int32_t shift, Register scratch1,
               Register scratch2);

  void SarPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);
  void SarPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, int32_t shift, Register scratch1,
               Register scratch2);
#endif
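On RV32 these *Pair helpers carry WebAssembly's 64-bit integer operations on (low, high) register pairs. The essential trick in AddPair is recovering the carry out of the low words; a minimal sketch of that logic in plain C++ over 32-bit halves (illustrative of the technique only; the real helper emits instructions and must also handle output registers aliasing the inputs, hence the scratch registers):

#include <cstdint>
void AddPair(uint32_t& dst_low, uint32_t& dst_high, uint32_t left_low,
             uint32_t left_high, uint32_t right_low, uint32_t right_high) {
  uint32_t low = left_low + right_low;      // may wrap around
  uint32_t carry = low < left_low ? 1 : 0;  // unsigned wrap detects the carry
  dst_low = low;
  dst_high = left_high + right_high + carry;
}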

  // A bit field of `size` bits starting at bit `pos` is extracted from rs
  // and stored zero/sign-extended and right-justified in rt.
@ -617,6 +691,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  template <int NBYTES>
  void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs,
                             Register scratch);
#if V8_TARGET_ARCH_RISCV32
  void UnalignedDoubleHelper(FPURegister frd, const MemOperand& rs,
                             Register scratch_base);
  void UnalignedDStoreHelper(FPURegister frd, const MemOperand& rs,
                             Register scratch);
#endif

  template <typename Reg_T, typename Func>
  void AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator);
@ -634,7 +714,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void Ush(Register rd, const MemOperand& rs);

  void Ulw(Register rd, const MemOperand& rs);
  void Ulwu(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);

  void Uld(Register rd, const MemOperand& rs);
@ -655,9 +734,24 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void Sh(Register rd, const MemOperand& rs);

  void Lw(Register rd, const MemOperand& rs);
  void Lwu(Register rd, const MemOperand& rs);
  void Sw(Register rd, const MemOperand& rs);

#if V8_TARGET_ARCH_RISCV64
  void Ulwu(Register rd, const MemOperand& rs);
  void Lwu(Register rd, const MemOperand& rs);
  void Ld(Register rd, const MemOperand& rs);
  void Sd(Register rd, const MemOperand& rs);
  void Lld(Register rd, const MemOperand& rs);
  void Scd(Register rd, const MemOperand& rs);

  inline void Load32U(Register rd, const MemOperand& rs) { Lwu(rd, rs); }
  inline void LoadWord(Register rd, const MemOperand& rs) { Ld(rd, rs); }
  inline void StoreWord(Register rd, const MemOperand& rs) { Sd(rd, rs); }
#elif V8_TARGET_ARCH_RISCV32
  inline void Load32U(Register rd, const MemOperand& rs) { Lw(rd, rs); }
  inline void LoadWord(Register rd, const MemOperand& rs) { Lw(rd, rs); }
  inline void StoreWord(Register rd, const MemOperand& rs) { Sw(rd, rs); }
#endif
  void LoadFloat(FPURegister fd, const MemOperand& src);
  void StoreFloat(FPURegister fs, const MemOperand& dst);

@ -667,9 +761,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void Ll(Register rd, const MemOperand& rs);
  void Sc(Register rd, const MemOperand& rs);

  void Lld(Register rd, const MemOperand& rs);
  void Scd(Register rd, const MemOperand& rs);

  void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2);
@ -699,6 +790,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

  inline void Move(FPURegister dst, FPURegister src) { MoveDouble(dst, src); }

#if V8_TARGET_ARCH_RISCV64
  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    fmv_x_d(dst_high, src);
    fmv_x_w(dst_low, src);
@ -708,11 +800,24 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  inline void Move(Register dst, FPURegister src) { fmv_x_d(dst, src); }

  inline void Move(FPURegister dst, Register src) { fmv_d_x(dst, src); }
#elif V8_TARGET_ARCH_RISCV32
  inline void Move(Register dst, FPURegister src) { fmv_x_w(dst, src); }

  inline void Move(FPURegister dst, Register src) { fmv_w_x(dst, src); }
#endif

  // Extract sign-extended word from high-half of FPR to GPR
  inline void ExtractHighWordFromF64(Register dst_high, FPURegister src) {
#if V8_TARGET_ARCH_RISCV64
    fmv_x_d(dst_high, src);
    srai(dst_high, dst_high, 32);
#elif V8_TARGET_ARCH_RISCV32
    // TODO(riscv32): delete StoreDouble
    AddWord(sp, sp, Operand(-8));
    StoreDouble(src, MemOperand(sp, 0));
    Lw(dst_high, MemOperand(sp, 4));
    AddWord(sp, sp, Operand(8));
#endif
  }
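RV32 has fmv.x.w but no fmv.x.d, so the high word of a double cannot be moved to a GPR directly; the code above spills the double to the stack and reloads bytes 4-7, which on a little-endian target hold the high half. The same extraction written as plain C++ (standalone, for illustration):

#include <cstdint>
#include <cstring>
int32_t ExtractHighWord(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // the spill/reload, in C++ terms
  return (int32_t)(bits >> 32);          // MemOperand(sp, 4) on little-endian
}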

  // Insert low-word from GPR (src_high) to the high-half of FPR (dst)
@ -734,7 +839,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  }
  void LoadFPRImmediate(FPURegister dst, uint32_t src);
  void LoadFPRImmediate(FPURegister dst, uint64_t src);

#if V8_TARGET_ARCH_RISCV64
  // AddOverflow64 sets overflow register to a negative value if
  // overflow occurred, otherwise it is zero or positive
  void AddOverflow64(Register dst, Register left, const Operand& right,
@ -743,13 +848,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  // overflow occurred, otherwise it is zero or positive
  void SubOverflow64(Register dst, Register left, const Operand& right,
                     Register overflow);
  // MulOverflow32 sets overflow register to zero if no overflow occurred
  void MulOverflow32(Register dst, Register left, const Operand& right,
                     Register overflow);

  // MIPS-style 32-bit unsigned mulh
  void Mulhu32(Register dst, Register left, const Operand& right,
               Register left_zero, Register right_zero);
#elif V8_TARGET_ARCH_RISCV32
  // AddOverflow sets overflow register to a negative value if
  // overflow occurred, otherwise it is zero or positive
  void AddOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
  // SubOverflow sets overflow register to a negative value if
  // overflow occurred, otherwise it is zero or positive
  void SubOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
  // MIPS-style 32-bit unsigned mulh
  void Mulhu(Register dst, Register left, const Operand& right,
             Register left_zero, Register right_zero);
#endif
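Because RISC-V has no flags register, AddOverflow/SubOverflow report signed overflow through a GPR: negative means overflow, zero-or-positive means none. One branch-free detection that fits this contract (overflow in addition happens exactly when both operands share the sign that the result lacks), in standalone form as a sketch of the idea; the emitted instruction sequence may differ:

#include <cstdint>
int32_t AddOverflowCheck(int32_t left, int32_t right, int32_t* dst) {
  int32_t result = (int32_t)((uint32_t)left + (uint32_t)right);
  *dst = result;
  // Sign bit is set (value is negative) exactly when signed overflow occurred.
  return (left ^ result) & (right ^ result);
}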
  // MulOverflow32 sets overflow register to zero if no overflow occurred
  void MulOverflow32(Register dst, Register left, const Operand& right,
                     Register overflow);

  // Number of instructions needed for calculation of switch table entry address
  static const int kSwitchTablePrologueSize = 6;
@ -799,7 +916,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

  // Convert single to signed word.
  void Trunc_w_s(Register rd, FPURegister fs, Register result = no_reg);

#if V8_TARGET_ARCH_RISCV64
  // Convert double to unsigned long.
  void Trunc_ul_d(Register rd, FPURegister fs, Register result = no_reg);

@ -812,6 +929,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  // Convert single to signed long.
  void Trunc_l_s(Register rd, FPURegister fs, Register result = no_reg);

  // Round double functions
  void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
#endif
  // Round single to signed word.
  void Round_w_s(Register rd, FPURegister fs, Register result = no_reg);

@ -830,12 +953,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  // Floor double to signed word.
  void Floor_w_d(Register rd, FPURegister fs, Register result = no_reg);

  // Round double functions
  void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);

  // Round float functions
  void Trunc_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Round_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
@ -860,12 +977,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
               VRegister v_scratch);
  void Round_d(VRegister dst, VRegister src, Register scratch,
               VRegister v_scratch);

  // -------------------------------------------------------------------------
  // Smi utilities.

  void SmiTag(Register dst, Register src) {
    static_assert(kSmiTag == 0);
#if V8_TARGET_ARCH_RISCV64
    if (SmiValuesAre32Bits()) {
      // Smi goes to upper 32
      slli(dst, src, 32);
@ -874,6 +991,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
      // Smi is shifted left by 1
      Add32(dst, src, src);
    }
#elif V8_TARGET_ARCH_RISCV32
    DCHECK(SmiValuesAre31Bits());
    // Smi is shifted left by 1
    slli(dst, src, kSmiShift);
#endif
  }

  void SmiTag(Register reg) { SmiTag(reg, reg); }
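With 31-bit Smis (the only option on RV32) the tag is the low bit and kSmiShift is 1, so tagging and untagging are one shift each; RV64 with 32-bit Smis instead parks the payload in the upper word. A worked check of the 31-bit scheme (standalone; assumes kSmiShift == 1 and kSmiTag == 0 as asserted above):

#include <cassert>
#include <cstdint>
int main() {
  const int kSmiShift = 1;
  int32_t value = -42;
  int32_t tagged = (int32_t)((uint32_t)value << kSmiShift);  // SmiTag: slli
  assert((tagged & 1) == 0);               // Smis carry a clear low bit
  assert((tagged >> kSmiShift) == value);  // SmiUntag: srai
  return 0;
}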
@ -902,16 +1025,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

  // Control-flow integrity:

  // Define a function entrypoint. This doesn't emit any code for this
  // architecture, as control-flow integrity is not supported for it.
  void CodeEntry() {}
  // Define an exception handler.
  void ExceptionHandler() {}
  // Define an exception handler and bind a label.
  void BindExceptionHandler(Label* label) { bind(label); }

#if V8_TARGET_ARCH_RISCV64
  // ---------------------------------------------------------------------------
  // Pointer compression Support

@ -948,9 +1062,46 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
    if (COMPRESS_POINTERS_BOOL) {
      Sub32(rd, rs1, rs2);
    } else {
      Sub64(rd, rs1, rs2);
      SubWord(rd, rs1, rs2);
    }
  }
#elif V8_TARGET_ARCH_RISCV32
  // ---------------------------------------------------------------------------
  // Pointer compression Support
  // RV32 does not support pointer compression. These functions are defined
  // only to simplify the builtins.
  inline void LoadTaggedPointerField(const Register& destination,
                                     const MemOperand& field_operand) {
    Lw(destination, field_operand);
  }
  inline void LoadAnyTaggedField(const Register& destination,
                                 const MemOperand& field_operand) {
    Lw(destination, field_operand);
  }
  inline void LoadTaggedSignedField(const Register& destination,
                                    const MemOperand& field_operand) {
    Lw(destination, field_operand);
  }

  inline void SmiUntagField(Register dst, const MemOperand& src) {
    SmiUntag(dst, src);
  }

  // Compresses and stores tagged value to given on-heap location.
  void StoreTaggedField(const Register& value,
                        const MemOperand& dst_field_operand) {
    Sw(value, dst_field_operand);
  }
#endif
  // Control-flow integrity:

  // Define a function entrypoint. This doesn't emit any code for this
  // architecture, as control-flow integrity is not supported for it.
  void CodeEntry() {}
  // Define an exception handler.
  void ExceptionHandler() {}
  // Define an exception handler and bind a label.
  void BindExceptionHandler(Label* label) { bind(label); }
  // Wasm into RVV
  void WasmRvvExtractLane(Register dst, VRegister src, int8_t idx, VSew sew,
                          Vlmul lmul) {
@ -1017,11 +1168,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                               Register rs, const Operand& rt);
  void BranchAndLinkLong(Label* L);

#if V8_TARGET_ARCH_RISCV64
  template <typename F_TYPE>
  void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
                   FPURoundingMode mode);
#elif V8_TARGET_ARCH_RISCV32
  void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
                   FPURoundingMode mode);

  void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
                  FPURoundingMode mode);
#endif
  template <typename F>
  void RoundHelper(VRegister dst, VRegister src, Register scratch,
                   VRegister v_scratch, FPURoundingMode frm);
@ -1044,11 +1201,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
  // TODO(victorgomes): Remove this function once we stick with the reversed
  // arguments order.
  void LoadReceiver(Register dest, Register argc) {
    Ld(dest, MemOperand(sp, 0));
    LoadWord(dest, MemOperand(sp, 0));
  }

  void StoreReceiver(Register rec, Register argc, Register scratch) {
    Sd(rec, MemOperand(sp, 0));
    StoreWord(rec, MemOperand(sp, 0));
  }

  bool IsNear(Label* L, Condition cond, int rs_reg);
@ -1245,6 +1402,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {

  // Left-shifted from int32 equivalent of Smi.
  void SmiScale(Register dst, Register src, int scale) {
#if V8_TARGET_ARCH_RISCV64
    if (SmiValuesAre32Bits()) {
      // The int portion is upper 32-bits of 64-bit word.
      srai(dst, src, (kSmiShift - scale) & 0x3F);
@ -1253,6 +1411,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
      DCHECK_GE(scale, kSmiTagSize);
      slliw(dst, src, scale - kSmiTagSize);
    }
#elif V8_TARGET_ARCH_RISCV32
    DCHECK(SmiValuesAre31Bits());
    DCHECK_GE(scale, kSmiTagSize);
    slli(dst, src, scale - kSmiTagSize);
#endif
  }

  // Test if the register contains a smi.
@ -1275,7 +1438,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

@ -1342,11 +1504,12 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
       kSystemPointerSizeLog2); // scratch2 = offset of indexth entry
  add(scratch2, scratch2,
      scratch); // scratch2 = (saved PC) + (offset of indexth entry)
  ld(scratch2, scratch2,
     6 * kInstrSize); // Add the size of these 6 instructions to the
                      // offset, then load
  jr(scratch2); // Jump to the address loaded from the table
  nop();        // For 16-byte alignment
  LoadWord(scratch2,
           MemOperand(scratch2,
                      6 * kInstrSize)); // Add the size of these 6 instructions
                                        // to the offset, then load
  jr(scratch2); // Jump to the address loaded from the table
  nop();        // For 16-byte alignment
  for (size_t index = 0; index < case_count; ++index) {
    dd(GetLabelFunction(index));
  }
@ -1363,4 +1526,4 @@ struct MoveCycleState {
} // namespace internal
} // namespace v8

#endif // V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
#endif // V8_CODEGEN_RISCV_MACRO_ASSEMBLER_RISCV_H_
@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
#define V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
#ifndef V8_CODEGEN_RISCV_REGISTER_RISCV_H_
#define V8_CODEGEN_RISCV_REGISTER_RISCV_H_

#include "src/codegen/register-base.h"
#include "src/codegen/riscv64/constants-riscv64.h"
#include "src/codegen/riscv/constants-riscv.h"

namespace v8 {
namespace internal {
@ -76,7 +76,6 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
// Note that the bit values must match those used in actual instruction
// encoding.
const int kNumRegs = 32;

const int kUndefIndex = -1;
// Map with indexes on stack that correspond to codes of saved registers.
const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
@ -228,7 +227,6 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
  explicit constexpr FPURegister(int code) : RegisterBase(code) {}
};

// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
// fs9: 0.0
@ -311,4 +309,4 @@ constexpr Register kPtrComprCageBaseRegister = kRootRegister;
} // namespace internal
} // namespace v8

#endif // V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
#endif // V8_CODEGEN_RISCV_REGISTER_RISCV_H_
@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
#define V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
#ifndef V8_CODEGEN_RISCV_REGLIST_RISCV_H_
#define V8_CODEGEN_RISCV_REGLIST_RISCV_H_

#include "src/codegen/register-arch.h"
#include "src/codegen/reglist-base.h"
#include "src/codegen/riscv64/constants-riscv64.h"

namespace v8 {
namespace internal {
@ -61,4 +60,4 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
} // namespace internal
} // namespace v8

#endif // V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
#endif // V8_CODEGEN_RISCV_REGLIST_RISCV_H_
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -58,6 +58,9 @@ namespace internal {
#if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_RISCV32 && !V8_HOST_ARCH_RISCV32)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64)
#define USE_SIMULATOR 1
#endif
@ -335,7 +338,7 @@ constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
#elif V8_TARGET_ARCH_MIPS
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_RISCV32
constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 2048LL * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
@ -25,8 +25,8 @@
#include "src/compiler/backend/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/backend/s390/instruction-codes-s390.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/compiler/backend/riscv64/instruction-codes-riscv64.h"
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
#include "src/compiler/backend/riscv/instruction-codes-riscv.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
@ -2702,7 +2702,8 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT

#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
    !V8_TARGET_ARCH_RISCV32
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
  UNIMPLEMENTED();
}
@ -2739,6 +2740,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
       // && !V8_TARGET_ARCH_RISCV32

#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
    !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && \
@ -2782,27 +2784,29 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {

#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
#if !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && !V8_TARGET_ARCH_RISCV64
#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && \
    !V8_TARGET_ARCH_RISCV32 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 &&
       // !V8_TARGET_ARCH_RISCV64
       // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_RISCV32
#endif // !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64

#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 && \
    !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_RISCV64
    !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 && \
    !V8_TARGET_ARCH_RISCV32 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
       // && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 &&
       // !V8_TARGET_ARCH_RISCV64
       // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_RISCV32

#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
    !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_ARM
    !V8_TARGET_ARCH_RISCV32 && !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16RelaxedLaneSelect(Node* node) {
  UNIMPLEMENTED();
}
@ -2832,7 +2836,8 @@ void InstructionSelector::VisitI32x4RelaxedTruncF32x4U(Node* node) {
  UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
       // && !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARM
       // && !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARM &&
       // !V8_TARGET_ARCH_RISCV32

#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI16x8RelaxedQ15MulRS(Node* node) {
File diff suppressed because it is too large
src/compiler/backend/riscv/instruction-codes-riscv.h (new file, 462 lines)
@ -0,0 +1,462 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_RISCV_INSTRUCTION_CODES_RISCV_H_
#define V8_COMPILER_BACKEND_RISCV_INSTRUCTION_CODES_RISCV_H_

namespace v8 {
namespace internal {
namespace compiler {

// RISC-V-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#if V8_TARGET_ARCH_RISCV64
#define TARGET_ARCH_OPCODE_LIST_SPECIAL(V) \
  V(RiscvAdd64) \
  V(RiscvAddOvf64) \
  V(RiscvSub64) \
  V(RiscvSubOvf64) \
  V(RiscvMulHigh64) \
  V(RiscvMul64) \
  V(RiscvDiv64) \
  V(RiscvDivU64) \
  V(RiscvMod64) \
  V(RiscvModU64) \
  V(RiscvZeroExtendWord) \
  V(RiscvSignExtendWord) \
  V(RiscvClz64) \
  V(RiscvCtz64) \
  V(RiscvPopcnt64) \
  V(RiscvShl64) \
  V(RiscvShr64) \
  V(RiscvSar64) \
  V(RiscvRor64) \
  V(RiscvFloat64RoundDown) \
  V(RiscvFloat64RoundTruncate) \
  V(RiscvFloat64RoundUp) \
  V(RiscvFloat64RoundTiesEven) \
  V(RiscvTruncLS) \
  V(RiscvTruncLD) \
  V(RiscvTruncUlS) \
  V(RiscvTruncUlD) \
  V(RiscvCvtSL) \
  V(RiscvCvtSUl) \
  V(RiscvCvtDL) \
  V(RiscvCvtDUl) \
  V(RiscvLd) \
  V(RiscvSd) \
  V(RiscvUsd) \
  V(RiscvLwu) \
  V(RiscvUlwu) \
  V(RiscvBitcastDL) \
  V(RiscvBitcastLD) \
  V(RiscvByteSwap64) \
  V(RiscvWord64AtomicLoadUint64) \
  V(RiscvWord64AtomicStoreWord64) \
  V(RiscvWord64AtomicAddUint64) \
  V(RiscvWord64AtomicSubUint64) \
  V(RiscvWord64AtomicAndUint64) \
  V(RiscvWord64AtomicOrUint64) \
  V(RiscvWord64AtomicXorUint64) \
  V(RiscvWord64AtomicExchangeUint64) \
  V(RiscvStoreCompressTagged) \
  V(RiscvLoadDecompressTaggedSigned) \
  V(RiscvLoadDecompressTaggedPointer) \
  V(RiscvLoadDecompressAnyTagged) \
  V(RiscvWord64AtomicCompareExchangeUint64)
#elif V8_TARGET_ARCH_RISCV32
#define TARGET_ARCH_OPCODE_LIST_SPECIAL(V) \
  V(RiscvAddOvf) \
  V(RiscvSubOvf) \
  V(RiscvAddPair) \
  V(RiscvSubPair) \
  V(RiscvMulPair) \
  V(RiscvAndPair) \
  V(RiscvOrPair) \
  V(RiscvXorPair) \
  V(RiscvShlPair) \
  V(RiscvShrPair) \
  V(RiscvSarPair) \
  V(RiscvWord32AtomicPairLoad) \
  V(RiscvWord32AtomicPairStore) \
  V(RiscvWord32AtomicPairAdd) \
  V(RiscvWord32AtomicPairSub) \
  V(RiscvWord32AtomicPairAnd) \
  V(RiscvWord32AtomicPairOr) \
  V(RiscvWord32AtomicPairXor) \
  V(RiscvWord32AtomicPairExchange) \
  V(RiscvWord32AtomicPairCompareExchange)
#endif

#define TARGET_ARCH_OPCODE_LIST_COMMON(V) \
  V(RiscvAdd32) \
  V(RiscvSub32) \
  V(RiscvMul32) \
  V(RiscvMulOvf32) \
  V(RiscvMulHigh32) \
  V(RiscvMulHighU32) \
  V(RiscvDiv32) \
  V(RiscvDivU32) \
  V(RiscvMod32) \
  V(RiscvModU32) \
  V(RiscvAnd) \
  V(RiscvAnd32) \
  V(RiscvOr) \
  V(RiscvOr32) \
  V(RiscvNor) \
  V(RiscvNor32) \
  V(RiscvXor) \
  V(RiscvXor32) \
  V(RiscvClz32) \
  V(RiscvShl32) \
  V(RiscvShr32) \
  V(RiscvSar32) \
  V(RiscvCtz32) \
  V(RiscvPopcnt32) \
  V(RiscvRor32) \
  V(RiscvMov) \
  V(RiscvTst) \
  V(RiscvCmp) \
  V(RiscvCmpZero) \
  V(RiscvCmpS) \
  V(RiscvAddS) \
  V(RiscvSubS) \
  V(RiscvMulS) \
  V(RiscvDivS) \
  V(RiscvModS) \
  V(RiscvAbsS) \
  V(RiscvNegS) \
  V(RiscvSqrtS) \
  V(RiscvMaxS) \
  V(RiscvMinS) \
  V(RiscvCmpD) \
  V(RiscvAddD) \
  V(RiscvSubD) \
  V(RiscvMulD) \
  V(RiscvDivD) \
  V(RiscvModD) \
  V(RiscvAbsD) \
  V(RiscvNegD) \
  V(RiscvSqrtD) \
  V(RiscvMaxD) \
  V(RiscvMinD) \
  V(RiscvFloat32RoundDown) \
  V(RiscvFloat32RoundTruncate) \
  V(RiscvFloat32RoundUp) \
  V(RiscvFloat32RoundTiesEven) \
  V(RiscvCvtSD) \
  V(RiscvCvtDS) \
  V(RiscvTruncWD) \
  V(RiscvRoundWD) \
  V(RiscvFloorWD) \
  V(RiscvCeilWD) \
  V(RiscvTruncWS) \
  V(RiscvRoundWS) \
  V(RiscvFloorWS) \
  V(RiscvCeilWS) \
  V(RiscvTruncUwD) \
  V(RiscvTruncUwS) \
  V(RiscvCvtDW) \
  V(RiscvCvtSW) \
  V(RiscvCvtSUw) \
  V(RiscvCvtDUw) \
  V(RiscvLb) \
  V(RiscvLbu) \
  V(RiscvSb) \
  V(RiscvLh) \
  V(RiscvUlh) \
  V(RiscvLhu) \
  V(RiscvUlhu) \
  V(RiscvSh) \
  V(RiscvUsh) \
  V(RiscvUld) \
  V(RiscvLw) \
  V(RiscvUlw) \
  V(RiscvSw) \
  V(RiscvUsw) \
  V(RiscvLoadFloat) \
  V(RiscvULoadFloat) \
  V(RiscvStoreFloat) \
  V(RiscvUStoreFloat) \
  V(RiscvLoadDouble) \
  V(RiscvULoadDouble) \
  V(RiscvStoreDouble) \
  V(RiscvUStoreDouble) \
  V(RiscvBitcastInt32ToFloat32) \
  V(RiscvBitcastFloat32ToInt32) \
  V(RiscvFloat64ExtractLowWord32) \
  V(RiscvFloat64ExtractHighWord32) \
  V(RiscvFloat64InsertLowWord32) \
  V(RiscvFloat64InsertHighWord32) \
  V(RiscvFloat32Max) \
  V(RiscvFloat64Max) \
  V(RiscvFloat32Min) \
  V(RiscvFloat64Min) \
  V(RiscvFloat64SilenceNaN) \
  V(RiscvPush) \
  V(RiscvPeek) \
  V(RiscvByteSwap32) \
  V(RiscvStoreToStackSlot) \
  V(RiscvStackClaim) \
  V(RiscvSignExtendByte) \
  V(RiscvSignExtendShort) \
  V(RiscvSync) \
  V(RiscvAssertEqual) \
  V(RiscvS128Const) \
  V(RiscvS128Zero) \
  V(RiscvS128AllOnes) \
  V(RiscvI32x4Splat) \
  V(RiscvI32x4ExtractLane) \
  V(RiscvI32x4ReplaceLane) \
  V(RiscvI32x4Add) \
  V(RiscvI32x4Sub) \
  V(RiscvF64x2Abs) \
  V(RiscvF64x2Neg) \
  V(RiscvF32x4Splat) \
  V(RiscvF32x4ExtractLane) \
  V(RiscvF32x4ReplaceLane) \
  V(RiscvF32x4SConvertI32x4) \
  V(RiscvF32x4UConvertI32x4) \
  V(RiscvI64x2SConvertI32x4Low) \
  V(RiscvI64x2SConvertI32x4High) \
  V(RiscvI64x2UConvertI32x4Low) \
  V(RiscvI64x2UConvertI32x4High) \
  V(RiscvI32x4Mul) \
  V(RiscvI32x4MaxS) \
  V(RiscvI32x4MinS) \
  V(RiscvI32x4Eq) \
  V(RiscvI32x4Ne) \
  V(RiscvI32x4Shl) \
  V(RiscvI32x4ShrS) \
  V(RiscvI32x4ShrU) \
  V(RiscvI32x4MaxU) \
  V(RiscvI32x4MinU) \
  V(RiscvI64x2GtS) \
  V(RiscvI64x2GeS) \
  V(RiscvI64x2Eq) \
  V(RiscvI64x2Ne) \
  V(RiscvF64x2Sqrt) \
  V(RiscvF64x2Add) \
  V(RiscvF64x2Sub) \
  V(RiscvF64x2Mul) \
  V(RiscvF64x2Div) \
  V(RiscvF64x2Min) \
  V(RiscvF64x2Max) \
  V(RiscvF64x2ConvertLowI32x4S) \
  V(RiscvF64x2ConvertLowI32x4U) \
  V(RiscvF64x2PromoteLowF32x4) \
  V(RiscvF64x2Eq) \
  V(RiscvF64x2Ne) \
  V(RiscvF64x2Lt) \
  V(RiscvF64x2Le) \
  V(RiscvF64x2Splat) \
  V(RiscvF64x2ExtractLane) \
  V(RiscvF64x2ReplaceLane) \
  V(RiscvF64x2Pmin) \
  V(RiscvF64x2Pmax) \
  V(RiscvF64x2Ceil) \
  V(RiscvF64x2Floor) \
  V(RiscvF64x2Trunc) \
  V(RiscvF64x2NearestInt) \
  V(RiscvI64x2Splat) \
  V(RiscvI64x2ExtractLane) \
  V(RiscvI64x2ReplaceLane) \
  V(RiscvI64x2Add) \
  V(RiscvI64x2Sub) \
  V(RiscvI64x2Mul) \
  V(RiscvI64x2Abs) \
  V(RiscvI64x2Neg) \
  V(RiscvI64x2Shl) \
  V(RiscvI64x2ShrS) \
  V(RiscvI64x2ShrU) \
  V(RiscvI64x2BitMask) \
  V(RiscvF32x4Abs) \
  V(RiscvF32x4Neg) \
  V(RiscvF32x4Sqrt) \
  V(RiscvF32x4Qfma) \
  V(RiscvF32x4Qfms) \
  V(RiscvF64x2Qfma) \
  V(RiscvF64x2Qfms) \
  V(RiscvF32x4Add) \
  V(RiscvF32x4Sub) \
  V(RiscvF32x4Mul) \
  V(RiscvF32x4Div) \
  V(RiscvF32x4Max) \
  V(RiscvF32x4Min) \
  V(RiscvF32x4Eq) \
  V(RiscvF32x4Ne) \
  V(RiscvF32x4Lt) \
  V(RiscvF32x4Le) \
  V(RiscvF32x4Pmin) \
  V(RiscvF32x4Pmax) \
  V(RiscvF32x4DemoteF64x2Zero) \
  V(RiscvF32x4Ceil) \
  V(RiscvF32x4Floor) \
  V(RiscvF32x4Trunc) \
  V(RiscvF32x4NearestInt) \
  V(RiscvI32x4SConvertF32x4) \
  V(RiscvI32x4UConvertF32x4) \
  V(RiscvI32x4Neg) \
  V(RiscvI32x4GtS) \
  V(RiscvI32x4GeS) \
  V(RiscvI32x4GtU) \
  V(RiscvI32x4GeU) \
  V(RiscvI32x4Abs) \
  V(RiscvI32x4BitMask) \
  V(RiscvI32x4TruncSatF64x2SZero) \
  V(RiscvI32x4TruncSatF64x2UZero) \
  V(RiscvI16x8Splat) \
  V(RiscvI16x8ExtractLaneU) \
  V(RiscvI16x8ExtractLaneS) \
  V(RiscvI16x8ReplaceLane) \
  V(RiscvI16x8Neg) \
  V(RiscvI16x8Shl) \
  V(RiscvI16x8ShrS) \
  V(RiscvI16x8ShrU) \
  V(RiscvI16x8Add) \
  V(RiscvI16x8AddSatS) \
  V(RiscvI16x8Sub) \
  V(RiscvI16x8SubSatS) \
  V(RiscvI16x8Mul) \
  V(RiscvI16x8MaxS) \
  V(RiscvI16x8MinS) \
  V(RiscvI16x8Eq) \
  V(RiscvI16x8Ne) \
  V(RiscvI16x8GtS) \
  V(RiscvI16x8GeS) \
  V(RiscvI16x8AddSatU) \
  V(RiscvI16x8SubSatU) \
  V(RiscvI16x8MaxU) \
  V(RiscvI16x8MinU) \
  V(RiscvI16x8GtU) \
  V(RiscvI16x8GeU) \
  V(RiscvI16x8RoundingAverageU) \
  V(RiscvI16x8Q15MulRSatS) \
  V(RiscvI16x8Abs) \
  V(RiscvI16x8BitMask) \
  V(RiscvI8x16Splat) \
  V(RiscvI8x16ExtractLaneU) \
  V(RiscvI8x16ExtractLaneS) \
  V(RiscvI8x16ReplaceLane) \
  V(RiscvI8x16Neg) \
  V(RiscvI8x16Shl) \
  V(RiscvI8x16ShrS) \
  V(RiscvI8x16Add) \
  V(RiscvI8x16AddSatS) \
  V(RiscvI8x16Sub) \
  V(RiscvI8x16SubSatS) \
  V(RiscvI8x16MaxS) \
  V(RiscvI8x16MinS) \
  V(RiscvI8x16Eq) \
  V(RiscvI8x16Ne) \
  V(RiscvI8x16GtS) \
  V(RiscvI8x16GeS) \
  V(RiscvI8x16ShrU) \
  V(RiscvI8x16AddSatU) \
  V(RiscvI8x16SubSatU) \
  V(RiscvI8x16MaxU) \
  V(RiscvI8x16MinU) \
  V(RiscvI8x16GtU) \
  V(RiscvI8x16GeU) \
  V(RiscvI8x16RoundingAverageU) \
  V(RiscvI8x16Abs) \
  V(RiscvI8x16BitMask) \
  V(RiscvI8x16Popcnt) \
  V(RiscvS128And) \
  V(RiscvS128Or) \
  V(RiscvS128Xor) \
  V(RiscvS128Not) \
  V(RiscvS128Select) \
  V(RiscvS128AndNot) \
  V(RiscvS128Load64Zero) \
  V(RiscvS128Load32Zero) \
  V(RiscvI32x4AllTrue) \
  V(RiscvI16x8AllTrue) \
  V(RiscvV128AnyTrue) \
  V(RiscvI8x16AllTrue) \
  V(RiscvI64x2AllTrue) \
  V(RiscvS32x4InterleaveRight) \
  V(RiscvS32x4InterleaveLeft) \
  V(RiscvS32x4PackEven) \
  V(RiscvS32x4PackOdd) \
  V(RiscvS32x4InterleaveEven) \
  V(RiscvS32x4InterleaveOdd) \
  V(RiscvS32x4Shuffle) \
  V(RiscvS16x8InterleaveRight) \
  V(RiscvS16x8InterleaveLeft) \
  V(RiscvS16x8PackEven) \
  V(RiscvS16x8PackOdd) \
  V(RiscvS16x8InterleaveEven) \
  V(RiscvS16x8InterleaveOdd) \
  V(RiscvS16x4Reverse) \
  V(RiscvS16x2Reverse) \
  V(RiscvS8x16InterleaveRight) \
  V(RiscvS8x16InterleaveLeft) \
  V(RiscvS8x16PackEven) \
  V(RiscvS8x16PackOdd) \
  V(RiscvS8x16InterleaveEven) \
  V(RiscvS8x16InterleaveOdd) \
  V(RiscvI8x16Shuffle) \
  V(RiscvS8x16Concat) \
  V(RiscvS8x8Reverse) \
  V(RiscvS8x4Reverse) \
  V(RiscvS8x2Reverse) \
  V(RiscvS128LoadSplat) \
  V(RiscvS128Load64ExtendS) \
  V(RiscvS128Load64ExtendU) \
  V(RiscvS128LoadLane) \
  V(RiscvS128StoreLane) \
  V(RiscvRvvLd) \
  V(RiscvRvvSt) \
  V(RiscvI32x4SConvertI16x8Low) \
  V(RiscvI32x4SConvertI16x8High) \
  V(RiscvI32x4UConvertI16x8Low) \
  V(RiscvI32x4UConvertI16x8High) \
  V(RiscvI16x8SConvertI8x16Low) \
  V(RiscvI16x8SConvertI8x16High) \
  V(RiscvI16x8SConvertI32x4) \
  V(RiscvI16x8UConvertI32x4) \
  V(RiscvI16x8UConvertI8x16Low) \
  V(RiscvI16x8UConvertI8x16High) \
  V(RiscvI8x16SConvertI16x8) \
  V(RiscvI8x16UConvertI16x8) \
  V(RiscvVwmul) \
  V(RiscvVwmulu) \
  V(RiscvVmvSx) \
  V(RiscvVcompress) \
  V(RiscvVaddVv) \
  V(RiscvVwadd) \
  V(RiscvVwaddu) \
  V(RiscvVrgather) \
  V(RiscvVslidedown)

#define TARGET_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_LIST_COMMON(V) \
  TARGET_ARCH_OPCODE_LIST_SPECIAL(V)

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
// Root = [kRootRegister + immediate]
// TODO(plind): Add the new r6 address modes.
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI) /* [%r0 + K] */ \
  V(MRR) /* [%r0 + %r1] */ \
  V(Root) /* [root + k] */

} // namespace compiler
} // namespace internal
} // namespace v8

#endif // V8_COMPILER_BACKEND_RISCV_INSTRUCTION_CODES_RISCV_H_
|
@ -14,40 +14,16 @@ bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kRiscvAbsD:
case kRiscvAbsS:
#if V8_TARGET_ARCH_RISCV64
case kRiscvAdd32:
case kRiscvAddD:
case kRiscvAddS:
case kRiscvAnd:
case kRiscvAnd32:
case kRiscvAssertEqual:
case kRiscvBitcastDL:
case kRiscvBitcastLD:
case kRiscvBitcastInt32ToFloat32:
case kRiscvBitcastFloat32ToInt32:
case kRiscvByteSwap32:
case kRiscvByteSwap64:
case kRiscvCeilWD:
case kRiscvCeilWS:
case kRiscvClz32:
case kRiscvCmp:
case kRiscvCmpZero:
case kRiscvCmpD:
case kRiscvCmpS:
case kRiscvCtz32:
case kRiscvCvtDL:
case kRiscvCvtDS:
case kRiscvCvtDUl:
case kRiscvCvtDUw:
case kRiscvCvtDW:
case kRiscvCvtSD:
case kRiscvCvtSL:
case kRiscvCvtSUl:
case kRiscvCvtSUw:
case kRiscvCvtSW:
case kRiscvMulHigh64:
case kRiscvMulHighU32:
case kRiscvAdd64:
case kRiscvAddOvf64:
case kRiscvClz64:
@ -56,10 +32,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvDivU64:
case kRiscvZeroExtendWord:
case kRiscvSignExtendWord:
case kRiscvDiv32:
case kRiscvDivD:
case kRiscvDivS:
case kRiscvDivU32:
case kRiscvMod64:
case kRiscvModU64:
case kRiscvMul64:
@ -70,6 +42,59 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvShr64:
case kRiscvSub64:
case kRiscvSubOvf64:
case kRiscvFloat64RoundDown:
case kRiscvFloat64RoundTiesEven:
case kRiscvFloat64RoundTruncate:
case kRiscvFloat64RoundUp:
case kRiscvSub32:
case kRiscvTruncLD:
case kRiscvTruncLS:
case kRiscvTruncUlD:
case kRiscvTruncUlS:
#elif V8_TARGET_ARCH_RISCV32
case kRiscvAdd32:
case kRiscvAddPair:
case kRiscvSubPair:
case kRiscvMulPair:
case kRiscvAndPair:
case kRiscvOrPair:
case kRiscvXorPair:
case kRiscvShlPair:
case kRiscvShrPair:
case kRiscvSarPair:
case kRiscvAddOvf:
case kRiscvSubOvf:
case kRiscvSub32:
#endif
case kRiscvAbsD:
case kRiscvAbsS:
case kRiscvAddD:
case kRiscvAddS:
case kRiscvAnd:
case kRiscvAnd32:
case kRiscvAssertEqual:
case kRiscvBitcastInt32ToFloat32:
case kRiscvBitcastFloat32ToInt32:
case kRiscvByteSwap32:
case kRiscvCeilWD:
case kRiscvCeilWS:
case kRiscvClz32:
case kRiscvCmp:
case kRiscvCmpZero:
case kRiscvCmpD:
case kRiscvCmpS:
case kRiscvCtz32:
case kRiscvCvtDS:
case kRiscvCvtDUw:
case kRiscvCvtDW:
case kRiscvCvtSD:
case kRiscvCvtSUw:
case kRiscvCvtSW:
case kRiscvMulHighU32:
case kRiscvDiv32:
case kRiscvDivD:
case kRiscvDivS:
case kRiscvDivU32:
case kRiscvF64x2Abs:
case kRiscvF64x2Neg:
case kRiscvF64x2Sqrt:
@ -152,10 +177,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvFloat64InsertHighWord32:
case kRiscvFloat64Max:
case kRiscvFloat64Min:
case kRiscvFloat64RoundDown:
case kRiscvFloat64RoundTiesEven:
case kRiscvFloat64RoundTruncate:
case kRiscvFloat64RoundUp:
case kRiscvFloat64SilenceNaN:
case kRiscvFloorWD:
case kRiscvFloorWS:
@ -338,13 +359,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvShr32:
case kRiscvSqrtD:
case kRiscvSqrtS:
case kRiscvSub32:
case kRiscvSubD:
case kRiscvSubS:
case kRiscvTruncLD:
case kRiscvTruncLS:
case kRiscvTruncUlD:
case kRiscvTruncUlS:
case kRiscvTruncUwD:
case kRiscvTruncUwS:
case kRiscvTruncWD:
@ -353,16 +369,24 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvXor:
case kRiscvXor32:
return kNoOpcodeFlags;

#if V8_TARGET_ARCH_RISCV64
case kRiscvLd:
case kRiscvLwu:
case kRiscvUlwu:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
case kRiscvLoadDecompressTaggedPointer:
case kRiscvLoadDecompressAnyTagged:
#elif V8_TARGET_ARCH_RISCV32
case kRiscvWord32AtomicPairLoad:
#endif
case kRiscvLb:
case kRiscvLbu:
case kRiscvLd:
case kRiscvLoadDouble:
case kRiscvLh:
case kRiscvLhu:
case kRiscvLw:
case kRiscvLoadFloat:
case kRiscvLwu:
case kRiscvRvvLd:
case kRiscvPeek:
case kRiscvUld:
@ -370,36 +394,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvUlh:
case kRiscvUlhu:
case kRiscvUlw:
case kRiscvUlwu:
case kRiscvULoadFloat:
case kRiscvS128LoadSplat:
case kRiscvS128Load64ExtendU:
case kRiscvS128Load64ExtendS:
case kRiscvS128LoadLane:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
case kRiscvLoadDecompressTaggedPointer:
case kRiscvLoadDecompressAnyTagged:
return kIsLoadOperation;

case kRiscvModD:
case kRiscvModS:
case kRiscvRvvSt:
case kRiscvPush:
case kRiscvSb:
#if V8_TARGET_ARCH_RISCV64
case kRiscvSd:
case kRiscvStoreDouble:
case kRiscvSh:
case kRiscvStackClaim:
case kRiscvStoreToStackSlot:
case kRiscvSw:
case kRiscvStoreFloat:
case kRiscvUsd:
case kRiscvUStoreDouble:
case kRiscvUsh:
case kRiscvUsw:
case kRiscvUStoreFloat:
case kRiscvSync:
case kRiscvWord64AtomicStoreWord64:
case kRiscvWord64AtomicAddUint64:
case kRiscvWord64AtomicSubUint64:
@ -409,6 +413,32 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvWord64AtomicExchangeUint64:
case kRiscvWord64AtomicCompareExchangeUint64:
case kRiscvStoreCompressTagged:
#elif V8_TARGET_ARCH_RISCV32
case kRiscvWord32AtomicPairStore:
case kRiscvWord32AtomicPairAdd:
case kRiscvWord32AtomicPairSub:
case kRiscvWord32AtomicPairAnd:
case kRiscvWord32AtomicPairOr:
case kRiscvWord32AtomicPairXor:
case kRiscvWord32AtomicPairExchange:
case kRiscvWord32AtomicPairCompareExchange:
#endif
case kRiscvModD:
case kRiscvModS:
case kRiscvRvvSt:
case kRiscvPush:
case kRiscvSb:
case kRiscvStoreDouble:
case kRiscvSh:
case kRiscvStackClaim:
case kRiscvStoreToStackSlot:
case kRiscvSw:
case kRiscvStoreFloat:
case kRiscvUStoreDouble:
case kRiscvUsh:
case kRiscvUsw:
case kRiscvUStoreFloat:
case kRiscvSync:
case kRiscvS128StoreLane:
return kHasSideEffect;

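The kRiscv*Pair opcodes in the RISCV32 branch above exist because riscv32 has no 64-bit general-purpose registers: a Word64 value occupies a (low, high) register pair, and each 64-bit operation is lowered to 32-bit halves with explicit carry handling. A hedged sketch of the idea behind an AddPair lowering (illustrative only, not V8's emitted sequence):

#include <cstdint>

// 64-bit add on a 32-bit machine: add the low words first, then add the
// high words plus the carry out of the low-word addition.
void AddPair(uint32_t& lo, uint32_t& hi, uint32_t rhs_lo, uint32_t rhs_hi) {
  uint32_t old_lo = lo;
  lo += rhs_lo;
  hi += rhs_hi + (lo < old_lo ? 1u : 0u);  // wraparound signals the carry
}
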
@ -934,12 +964,13 @@ int Popcnt32Latency() {
1 + Mul32Latency() + 1;
}

#if V8_TARGET_ARCH_RISCV64
int Popcnt64Latency() {
return 2 + AndLatency() + Sub64Latency() + 1 + AndLatency() + 1 +
AndLatency() + Add64Latency() + 1 + Add64Latency() + 1 + AndLatency() +
1 + Mul64Latency() + 1;
}

#endif
int CompareFLatency() { return Latency::C_cond_S; }

int CompareF32Latency() { return CompareFLatency(); }
@ -964,13 +995,14 @@ int NegdLatency() {
Latency::MOVF_HIGH_DREG + 1 + XorLatency() + Latency::MOVT_DREG;
}

#if V8_TARGET_ARCH_RISCV64
int Float64RoundLatency() {
// For ceil_l_d, floor_l_d, round_l_d, trunc_l_d latency is 4.
return Latency::MOVF_HIGH_DREG + 1 + Latency::BRANCH + Latency::MOV_D + 4 +
Latency::MOVF_HIGH_DREG + Latency::BRANCH + Latency::CVT_D_L + 2 +
Latency::MOVT_HIGH_FREG;
}

#endif
int Float32RoundLatency() {
// For ceil_w_s, floor_w_s, round_w_s, trunc_w_s latency is 4.
return Latency::MOVF_FREG + 1 + Latency::BRANCH + Latency::MOV_S + 4 +
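The Popcnt32Latency()/Popcnt64Latency() formulas above sum And/Sub/Add/Mul latencies because population count is expanded as the classic SWAR bit-counting sequence; a sketch of the algorithm whose steps are being costed (the standard trick, assumed rather than copied from V8's emitted code):

#include <cstdint>

uint32_t Popcnt32(uint32_t x) {
  x = x - ((x >> 1) & 0x55555555u);                  // count bits in pairs
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // 4-bit sums
  x = (x + (x >> 4)) & 0x0F0F0F0Fu;                  // 8-bit sums
  return (x * 0x01010101u) >> 24;                    // the final Mul32 step
}
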
@ -1169,6 +1201,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kIeee754Float64Tanh:
return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
CallCFunctionLatency() + MovFromFloatResultLatency();
#if V8_TARGET_ARCH_RISCV64
case kRiscvAdd32:
case kRiscvAdd64:
return Add64Latency(instr->InputAt(1)->IsRegister());
@ -1179,28 +1212,8 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Sub64Latency(instr->InputAt(1)->IsRegister());
case kRiscvSubOvf64:
return SubOverflow64Latency();
case kRiscvMul32:
return Mul32Latency();
case kRiscvMulOvf32:
return MulOverflow32Latency();
case kRiscvMulHigh32:
return Mulh32Latency();
case kRiscvMulHighU32:
return Mulhu32Latency();
case kRiscvMulHigh64:
return Mulh64Latency();
case kRiscvDiv32: {
int latency = Div32Latency(instr->InputAt(1)->IsRegister());
return latency + MovzLatency();
}
case kRiscvDivU32: {
int latency = Divu32Latency(instr->InputAt(1)->IsRegister());
return latency + MovzLatency();
}
case kRiscvMod32:
return Mod32Latency();
case kRiscvModU32:
return Modu32Latency();
case kRiscvMul64:
return Mul64Latency();
case kRiscvDiv64: {
@ -1215,6 +1228,36 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Mod64Latency();
case kRiscvModU64:
return Modu64Latency();
#elif V8_TARGET_ARCH_RISCV32
case kRiscvAdd32:
return Add64Latency(instr->InputAt(1)->IsRegister());
case kRiscvAddOvf:
return AddOverflow64Latency();
case kRiscvSub32:
return Sub64Latency(instr->InputAt(1)->IsRegister());
case kRiscvSubOvf:
return SubOverflow64Latency();
#endif
case kRiscvMul32:
return Mul32Latency();
case kRiscvMulOvf32:
return MulOverflow32Latency();
case kRiscvMulHigh32:
return Mulh32Latency();
case kRiscvMulHighU32:
return Mulhu32Latency();
case kRiscvDiv32: {
int latency = Div32Latency(instr->InputAt(1)->IsRegister());
return latency + MovzLatency();
}
case kRiscvDivU32: {
int latency = Divu32Latency(instr->InputAt(1)->IsRegister());
return latency + MovzLatency();
}
case kRiscvMod32:
return Mod32Latency();
case kRiscvModU32:
return Modu32Latency();
case kRiscvAnd:
return AndLatency(instr->InputAt(1)->IsRegister());
case kRiscvAnd32: {
@ -1260,28 +1303,36 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
}
}
case kRiscvClz32:
#if V8_TARGET_ARCH_RISCV64
case kRiscvClz64:
#endif
return Clz64Latency();
case kRiscvCtz32:
return Ctz32Latency();
#if V8_TARGET_ARCH_RISCV64
case kRiscvCtz64:
return Ctz64Latency();
case kRiscvPopcnt32:
return Popcnt32Latency();
case kRiscvPopcnt64:
return Popcnt64Latency();
#endif
case kRiscvCtz32:
return Ctz32Latency();
case kRiscvPopcnt32:
return Popcnt32Latency();
case kRiscvShl32:
return 1;
case kRiscvShr32:
case kRiscvSar32:
#if V8_TARGET_ARCH_RISCV64
case kRiscvZeroExtendWord:
#endif
return 2;
#if V8_TARGET_ARCH_RISCV64
case kRiscvSignExtendWord:
case kRiscvShl64:
case kRiscvShr64:
case kRiscvSar64:
case kRiscvRor32:
case kRiscvRor64:
#endif
case kRiscvRor32:
return 1;
case kRiscvTst:
return AndLatency(instr->InputAt(1)->IsRegister());
@ -1333,11 +1384,13 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Latency::MAX_D;
case kRiscvMinD:
return Latency::MIN_D;
#if V8_TARGET_ARCH_RISCV64
case kRiscvFloat64RoundDown:
case kRiscvFloat64RoundTruncate:
case kRiscvFloat64RoundUp:
case kRiscvFloat64RoundTiesEven:
return Float64RoundLatency();
#endif
case kRiscvFloat32RoundDown:
case kRiscvFloat32RoundTruncate:
case kRiscvFloat32RoundUp:
@ -1363,18 +1416,20 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Latency::MOVT_FREG + Latency::CVT_S_W;
case kRiscvCvtSUw:
return 1 + Latency::MOVT_DREG + Latency::CVT_S_L;
#if V8_TARGET_ARCH_RISCV64
case kRiscvCvtSL:
return Latency::MOVT_DREG + Latency::CVT_S_L;
case kRiscvCvtDL:
return Latency::MOVT_DREG + Latency::CVT_D_L;
case kRiscvCvtDUw:
return 1 + Latency::MOVT_DREG + Latency::CVT_D_L;
case kRiscvCvtDUl:
return 2 * Latency::BRANCH + 3 + 2 * Latency::MOVT_DREG +
2 * Latency::CVT_D_L + Latency::ADD_D;
case kRiscvCvtSUl:
return 2 * Latency::BRANCH + 3 + 2 * Latency::MOVT_DREG +
2 * Latency::CVT_S_L + Latency::ADD_S;
#endif
case kRiscvCvtDUw:
return 1 + Latency::MOVT_DREG + Latency::CVT_D_L;
case kRiscvFloorWD:
return Latency::FLOOR_W_D + Latency::MOVF_FREG;
case kRiscvCeilWD:
@ -1391,10 +1446,20 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Latency::ROUND_W_S + Latency::MOVF_FREG;
case kRiscvTruncWS:
return Latency::TRUNC_W_S + Latency::MOVF_FREG + 2 + MovnLatency();
#if V8_TARGET_ARCH_RISCV64
case kRiscvTruncLS:
return TruncLSLatency(instr->OutputCount() > 1);
case kRiscvTruncLD:
return TruncLDLatency(instr->OutputCount() > 1);
case kRiscvTruncUlS:
return TruncUlSLatency();
case kRiscvTruncUlD:
return TruncUlDLatency();
case kRiscvBitcastDL:
return Latency::MOVF_HIGH_DREG;
case kRiscvBitcastLD:
return Latency::MOVT_DREG;
#endif
case kRiscvTruncUwD:
// Estimated max.
return CompareF64Latency() + 2 * Latency::BRANCH +
@ -1406,14 +1471,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return CompareF32Latency() + 2 * Latency::BRANCH +
2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() +
Latency::MOVT_FREG + 2 * Latency::MOVF_FREG + 2 + MovzLatency();
case kRiscvTruncUlS:
return TruncUlSLatency();
case kRiscvTruncUlD:
return TruncUlDLatency();
case kRiscvBitcastDL:
return Latency::MOVF_HIGH_DREG;
case kRiscvBitcastLD:
return Latency::MOVT_DREG;
case kRiscvFloat64ExtractLowWord32:
return Latency::MOVF_FREG;
case kRiscvFloat64InsertLowWord32:
@ -1430,13 +1487,15 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kRiscvLb:
case kRiscvLhu:
case kRiscvLh:
case kRiscvLwu:
case kRiscvLw:
#if V8_TARGET_ARCH_RISCV64
case kRiscvLd:
case kRiscvSd:
case kRiscvLwu:
#endif
case kRiscvSb:
case kRiscvSh:
case kRiscvSw:
case kRiscvSd:
return AlignedMemoryLatency();
case kRiscvLoadFloat:
return ULoadFloatLatency();
@ -1449,12 +1508,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kRiscvUlhu:
case kRiscvUlh:
return UlhuLatency();
#if V8_TARGET_ARCH_RISCV64
case kRiscvUlwu:
return UlwuLatency();
case kRiscvUlw:
return UlwLatency();
case kRiscvUld:
return UldLatency();
case kRiscvUsd:
return UsdLatency();
case kRiscvByteSwap64:
return ByteSwapSignedLatency();
#endif
case kRiscvUlw:
return UlwLatency();
case kRiscvULoadFloat:
return ULoadFloatLatency();
case kRiscvULoadDouble:
@ -1463,8 +1528,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return UshLatency();
case kRiscvUsw:
return UswLatency();
case kRiscvUsd:
return UsdLatency();
case kRiscvUStoreFloat:
return UStoreFloatLatency();
case kRiscvUStoreDouble:
@ -1512,8 +1575,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
}
return latency;
}
case kRiscvByteSwap64:
return ByteSwapSignedLatency();
case kRiscvByteSwap32:
return ByteSwapSignedLatency();
case kAtomicLoadInt8:
1239 src/compiler/backend/riscv/instruction-selector-riscv.h (new file)
File diff suppressed because it is too large
1325 src/compiler/backend/riscv/instruction-selector-riscv32.cc (new file)
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,432 +0,0 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_
#define V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_

namespace v8 {
namespace internal {
namespace compiler {

// RISC-V-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(RiscvAdd32) \
V(RiscvAdd64) \
V(RiscvAddOvf64) \
V(RiscvSub32) \
V(RiscvSub64) \
V(RiscvSubOvf64) \
V(RiscvMul32) \
V(RiscvMulOvf32) \
V(RiscvMulHigh32) \
V(RiscvMulHigh64) \
V(RiscvMulHighU32) \
V(RiscvMul64) \
V(RiscvDiv32) \
V(RiscvDiv64) \
V(RiscvDivU32) \
V(RiscvDivU64) \
V(RiscvMod32) \
V(RiscvMod64) \
V(RiscvModU32) \
V(RiscvModU64) \
V(RiscvAnd) \
V(RiscvAnd32) \
V(RiscvOr) \
V(RiscvOr32) \
V(RiscvNor) \
V(RiscvNor32) \
V(RiscvXor) \
V(RiscvXor32) \
V(RiscvClz32) \
V(RiscvShl32) \
V(RiscvShr32) \
V(RiscvSar32) \
V(RiscvZeroExtendWord) \
V(RiscvSignExtendWord) \
V(RiscvClz64) \
V(RiscvCtz32) \
V(RiscvCtz64) \
V(RiscvPopcnt32) \
V(RiscvPopcnt64) \
V(RiscvShl64) \
V(RiscvShr64) \
V(RiscvSar64) \
V(RiscvRor32) \
V(RiscvRor64) \
V(RiscvMov) \
V(RiscvTst) \
V(RiscvCmp) \
V(RiscvCmpZero) \
V(RiscvCmpS) \
V(RiscvAddS) \
V(RiscvSubS) \
V(RiscvMulS) \
V(RiscvDivS) \
V(RiscvModS) \
V(RiscvAbsS) \
V(RiscvNegS) \
V(RiscvSqrtS) \
V(RiscvMaxS) \
V(RiscvMinS) \
V(RiscvCmpD) \
V(RiscvAddD) \
V(RiscvSubD) \
V(RiscvMulD) \
V(RiscvDivD) \
V(RiscvModD) \
V(RiscvAbsD) \
V(RiscvNegD) \
V(RiscvSqrtD) \
V(RiscvMaxD) \
V(RiscvMinD) \
V(RiscvFloat64RoundDown) \
V(RiscvFloat64RoundTruncate) \
V(RiscvFloat64RoundUp) \
V(RiscvFloat64RoundTiesEven) \
V(RiscvFloat32RoundDown) \
V(RiscvFloat32RoundTruncate) \
V(RiscvFloat32RoundUp) \
V(RiscvFloat32RoundTiesEven) \
V(RiscvCvtSD) \
V(RiscvCvtDS) \
V(RiscvTruncWD) \
V(RiscvRoundWD) \
V(RiscvFloorWD) \
V(RiscvCeilWD) \
V(RiscvTruncWS) \
V(RiscvRoundWS) \
V(RiscvFloorWS) \
V(RiscvCeilWS) \
V(RiscvTruncLS) \
V(RiscvTruncLD) \
V(RiscvTruncUwD) \
V(RiscvTruncUwS) \
V(RiscvTruncUlS) \
V(RiscvTruncUlD) \
V(RiscvCvtDW) \
V(RiscvCvtSL) \
V(RiscvCvtSW) \
V(RiscvCvtSUw) \
V(RiscvCvtSUl) \
V(RiscvCvtDL) \
V(RiscvCvtDUw) \
V(RiscvCvtDUl) \
V(RiscvLb) \
V(RiscvLbu) \
V(RiscvSb) \
V(RiscvLh) \
V(RiscvUlh) \
V(RiscvLhu) \
V(RiscvUlhu) \
V(RiscvSh) \
V(RiscvUsh) \
V(RiscvLd) \
V(RiscvUld) \
V(RiscvLw) \
V(RiscvUlw) \
V(RiscvLwu) \
V(RiscvUlwu) \
V(RiscvSw) \
V(RiscvUsw) \
V(RiscvSd) \
V(RiscvUsd) \
V(RiscvLoadFloat) \
V(RiscvULoadFloat) \
V(RiscvStoreFloat) \
V(RiscvUStoreFloat) \
V(RiscvLoadDouble) \
V(RiscvULoadDouble) \
V(RiscvStoreDouble) \
V(RiscvUStoreDouble) \
V(RiscvBitcastDL) \
V(RiscvBitcastLD) \
V(RiscvBitcastInt32ToFloat32) \
V(RiscvBitcastFloat32ToInt32) \
V(RiscvFloat64ExtractLowWord32) \
V(RiscvFloat64ExtractHighWord32) \
V(RiscvFloat64InsertLowWord32) \
V(RiscvFloat64InsertHighWord32) \
V(RiscvFloat32Max) \
V(RiscvFloat64Max) \
V(RiscvFloat32Min) \
V(RiscvFloat64Min) \
V(RiscvFloat64SilenceNaN) \
V(RiscvPush) \
V(RiscvPeek) \
V(RiscvByteSwap64) \
V(RiscvByteSwap32) \
V(RiscvStoreToStackSlot) \
V(RiscvStackClaim) \
V(RiscvSignExtendByte) \
V(RiscvSignExtendShort) \
V(RiscvSync) \
V(RiscvAssertEqual) \
V(RiscvS128Const) \
V(RiscvS128Zero) \
V(RiscvS128AllOnes) \
V(RiscvI32x4Splat) \
V(RiscvI32x4ExtractLane) \
V(RiscvI32x4ReplaceLane) \
V(RiscvI32x4Add) \
V(RiscvI32x4Sub) \
V(RiscvF64x2Abs) \
V(RiscvF64x2Neg) \
V(RiscvF32x4Splat) \
V(RiscvF32x4ExtractLane) \
V(RiscvF32x4ReplaceLane) \
V(RiscvF32x4SConvertI32x4) \
V(RiscvF32x4UConvertI32x4) \
V(RiscvI64x2SConvertI32x4Low) \
V(RiscvI64x2SConvertI32x4High) \
V(RiscvI64x2UConvertI32x4Low) \
V(RiscvI64x2UConvertI32x4High) \
V(RiscvI32x4Mul) \
V(RiscvI32x4MaxS) \
V(RiscvI32x4MinS) \
V(RiscvI32x4Eq) \
V(RiscvI32x4Ne) \
V(RiscvI32x4Shl) \
V(RiscvI32x4ShrS) \
V(RiscvI32x4ShrU) \
V(RiscvI32x4MaxU) \
V(RiscvI32x4MinU) \
V(RiscvI64x2GtS) \
V(RiscvI64x2GeS) \
V(RiscvI64x2Eq) \
V(RiscvI64x2Ne) \
V(RiscvF64x2Sqrt) \
V(RiscvF64x2Add) \
V(RiscvF64x2Sub) \
V(RiscvF64x2Mul) \
V(RiscvF64x2Div) \
V(RiscvF64x2Min) \
V(RiscvF64x2Max) \
V(RiscvF64x2ConvertLowI32x4S) \
V(RiscvF64x2ConvertLowI32x4U) \
V(RiscvF64x2PromoteLowF32x4) \
V(RiscvF64x2Eq) \
V(RiscvF64x2Ne) \
V(RiscvF64x2Lt) \
V(RiscvF64x2Le) \
V(RiscvF64x2Splat) \
V(RiscvF64x2ExtractLane) \
V(RiscvF64x2ReplaceLane) \
V(RiscvF64x2Pmin) \
V(RiscvF64x2Pmax) \
V(RiscvF64x2Ceil) \
V(RiscvF64x2Floor) \
V(RiscvF64x2Trunc) \
V(RiscvF64x2NearestInt) \
V(RiscvI64x2Splat) \
V(RiscvI64x2ExtractLane) \
V(RiscvI64x2ReplaceLane) \
V(RiscvI64x2Add) \
V(RiscvI64x2Sub) \
V(RiscvI64x2Mul) \
V(RiscvI64x2Abs) \
V(RiscvI64x2Neg) \
V(RiscvI64x2Shl) \
V(RiscvI64x2ShrS) \
V(RiscvI64x2ShrU) \
V(RiscvI64x2BitMask) \
V(RiscvF32x4Abs) \
V(RiscvF32x4Neg) \
V(RiscvF32x4Sqrt) \
V(RiscvF32x4Qfma) \
V(RiscvF32x4Qfms) \
V(RiscvF64x2Qfma) \
V(RiscvF64x2Qfms) \
V(RiscvF32x4Add) \
V(RiscvF32x4Sub) \
V(RiscvF32x4Mul) \
V(RiscvF32x4Div) \
V(RiscvF32x4Max) \
V(RiscvF32x4Min) \
V(RiscvF32x4Eq) \
V(RiscvF32x4Ne) \
V(RiscvF32x4Lt) \
V(RiscvF32x4Le) \
V(RiscvF32x4Pmin) \
V(RiscvF32x4Pmax) \
V(RiscvF32x4DemoteF64x2Zero) \
V(RiscvF32x4Ceil) \
V(RiscvF32x4Floor) \
V(RiscvF32x4Trunc) \
V(RiscvF32x4NearestInt) \
V(RiscvI32x4SConvertF32x4) \
V(RiscvI32x4UConvertF32x4) \
V(RiscvI32x4Neg) \
V(RiscvI32x4GtS) \
V(RiscvI32x4GeS) \
V(RiscvI32x4GtU) \
V(RiscvI32x4GeU) \
V(RiscvI32x4Abs) \
V(RiscvI32x4BitMask) \
V(RiscvI32x4TruncSatF64x2SZero) \
V(RiscvI32x4TruncSatF64x2UZero) \
V(RiscvI16x8Splat) \
V(RiscvI16x8ExtractLaneU) \
V(RiscvI16x8ExtractLaneS) \
V(RiscvI16x8ReplaceLane) \
V(RiscvI16x8Neg) \
V(RiscvI16x8Shl) \
V(RiscvI16x8ShrS) \
V(RiscvI16x8ShrU) \
V(RiscvI16x8Add) \
V(RiscvI16x8AddSatS) \
V(RiscvI16x8Sub) \
V(RiscvI16x8SubSatS) \
V(RiscvI16x8Mul) \
V(RiscvI16x8MaxS) \
V(RiscvI16x8MinS) \
V(RiscvI16x8Eq) \
V(RiscvI16x8Ne) \
V(RiscvI16x8GtS) \
V(RiscvI16x8GeS) \
V(RiscvI16x8AddSatU) \
V(RiscvI16x8SubSatU) \
V(RiscvI16x8MaxU) \
V(RiscvI16x8MinU) \
V(RiscvI16x8GtU) \
V(RiscvI16x8GeU) \
V(RiscvI16x8RoundingAverageU) \
V(RiscvI16x8Q15MulRSatS) \
V(RiscvI16x8Abs) \
V(RiscvI16x8BitMask) \
V(RiscvI8x16Splat) \
V(RiscvI8x16ExtractLaneU) \
V(RiscvI8x16ExtractLaneS) \
V(RiscvI8x16ReplaceLane) \
V(RiscvI8x16Neg) \
V(RiscvI8x16Shl) \
V(RiscvI8x16ShrS) \
V(RiscvI8x16Add) \
V(RiscvI8x16AddSatS) \
V(RiscvI8x16Sub) \
V(RiscvI8x16SubSatS) \
V(RiscvI8x16MaxS) \
V(RiscvI8x16MinS) \
V(RiscvI8x16Eq) \
V(RiscvI8x16Ne) \
V(RiscvI8x16GtS) \
V(RiscvI8x16GeS) \
V(RiscvI8x16ShrU) \
V(RiscvI8x16AddSatU) \
V(RiscvI8x16SubSatU) \
V(RiscvI8x16MaxU) \
V(RiscvI8x16MinU) \
V(RiscvI8x16GtU) \
V(RiscvI8x16GeU) \
V(RiscvI8x16RoundingAverageU) \
V(RiscvI8x16Abs) \
V(RiscvI8x16BitMask) \
V(RiscvI8x16Popcnt) \
V(RiscvS128And) \
V(RiscvS128Or) \
V(RiscvS128Xor) \
V(RiscvS128Not) \
V(RiscvS128Select) \
V(RiscvS128AndNot) \
V(RiscvS128Load64Zero) \
V(RiscvS128Load32Zero) \
V(RiscvI32x4AllTrue) \
V(RiscvI16x8AllTrue) \
V(RiscvV128AnyTrue) \
V(RiscvI8x16AllTrue) \
V(RiscvI64x2AllTrue) \
V(RiscvS32x4InterleaveRight) \
V(RiscvS32x4InterleaveLeft) \
V(RiscvS32x4PackEven) \
V(RiscvS32x4PackOdd) \
V(RiscvS32x4InterleaveEven) \
V(RiscvS32x4InterleaveOdd) \
V(RiscvS32x4Shuffle) \
V(RiscvS16x8InterleaveRight) \
V(RiscvS16x8InterleaveLeft) \
V(RiscvS16x8PackEven) \
V(RiscvS16x8PackOdd) \
V(RiscvS16x8InterleaveEven) \
V(RiscvS16x8InterleaveOdd) \
V(RiscvS16x4Reverse) \
V(RiscvS16x2Reverse) \
V(RiscvS8x16InterleaveRight) \
V(RiscvS8x16InterleaveLeft) \
V(RiscvS8x16PackEven) \
V(RiscvS8x16PackOdd) \
V(RiscvS8x16InterleaveEven) \
V(RiscvS8x16InterleaveOdd) \
V(RiscvI8x16Shuffle) \
V(RiscvS8x16Concat) \
V(RiscvS8x8Reverse) \
V(RiscvS8x4Reverse) \
V(RiscvS8x2Reverse) \
V(RiscvS128LoadSplat) \
V(RiscvS128Load64ExtendS) \
V(RiscvS128Load64ExtendU) \
V(RiscvS128LoadLane) \
V(RiscvS128StoreLane) \
V(RiscvRvvLd) \
V(RiscvRvvSt) \
V(RiscvI32x4SConvertI16x8Low) \
V(RiscvI32x4SConvertI16x8High) \
V(RiscvI32x4UConvertI16x8Low) \
V(RiscvI32x4UConvertI16x8High) \
V(RiscvI16x8SConvertI8x16Low) \
V(RiscvI16x8SConvertI8x16High) \
V(RiscvI16x8SConvertI32x4) \
V(RiscvI16x8UConvertI32x4) \
V(RiscvI16x8UConvertI8x16Low) \
V(RiscvI16x8UConvertI8x16High) \
V(RiscvI8x16SConvertI16x8) \
V(RiscvI8x16UConvertI16x8) \
V(RiscvVwmul) \
V(RiscvVwmulu) \
V(RiscvVmvSx) \
V(RiscvVcompress) \
V(RiscvVaddVv) \
V(RiscvVwadd) \
V(RiscvVwaddu) \
V(RiscvVrgather) \
V(RiscvVslidedown) \
V(RiscvWord64AtomicLoadUint64) \
V(RiscvWord64AtomicStoreWord64) \
V(RiscvWord64AtomicAddUint64) \
V(RiscvWord64AtomicSubUint64) \
V(RiscvWord64AtomicAndUint64) \
V(RiscvWord64AtomicOrUint64) \
V(RiscvWord64AtomicXorUint64) \
V(RiscvWord64AtomicExchangeUint64) \
V(RiscvWord64AtomicCompareExchangeUint64) \
V(RiscvStoreCompressTagged) \
V(RiscvLoadDecompressTaggedSigned) \
V(RiscvLoadDecompressTaggedPointer) \
V(RiscvLoadDecompressAnyTagged)

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
// Root = [kRootregister + immediate]
// TODO(plind): Add the new r6 address modes.
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI) /* [%r0 + K] */ \
  V(MRR) /* [%r0 + %r1] */ \
  V(Root) /* [root + k] */

} // namespace compiler
} // namespace internal
} // namespace v8

#endif // V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_
@ -121,7 +121,7 @@ namespace {
#define CALLEE_SAVE_REGISTERS r6, r7, r8, r9, r10, ip, r13
#define CALLEE_SAVE_FP_REGISTERS d8, d9, d10, d11, d12, d13, d14, d15

#elif V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
// ===========================================================================
// == riscv64 =================================================================
// ===========================================================================

@ -112,7 +112,7 @@ class LinuxPerfJitLogger : public CodeEventLogger {
return kElfMachS390x;
#elif V8_TARGET_ARCH_PPC64
return kElfMachPPC64;
#elif V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
return kElfMachRISCV;
#else
UNIMPLEMENTED();

@ -27,13 +27,11 @@
#include <stdio.h>
#include <string.h>

#if V8_TARGET_ARCH_RISCV64

#include "src/base/platform/platform.h"
#include "src/base/strings.h"
#include "src/base/vector.h"
#include "src/codegen/constants-arch.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/riscv64/constants-riscv64.h"
#include "src/diagnostics/disasm.h"

namespace v8 {
@ -1708,10 +1706,10 @@ void Decoder::DecodeBType(Instruction* instr) {
void Decoder::DecodeUType(Instruction* instr) {
// U Type doesn't have additional mask
switch (instr->BaseOpcodeFieldRaw()) {
case RO_LUI:
case LUI:
Format(instr, "lui 'rd, 'imm20U");
break;
case RO_AUIPC:
case AUIPC:
Format(instr, "auipc 'rd, 'imm20U");
break;
default:
@ -1722,7 +1720,7 @@ void Decoder::DecodeUType(Instruction* instr) {
void Decoder::DecodeJType(Instruction* instr) {
// J Type doesn't have additional mask
switch (instr->BaseOpcodeValue()) {
case RO_JAL:
case JAL:
if (instr->RdValue() == zero_reg.code())
Format(instr, "j 'imm20J");
else if (instr->RdValue() == ra.code())
@ -1774,12 +1772,14 @@ void Decoder::DecodeCAType(Instruction* instr) {
case RO_C_AND:
Format(instr, "and 'Crs1s, 'Crs1s, 'Crs2s");
break;
#ifdef V8_TARGET_ARCH_64_BIT
case RO_C_SUBW:
Format(instr, "subw 'Crs1s, 'Crs1s, 'Crs2s");
break;
case RO_C_ADDW:
Format(instr, "addw 'Crs1s, 'Crs1s, 'Crs2s");
break;
#endif
default:
UNSUPPORTED_RISCV();
}
@ -1793,9 +1793,11 @@ void Decoder::DecodeCIType(Instruction* instr) {
else
Format(instr, "addi 'Crd, 'Crd, 'Cimm6");
break;
#ifdef V8_TARGET_ARCH_64_BIT
case RO_C_ADDIW:
Format(instr, "addiw 'Crd, 'Crd, 'Cimm6");
break;
#endif
case RO_C_LI:
Format(instr, "li 'Crd, 'Cimm6");
break;
@ -1816,9 +1818,15 @@ void Decoder::DecodeCIType(Instruction* instr) {
case RO_C_LWSP:
Format(instr, "lw 'Crd, 'Cimm6Lwsp(sp)");
break;
#ifdef V8_TARGET_ARCH_64_BIT
case RO_C_LDSP:
Format(instr, "ld 'Crd, 'Cimm6Ldsp(sp)");
break;
#elif defined(V8_TARGET_ARCH_32_BIT)
case RO_C_FLWSP:
Format(instr, "flw 'Cfd, 'Cimm6Ldsp(sp)");
break;
#endif
default:
UNSUPPORTED_RISCV();
}
@ -1839,9 +1847,15 @@ void Decoder::DecodeCSSType(Instruction* instr) {
case RO_C_SWSP:
Format(instr, "sw 'Crs2, 'Cimm6Swsp(sp)");
break;
#ifdef V8_TARGET_ARCH_64_BIT
case RO_C_SDSP:
Format(instr, "sd 'Crs2, 'Cimm6Sdsp(sp)");
break;
#elif defined(V8_TARGET_ARCH_32_BIT)
case RO_C_FSWSP:
Format(instr, "fsw 'Cfs2, 'Cimm6Sdsp(sp)");
break;
#endif
case RO_C_FSDSP:
Format(instr, "fsd 'Cfs2, 'Cimm6Sdsp(sp)");
break;
@ -1858,9 +1872,16 @@ void Decoder::DecodeCLType(Instruction* instr) {
case RO_C_LW:
Format(instr, "lw 'Crs2s, 'Cimm5W('Crs1s)");
break;
#ifdef V8_TARGET_ARCH_64_BIT
case RO_C_LD:
Format(instr, "ld 'Crs2s, 'Cimm5D('Crs1s)");
break;
#elif defined(V8_TARGET_ARCH_32_BIT)
case RO_C_FLW:
Format(instr, "fld 'Cfs2s, 'Cimm5D('Crs1s)");
break;
#endif

default:
UNSUPPORTED_RISCV();
}
@ -1874,9 +1895,15 @@ void Decoder::DecodeCSType(Instruction* instr) {
case RO_C_SW:
Format(instr, "sw 'Crs2s, 'Cimm5W('Crs1s)");
break;
#ifdef V8_TARGET_ARCH_64_BIT
case RO_C_SD:
Format(instr, "sd 'Crs2s, 'Cimm5D('Crs1s)");
break;
#elif defined(V8_TARGET_ARCH_32_BIT)
case RO_C_FSW:
Format(instr, "fsw 'Cfs2s, 'Cimm5D('Crs1s)");
break;
#endif
default:
UNSUPPORTED_RISCV();
}
@ -2954,5 +2981,3 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
#undef STRING_STARTS_WITH

} // namespace disasm

#endif // V8_TARGET_ARCH_RISCV64

@ -424,8 +424,8 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/loong64/frame-constants-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/frame-constants-s390.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/execution/riscv64/frame-constants-riscv64.h"
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
#include "src/execution/riscv/frame-constants-riscv.h"
#else
#error Unsupported target architecture.
#endif

@ -184,7 +184,7 @@ class StackFrame {
// invalid frame markers.
#if (defined(USE_SIMULATOR) && \
(V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)) || \
V8_TARGET_ARCH_RISCV64
(V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64)
if (static_cast<uintptr_t>(type) >= Type::NUMBER_OF_TYPES) {
// Appease UBSan.
return Type::NUMBER_OF_TYPES;

@ -2,11 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_RISCV64
#include "src/execution/riscv/frame-constants-riscv.h"

#include "src/execution/riscv64/frame-constants-riscv64.h"

#include "src/codegen/riscv64/assembler-riscv64-inl.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"

@ -28,5 +25,3 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {

} // namespace internal
} // namespace v8

#endif // V8_TARGET_ARCH_RISCV64

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_EXECUTION_RISCV64_FRAME_CONSTANTS_RISCV64_H_
#define V8_EXECUTION_RISCV64_FRAME_CONSTANTS_RISCV64_H_
#ifndef V8_EXECUTION_RISCV_FRAME_CONSTANTS_RISCV_H_
#define V8_EXECUTION_RISCV_FRAME_CONSTANTS_RISCV_H_

#include "src/base/bits.h"
#include "src/base/macros.h"
@ -31,10 +31,16 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedGpParamRegs + kNumberOfSavedFpParamRegs;

// FP-relative.
#ifdef V8_TARGET_ARCH_RISCV64
// See Generate_WasmCompileLazy in builtins-riscv64.cc.
// TODO(riscv): add rvv v reg save
static constexpr int kWasmInstanceOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(kNumberOfSavedAllParamRegs);
#elif V8_TARGET_ARCH_RISCV32
// See Generate_WasmCompileLazy in builtins-riscv32.cc.
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(
kNumberOfSavedGpParamRegs + kNumberOfSavedFpParamRegs * 2);
#endif
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kSystemPointerSize +
@ -80,4 +86,4 @@ class WasmDebugBreakFrameConstants : public TypedFrameConstants {
} // namespace internal
} // namespace v8

#endif // V8_EXECUTION_RISCV64_FRAME_CONSTANTS_RISCV64_H_
#endif // V8_EXECUTION_RISCV_FRAME_CONSTANTS_RISCV_H_
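The riscv32 branch above multiplies kNumberOfSavedFpParamRegs by two because the saved FP parameter registers hold 64-bit doubles while a riscv32 frame slot is one 32-bit word, so each saved FP register spans two slots. A toy restatement of the arithmetic (register counts hypothetical, for illustration only):

constexpr int kGpRegs = 6;                       // hypothetical count
constexpr int kFpRegs = 7;                       // hypothetical count
constexpr int kSlots64 = kGpRegs + kFpRegs;      // rv64: one slot per double
constexpr int kSlots32 = kGpRegs + kFpRegs * 2;  // rv32: two slots per double
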
File diff suppressed because it is too large
@ -46,8 +46,8 @@
// GeneratedCode wrapper, which will start execution in the Simulator or
// forwards to the real entry on a RISC-V HW platform.

#ifndef V8_EXECUTION_RISCV64_SIMULATOR_RISCV64_H_
#define V8_EXECUTION_RISCV64_SIMULATOR_RISCV64_H_
#ifndef V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_
#define V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_

// globals.h defines USE_SIMULATOR.
#include "src/common/globals.h"
@ -74,9 +74,10 @@ T Nabs(T a) {

#include "src/base/hashmap.h"
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/constants-riscv64.h"
#include "src/codegen/constants-arch.h"
#include "src/execution/simulator-base.h"
#include "src/utils/allocation.h"
#include "src/utils/boxed-float.h"

namespace v8 {
namespace internal {
@ -86,19 +87,27 @@ namespace internal {
#ifdef V8_TARGET_ARCH_32_BIT
using sreg_t = int32_t;
using reg_t = uint32_t;
#define xlen 32
using freg_t = uint64_t;
using sfreg_t = int64_t;
#elif V8_TARGET_ARCH_64_BIT
using sreg_t = int64_t;
using reg_t = uint64_t;
#define xlen 64
using freg_t = uint64_t;
using sfreg_t = int64_t;
#else
#error "Cannot detect Riscv's bitwidth"
#endif

#define sext32(x) ((sreg_t)(int32_t)(x))
#define zext32(x) ((reg_t)(uint32_t)(x))

#ifdef V8_TARGET_ARCH_64_BIT
#define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen))
#define zext_xlen(x) (((reg_t)(x) << (64 - xlen)) >> (64 - xlen))
#elif V8_TARGET_ARCH_32_BIT
#define sext_xlen(x) (((sreg_t)(x) << (32 - xlen)) >> (32 - xlen))
#define zext_xlen(x) (((reg_t)(x) << (32 - xlen)) >> (32 - xlen))
#endif

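sext_xlen/zext_xlen reduce a host-width value to xlen bits and extend it back by shifting the value up until bit xlen-1 sits in the host sign position, then shifting back down arithmetically (sign-extend) or logically (zero-extend). A standalone rendering of the same shift-pair trick (widths illustrative, not V8 code):

#include <cstdint>

// Sign-extend the low `bits` bits of x to 64 bits.
constexpr int64_t SextBits(uint64_t x, int bits) {
  return static_cast<int64_t>(x << (64 - bits)) >> (64 - bits);
}
static_assert(SextBits(0x80, 8) == -128, "bit 7 becomes the sign bit");
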
#define BIT(n) (0x1LL << n)
#define QUIET_BIT_S(nan) (base::bit_cast<int32_t>(nan) & BIT(22))
@ -108,6 +117,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
#undef QUIET_BIT_S
#undef QUIET_BIT_D

#ifdef V8_TARGET_ARCH_64_BIT
inline uint64_t mulhu(uint64_t a, uint64_t b) {
__uint128_t full_result = ((__uint128_t)a) * ((__uint128_t)b);
return full_result >> 64;
@ -122,6 +132,25 @@ inline int64_t mulhsu(int64_t a, uint64_t b) {
__int128_t full_result = ((__int128_t)a) * ((__uint128_t)b);
return full_result >> 64;
}
#elif V8_TARGET_ARCH_32_BIT
inline uint32_t mulhu(uint32_t a, uint32_t b) {
uint64_t full_result = ((uint64_t)a) * ((uint64_t)b);
uint64_t upper_part = full_result >> 32;
return (uint32_t)upper_part;
}

inline int32_t mulh(int32_t a, int32_t b) {
int64_t full_result = ((int64_t)a) * ((int64_t)b);
int64_t upper_part = full_result >> 32;
return (int32_t)upper_part;
}

inline int32_t mulhsu(int32_t a, uint32_t b) {
int64_t full_result = ((int64_t)a) * ((uint64_t)b);
int64_t upper_part = full_result >> 32;
return (int32_t)upper_part;
}
#endif

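The 32-bit mulhu/mulh/mulhsu helpers above model the RISC-V MULH-family instructions by widening to a 64-bit product and keeping only the upper half (the 64-bit build does the same with __uint128_t). A quick check with concrete values:

#include <cassert>
#include <cstdint>

int main() {
  // Same widening trick as the 32-bit mulhu above.
  auto mulhu32 = [](uint32_t a, uint32_t b) {
    return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
  };
  assert(mulhu32(0x80000000u, 4u) == 2u);  // 0x80000000 * 4 = 0x2'00000000
  assert(mulhu32(0xFFFFFFFFu, 0xFFFFFFFFu) == 0xFFFFFFFEu);
  return 0;
}
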
// Floating point helpers
#define F32_SIGN ((uint32_t)1 << 31)
@ -139,6 +168,21 @@ inline float fsgnj32(float rs1, float rs2, bool n, bool x) {
F32_SIGN);
return res.f;
}

inline Float32 fsgnj32(Float32 rs1, Float32 rs2, bool n, bool x) {
u32_f32 a = {.u = rs1.get_bits()}, b = {.u = rs2.get_bits()};
u32_f32 res;
if (x) { // RO_FSQNJX_S
res.u = (a.u & ~F32_SIGN) | ((a.u ^ b.u) & F32_SIGN);
} else {
if (n) { // RO_FSGNJN_S
res.u = (a.u & ~F32_SIGN) | ((F32_SIGN ^ b.u) & F32_SIGN);
} else { // RO_FSGNJ_S
res.u = (a.u & ~F32_SIGN) | ((0 ^ b.u) & F32_SIGN);
}
}
return Float32::FromBits(res.u);
}
#define F64_SIGN ((uint64_t)1 << 63)
union u64_f64 {
uint64_t u;
@ -155,11 +199,27 @@ inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
return res.d;
}

inline Float64 fsgnj64(Float64 rs1, Float64 rs2, bool n, bool x) {
u64_f64 a = {.d = rs1.get_scalar()}, b = {.d = rs2.get_scalar()};
u64_f64 res;
if (x) { // RO_FSQNJX_D
res.u = (a.u & ~F64_SIGN) | ((a.u ^ b.u) & F64_SIGN);
} else {
if (n) { // RO_FSGNJN_D
res.u = (a.u & ~F64_SIGN) | ((F64_SIGN ^ b.u) & F64_SIGN);
} else { // RO_FSGNJ_D
res.u = (a.u & ~F64_SIGN) | ((0 ^ b.u) & F64_SIGN);
}
}
return Float64::FromBits(res.u);
}
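The Float32/Float64 overloads of fsgnj32/fsgnj64 above operate on raw bit patterns so NaN payloads survive sign injection: FSGNJ combines rs1's magnitude with rs2's sign, FSGNJN with the inverse of rs2's sign, and FSGNJX with the XOR of both signs. A bit-level restatement of the plain FSGNJ case (illustrative helper, not V8 code):

#include <cstdint>

// result = magnitude of a, sign of b (single precision, raw bits).
uint32_t Fsgnj32Bits(uint32_t a_bits, uint32_t b_bits) {
  const uint32_t kSign = 1u << 31;
  return (a_bits & ~kSign) | (b_bits & kSign);
}
// Example: Fsgnj32Bits(bits(1.5f), bits(-2.0f)) yields bits(-1.5f).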
inline bool is_boxed_float(int64_t v) { return (uint32_t)((v >> 32) + 1) == 0; }
inline int64_t box_float(float v) {
return (0xFFFFFFFF00000000 | base::bit_cast<int32_t>(v));
}

inline uint64_t box_float(uint32_t v) { return (0xFFFFFFFF00000000 | v); }

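is_boxed_float/box_float above implement RISC-V NaN boxing: a 32-bit float stored in a 64-bit FP register must carry all-ones in its upper 32 bits (which also makes the wide pattern a NaN), and a single-precision read is only valid while that box is intact. A compact restatement of the same checks:

#include <cstdint>

inline uint64_t BoxFloat(uint32_t float_bits) {
  return 0xFFFFFFFF00000000ull | float_bits;  // all-ones box above the float
}
inline bool IsBoxed(uint64_t reg_bits) {
  return (reg_bits >> 32) == 0xFFFFFFFFull;   // equivalent to is_boxed_float
}
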
// -----------------------------------------------------------------------------
// Utility functions

@ -351,10 +411,9 @@ class Simulator : public SimulatorBase {
// Accessors for register state. Reading the pc value adheres to the RISC-V
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int64_t value);
void set_register(int reg, sreg_t value);
void set_register_word(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
V8_EXPORT_PRIVATE sreg_t get_register(int reg) const;
double get_double_from_register_pair(int reg);

// Same for FPURegisters.
@ -362,20 +421,24 @@ class Simulator : public SimulatorBase {
void set_fpu_register_word(int fpureg, int32_t value);
void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_float(int fpureg, Float32 value);
void set_fpu_register_double(int fpureg, double value);
void set_fpu_register_double(int fpureg, Float64 value);

int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
int32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
Float32 get_fpu_register_Float32(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
Float64 get_fpu_register_Float64(int fpureg) const;

// RV CSR manipulation
uint32_t read_csr_value(uint32_t csr);
void write_csr_value(uint32_t csr, uint64_t value);
void set_csr_bits(uint32_t csr, uint64_t flags);
void clear_csr_bits(uint32_t csr, uint64_t flags);
void write_csr_value(uint32_t csr, reg_t value);
void set_csr_bits(uint32_t csr, reg_t flags);
void clear_csr_bits(uint32_t csr, reg_t flags);

void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
@ -457,8 +520,8 @@ class Simulator : public SimulatorBase {
bool CompareFHelper(T input1, T input2, FPUCondition cc);

// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
V8_EXPORT_PRIVATE int64_t get_pc() const;
void set_pc(sreg_t value);
V8_EXPORT_PRIVATE sreg_t get_pc() const;

Address get_sp() const { return static_cast<Address>(get_register(sp)); }

@ -523,7 +586,9 @@ class Simulator : public SimulatorBase {
BYTE,
HALF,
WORD,
#if V8_TARGET_ARCH_RISCV64
DWORD,
#endif
FLOAT,
DOUBLE,
// FLOAT_DOUBLE,
@ -532,11 +597,11 @@ class Simulator : public SimulatorBase {

// RISCV Memory read/write methods
template <typename T>
T ReadMem(int64_t addr, Instruction* instr);
T ReadMem(sreg_t addr, Instruction* instr);
template <typename T>
void WriteMem(int64_t addr, T value, Instruction* instr);
void WriteMem(sreg_t addr, T value, Instruction* instr);
template <typename T, typename OP>
T amo(int64_t addr, OP f, Instruction* instr, TraceType t) {
T amo(sreg_t addr, OP f, Instruction* instr, TraceType t) {
auto lhs = ReadMem<T>(addr, instr);
// TODO(RISCV): trace memory read for AMO
WriteMem<T>(addr, (T)f(lhs), instr);
@ -546,41 +611,69 @@ class Simulator : public SimulatorBase {
// Helper for debugging memory access.
inline void DieOrDebug();

void TraceRegWr(int64_t value, TraceType t = DWORD);
void TraceMemWr(int64_t addr, int64_t value, TraceType t);
#if V8_TARGET_ARCH_RISCV32
template <typename T>
void TraceMemRd(int64_t addr, T value, int64_t reg_value);
void TraceRegWr(T value, TraceType t = WORD);
#elif V8_TARGET_ARCH_RISCV64
void TraceRegWr(sreg_t value, TraceType t = DWORD);
#endif
void TraceMemWr(sreg_t addr, sreg_t value, TraceType t);
template <typename T>
void TraceMemWr(int64_t addr, T value);
void TraceMemRd(sreg_t addr, T value, sreg_t reg_value);
void TraceMemRdDouble(sreg_t addr, double value, int64_t reg_value);
void TraceMemRdDouble(sreg_t addr, Float64 value, int64_t reg_value);
void TraceMemRdFloat(sreg_t addr, Float32 value, int64_t reg_value);

template <typename T>
void TraceMemWr(sreg_t addr, T value);
void TraceMemWrDouble(sreg_t addr, double value);

SimInstruction instr_;

// RISCV utility API to access register value
inline int32_t rs1_reg() const { return instr_.Rs1Value(); }
inline int64_t rs1() const { return get_register(rs1_reg()); }
inline sreg_t rs1() const { return get_register(rs1_reg()); }
inline float frs1() const { return get_fpu_register_float(rs1_reg()); }
inline double drs1() const { return get_fpu_register_double(rs1_reg()); }
inline Float32 frs1_boxed() const {
return get_fpu_register_Float32(rs1_reg());
}
inline Float64 drs1_boxed() const {
return get_fpu_register_Float64(rs1_reg());
}
inline int32_t rs2_reg() const { return instr_.Rs2Value(); }
inline int64_t rs2() const { return get_register(rs2_reg()); }
inline sreg_t rs2() const { return get_register(rs2_reg()); }
inline float frs2() const { return get_fpu_register_float(rs2_reg()); }
inline double drs2() const { return get_fpu_register_double(rs2_reg()); }
inline Float32 frs2_boxed() const {
return get_fpu_register_Float32(rs2_reg());
}
inline Float64 drs2_boxed() const {
return get_fpu_register_Float64(rs2_reg());
}
inline int32_t rs3_reg() const { return instr_.Rs3Value(); }
inline int64_t rs3() const { return get_register(rs3_reg()); }
inline sreg_t rs3() const { return get_register(rs3_reg()); }
inline float frs3() const { return get_fpu_register_float(rs3_reg()); }
inline double drs3() const { return get_fpu_register_double(rs3_reg()); }
inline Float32 frs3_boxed() const {
return get_fpu_register_Float32(rs3_reg());
}
inline Float64 drs3_boxed() const {
return get_fpu_register_Float64(rs3_reg());
}
inline int32_t rd_reg() const { return instr_.RdValue(); }
inline int32_t frd_reg() const { return instr_.RdValue(); }
inline int32_t rvc_rs1_reg() const { return instr_.RvcRs1Value(); }
inline int64_t rvc_rs1() const { return get_register(rvc_rs1_reg()); }
inline sreg_t rvc_rs1() const { return get_register(rvc_rs1_reg()); }
inline int32_t rvc_rs2_reg() const { return instr_.RvcRs2Value(); }
inline int64_t rvc_rs2() const { return get_register(rvc_rs2_reg()); }
inline sreg_t rvc_rs2() const { return get_register(rvc_rs2_reg()); }
inline double rvc_drs2() const {
return get_fpu_register_double(rvc_rs2_reg());
}
inline int32_t rvc_rs1s_reg() const { return instr_.RvcRs1sValue(); }
inline int64_t rvc_rs1s() const { return get_register(rvc_rs1s_reg()); }
inline sreg_t rvc_rs1s() const { return get_register(rvc_rs1s_reg()); }
inline int32_t rvc_rs2s_reg() const { return instr_.RvcRs2sValue(); }
inline int64_t rvc_rs2s() const { return get_register(rvc_rs2s_reg()); }
inline sreg_t rvc_rs2s() const { return get_register(rvc_rs2s_reg()); }
inline double rvc_drs2s() const {
return get_fpu_register_double(rvc_rs2s_reg());
}
@ -606,42 +699,87 @@ class Simulator : public SimulatorBase {
inline int16_t rvc_imm5_d() const { return instr_.RvcImm5DValue(); }
inline int16_t rvc_imm8_b() const { return instr_.RvcImm8BValue(); }

inline void set_rd(int64_t value, bool trace = true) {
inline void set_rd(sreg_t value, bool trace = true) {
set_register(rd_reg(), value);
#if V8_TARGET_ARCH_RISCV64
if (trace) TraceRegWr(get_register(rd_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
if (trace) TraceRegWr(get_register(rd_reg()), WORD);
#endif
}
inline void set_frd(float value, bool trace = true) {
set_fpu_register_float(rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register_word(rd_reg()), FLOAT);
}
inline void set_frd(Float32 value, bool trace = true) {
set_fpu_register_float(rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register_word(rd_reg()), FLOAT);
}
inline void set_drd(double value, bool trace = true) {
set_fpu_register_double(rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rd_reg()), DOUBLE);
}
inline void set_rvc_rd(int64_t value, bool trace = true) {
inline void set_drd(Float64 value, bool trace = true) {
set_fpu_register_double(rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rd_reg()), DOUBLE);
}
inline void set_rvc_rd(sreg_t value, bool trace = true) {
set_register(rvc_rd_reg(), value);
#if V8_TARGET_ARCH_RISCV64
if (trace) TraceRegWr(get_register(rvc_rd_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
if (trace) TraceRegWr(get_register(rvc_rd_reg()), WORD);
#endif
}
inline void set_rvc_rs1s(int64_t value, bool trace = true) {
inline void set_rvc_rs1s(sreg_t value, bool trace = true) {
set_register(rvc_rs1s_reg(), value);
#if V8_TARGET_ARCH_RISCV64
if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), WORD);
#endif
}
inline void set_rvc_rs2(int64_t value, bool trace = true) {
inline void set_rvc_rs2(sreg_t value, bool trace = true) {
set_register(rvc_rs2_reg(), value);
#if V8_TARGET_ARCH_RISCV64
if (trace) TraceRegWr(get_register(rvc_rs2_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
if (trace) TraceRegWr(get_register(rvc_rs2_reg()), WORD);
#endif
}
inline void set_rvc_drd(double value, bool trace = true) {
set_fpu_register_double(rvc_rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
}
inline void set_rvc_rs2s(int64_t value, bool trace = true) {
inline void set_rvc_drd(Float64 value, bool trace = true) {
set_fpu_register_double(rvc_rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
}
inline void set_rvc_frd(Float32 value, bool trace = true) {
set_fpu_register_float(rvc_rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
}
inline void set_rvc_rs2s(sreg_t value, bool trace = true) {
set_register(rvc_rs2s_reg(), value);
#if V8_TARGET_ARCH_RISCV64
if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), WORD);
#endif
}
inline void set_rvc_drs2s(double value, bool trace = true) {
set_fpu_register_double(rvc_rs2s_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), DOUBLE);
}
inline void set_rvc_drs2s(Float64 value, bool trace = true) {
set_fpu_register_double(rvc_rs2s_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), DOUBLE);
}

inline void set_rvc_frs2s(Float32 value, bool trace = true) {
set_fpu_register_float(rvc_rs2s_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), FLOAT);
}
inline int16_t shamt6() const { return (imm12() & 0x3F); }
inline int16_t shamt5() const { return (imm12() & 0x1F); }
inline int16_t rvc_shamt6() const { return instr_.RvcShamt6(); }
@ -896,15 +1034,15 @@ class Simulator : public SimulatorBase {
void CheckBreakpoints();

// Stop helper functions.
bool IsWatchpoint(uint64_t code);
void PrintWatchpoint(uint64_t code);
void HandleStop(uint64_t code);
bool IsWatchpoint(reg_t code);
void PrintWatchpoint(reg_t code);
void HandleStop(reg_t code);
bool IsStopInstruction(Instruction* instr);
bool IsEnabledStop(uint64_t code);
void EnableStop(uint64_t code);
void DisableStop(uint64_t code);
void IncreaseStopCounter(uint64_t code);
void PrintStopInfo(uint64_t code);
bool IsEnabledStop(reg_t code);
void EnableStop(reg_t code);
void DisableStop(reg_t code);
void IncreaseStopCounter(reg_t code);
void PrintStopInfo(reg_t code);

// Executes one instruction.
void InstructionDecode(Instruction* instr);
@ -938,9 +1076,9 @@ class Simulator : public SimulatorBase {

// Architecture state.
// Registers.
int64_t registers_[kNumSimuRegisters];
sreg_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
int64_t FPUregisters_[kNumFPURegisters];
sfreg_t FPUregisters_[kNumFPURegisters];
// Floating-point control and status register.
uint32_t FCSR_;

@ -956,6 +1094,8 @@ class Simulator : public SimulatorBase {
char* stack_;
bool pc_modified_;
int64_t icount_;
sreg_t* watch_address_ = nullptr;
sreg_t watch_value_ = 0;
int break_count_;
base::EmbeddedVector<char, 256> trace_buf_;

@ -1074,4 +1214,4 @@ class Simulator : public SimulatorBase {
} // namespace v8

#endif // defined(USE_SIMULATOR)
#endif // V8_EXECUTION_RISCV64_SIMULATOR_RISCV64_H_
#endif // V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_

@ -96,7 +96,8 @@ class SimulatorBase {
|
||||
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
|
||||
ConvertArg(T arg) {
|
||||
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
|
||||
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
|
||||
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 || \
|
||||
V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
|
||||
// The MIPS64, LOONG64 and RISCV64 calling convention is to sign extend all
|
||||
// values, even unsigned ones.
|
||||
using signed_t = typename std::make_signed<T>::type;
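The comment above is the heart of it: on these targets a 32-bit value occupies a register in sign-extended form even when its C++ type is unsigned, so the simulator widens every integral argument through the signed type of the same width. A self-contained sketch of that conversion (ConvertArgSketch is a hypothetical name; the logic mirrors the hunk above):

#include <cstdint>
#include <type_traits>

template <typename T>
intptr_t ConvertArgSketch(T arg) {
  static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
  // Narrow to the signed type of the same width, then widen: the
  // signed-to-intptr_t step performs the sign extension.
  using signed_t = typename std::make_signed<T>::type;
  return static_cast<intptr_t>(static_cast<signed_t>(arg));
}

// ConvertArgSketch<uint32_t>(0x80000000u) yields -2147483648 on a
// 64-bit host, i.e. the upper 32 bits come back as all ones, matching
// how the hardware keeps 32-bit values in registers.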
@ -28,8 +28,8 @@
#include "src/execution/loong64/simulator-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/simulator-s390.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/execution/riscv64/simulator-riscv64.h"
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
#include "src/execution/riscv/simulator-riscv.h"
#else
#error Unsupported target architecture.
#endif

@ -1511,7 +1511,7 @@ DEFINE_STRING(sim_arm64_optional_features, "none",
              "enable optional features on the simulator for testing: none or "
              "all")

#if defined(V8_TARGET_ARCH_RISCV64)
#if defined(V8_TARGET_ARCH_RISCV32) || defined(V8_TARGET_ARCH_RISCV64)
DEFINE_BOOL(riscv_trap_to_simulator_debugger, false,
            "enable simulator trap to debugger")
DEFINE_BOOL(riscv_debug, false, "enable debug prints")
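Both flags default to off. On a simulator build they behave like any other V8 flag, so an invocation along the lines of `d8 --riscv-debug test.js` turns on the debug prints (V8's flag parser treats the dash and underscore spellings interchangeably).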
@ -10,6 +10,7 @@
//
// Calling convention source:
// https://riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf Table 18.2
#ifdef V8_TARGET_ARCH_RISCV64
asm(".global PushAllRegistersAndIterateStack \n"
    ".type PushAllRegistersAndIterateStack, %function \n"
    ".hidden PushAllRegistersAndIterateStack \n"

@ -49,3 +50,44 @@ asm(".global PushAllRegistersAndIterateStack \n"
    " ld s0, 0(sp) \n"
    " addi sp, sp, 112 \n"
    " jr ra \n");
#elif V8_TARGET_ARCH_RISCV32
asm(".global PushAllRegistersAndIterateStack \n"
    ".type PushAllRegistersAndIterateStack, %function \n"
    ".hidden PushAllRegistersAndIterateStack \n"
    "PushAllRegistersAndIterateStack: \n"
    // Push all callee-saved registers and save return address.
    " addi sp, sp, -56 \n"
    // Save return address.
    " sw ra, 52(sp) \n"
    // sp is callee-saved.
    " sw sp, 48(sp) \n"
    // s0-s11 are callee-saved.
    " sw s11, 44(sp) \n"
    " sw s10, 40(sp) \n"
    " sw s9, 36(sp) \n"
    " sw s8, 32(sp) \n"
    " sw s7, 28(sp) \n"
    " sw s6, 24(sp) \n"
    " sw s5, 20(sp) \n"
    " sw s4, 16(sp) \n"
    " sw s3, 12(sp) \n"
    " sw s2, 8(sp) \n"
    " sw s1, 4(sp) \n"
    " sw s0, 0(sp) \n"
    // Maintain frame pointer (fp is s0).
    " mv s0, sp \n"
    // Pass 1st parameter (a0) unchanged (Stack*).
    // Pass 2nd parameter (a1) unchanged (StackVisitor*).
    // Save 3rd parameter (a2; IterateStackCallback) to a3.
    " mv a3, a2 \n"
    // Pass 3rd parameter as sp (stack pointer).
    " mv a2, sp \n"
    // Call the callback.
    " jalr a3 \n"
    // Load return address.
    " lw ra, 52(sp) \n"
    // Restore frame pointer.
    " lw s0, 0(sp) \n"
    " addi sp, sp, 56 \n"
    " jr ra \n");
#endif
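The arithmetic behind the RV32 frame: 14 slots (ra, sp, and s0 through s11) times 4 bytes gives the 56 bytes reserved by `addi sp, sp, -56`, against 112 bytes for the same 14 slots at 8 bytes each in the RV64 version above it. A hypothetical struct restating the offsets used by the sw stores:

#include <cstdint>

// Offsets mirror the sw instructions above; the names are illustrative.
struct Rv32IterateFrame {
  uint32_t s0;          //  0(sp): doubles as the frame pointer
  uint32_t s1;          //  4(sp)
  uint32_t s2_s11[10];  //  8(sp)..44(sp): remaining callee-saved regs
  uint32_t sp;          // 48(sp): sp is treated as callee-saved here
  uint32_t ra;          // 52(sp): return address
};
static_assert(sizeof(Rv32IterateFrame) == 56,
              "14 slots * 4 bytes matches addi sp, sp, -56");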
@ -1448,7 +1448,8 @@ void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || \
    V8_TARGET_ARCH_RISCV32
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \

@ -459,7 +459,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
  state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[14]);
#elif V8_HOST_ARCH_RISCV64
#elif V8_HOST_ARCH_RISCV64 || V8_HOST_ARCH_RISCV32
  // Spec CH.25 RISC-V Assembly Programmer’s Handbook
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[REG_SP]);
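For the RISC-V branch, pc and sp come straight out of the general-register file saved in the signal context (the hunk is cut off mid-function here). A self-contained sketch of the same extraction, assuming a glibc Linux RISC-V host; the handler name and the fp read are illustrative:

// _GNU_SOURCE exposes the REG_* indices in <sys/ucontext.h> on glibc.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <signal.h>
#include <sys/ucontext.h>

void SamplerSignalHandler(int, siginfo_t*, void* context) {
  ucontext_t* ucontext = static_cast<ucontext_t*>(context);
  const mcontext_t& mcontext = ucontext->uc_mcontext;
  void* pc = reinterpret_cast<void*>(mcontext.__gregs[REG_PC]);
  void* sp = reinterpret_cast<void*>(mcontext.__gregs[REG_SP]);
  void* fp = reinterpret_cast<void*>(mcontext.__gregs[REG_S0]);  // s0 == fp
  (void)pc; (void)sp; (void)fp;  // hand these to the profiler's state
}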
@ -702,6 +702,8 @@ void LowLevelLogger::LogCodeInfo() {
  const char arch[] = "s390";
#elif V8_TARGET_ARCH_RISCV64
  const char arch[] = "riscv64";
#elif V8_TARGET_ARCH_RISCV32
  const char arch[] = "riscv32";
#else
  const char arch[] = "unknown";
#endif

@ -372,10 +372,11 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
    defined(V8_TARGET_ARCH_MIPS64)
  return RelocIterator(*this, kModeMask).done();
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
    defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
    defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
    defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
    defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \
    defined(V8_TARGET_ARCH_RISCV32)
  for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
    // On these platforms we emit relative builtin-to-builtin
    // jumps for isolate independent builtins in the snapshot. They are later

@ -740,6 +740,8 @@ class Code : public HeapObject {
  static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 8 : 20;
#elif V8_TARGET_ARCH_RISCV64
  static constexpr int kHeaderPaddingSize = (COMPRESS_POINTERS_BOOL ? 8 : 20);
#elif V8_TARGET_ARCH_RISCV32
  static constexpr int kHeaderPaddingSize = 8;
#else
#error Unknown architecture.
#endif
Some files were not shown because too many files have changed in this diff.