[LOONG64] Add LoongArch64 backend
Bug: v8:12008 Change-Id: I2e1d918a1370dae1e15919fbf02d69cbe48f63bf Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3089095 Reviewed-by: Georg Neis <neis@chromium.org> Reviewed-by: Jakob Gruber <jgruber@chromium.org> Reviewed-by: Hannes Payer <hpayer@chromium.org> Reviewed-by: Clemens Backes <clemensb@chromium.org> Commit-Queue: Jakob Gruber <jgruber@chromium.org> Cr-Commit-Position: refs/heads/master@{#76308}
This commit is contained in:
parent
c149551809
commit
816e9fa3b9
54
BUILD.gn
54
BUILD.gn
@ -286,7 +286,9 @@ declare_args() {
|
||||
cppgc_enable_object_names = false
|
||||
|
||||
# Enable heap reservation of size 4GB. Only possible for 64bit archs.
|
||||
cppgc_enable_caged_heap = v8_current_cpu == "x64" || v8_current_cpu == "arm64"
|
||||
cppgc_enable_caged_heap =
|
||||
v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
|
||||
v8_current_cpu == "loong64"
|
||||
|
||||
# Enable verification of live bytes in the marking verifier.
|
||||
# TODO(v8:11785): Enable by default when running with the verifier.
|
||||
@ -506,7 +508,7 @@ assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
|
||||
"Write barriers can't be both enabled and disabled")
|
||||
|
||||
assert(!cppgc_enable_caged_heap || v8_current_cpu == "x64" ||
|
||||
v8_current_cpu == "arm64",
|
||||
v8_current_cpu == "arm64" || v8_current_cpu == "loong64",
|
||||
"CppGC caged heap requires 64bit platforms")
|
||||
|
||||
assert(!cppgc_enable_young_generation || cppgc_enable_caged_heap,
|
||||
@ -1062,6 +1064,15 @@ config("toolchain") {
|
||||
defines += [ "_MIPS_ARCH_MIPS64R2" ]
|
||||
}
|
||||
}
|
||||
|
||||
# loong64 simulators.
|
||||
if (target_is_simulator && v8_current_cpu == "loong64") {
|
||||
defines += [ "_LOONG64_TARGET_SIMULATOR" ]
|
||||
}
|
||||
if (v8_current_cpu == "loong64") {
|
||||
defines += [ "V8_TARGET_ARCH_LOONG64" ]
|
||||
}
|
||||
|
||||
if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
|
||||
defines += [ "V8_TARGET_ARCH_S390" ]
|
||||
cflags += [ "-ffp-contract=off" ]
|
||||
@ -2208,6 +2219,11 @@ v8_source_set("v8_initializers") {
|
||||
### gcmole(arch:mips64el) ###
|
||||
"src/builtins/mips64/builtins-mips64.cc",
|
||||
]
|
||||
} else if (v8_current_cpu == "loong64") {
|
||||
sources += [
|
||||
### gcmole(arch:loong64) ###
|
||||
"src/builtins/loong64/builtins-loong64.cc",
|
||||
]
|
||||
} else if (v8_current_cpu == "ppc") {
|
||||
sources += [
|
||||
### gcmole(arch:ppc) ###
|
||||
@ -3421,6 +3437,21 @@ v8_header_set("v8_internal_headers") {
|
||||
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
|
||||
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
|
||||
]
|
||||
} else if (v8_current_cpu == "loong64") {
|
||||
sources += [ ### gcmole(arch:loong64) ###
|
||||
"src/baseline/loong64/baseline-assembler-loong64-inl.h",
|
||||
"src/baseline/loong64/baseline-compiler-loong64-inl.h",
|
||||
"src/codegen/loong64/assembler-loong64-inl.h",
|
||||
"src/codegen/loong64/assembler-loong64.h",
|
||||
"src/codegen/loong64/constants-loong64.h",
|
||||
"src/codegen/loong64/macro-assembler-loong64.h",
|
||||
"src/codegen/loong64/register-loong64.h",
|
||||
"src/compiler/backend/loong64/instruction-codes-loong64.h",
|
||||
"src/execution/loong64/frame-constants-loong64.h",
|
||||
"src/execution/loong64/simulator-loong64.h",
|
||||
"src/regexp/loong64/regexp-macro-assembler-loong64.h",
|
||||
"src/wasm/baseline/loong64/liftoff-assembler-loong64.h",
|
||||
]
|
||||
} else if (v8_current_cpu == "ppc") {
|
||||
sources += [ ### gcmole(arch:ppc) ###
|
||||
"src/codegen/ppc/assembler-ppc-inl.h",
|
||||
@ -4339,6 +4370,23 @@ v8_source_set("v8_base_without_compiler") {
|
||||
"src/execution/mips64/simulator-mips64.cc",
|
||||
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
|
||||
]
|
||||
} else if (v8_current_cpu == "loong64") {
|
||||
sources += [ ### gcmole(arch:loong64) ###
|
||||
"src/codegen/loong64/assembler-loong64.cc",
|
||||
"src/codegen/loong64/constants-loong64.cc",
|
||||
"src/codegen/loong64/cpu-loong64.cc",
|
||||
"src/codegen/loong64/interface-descriptors-loong64-inl.h",
|
||||
"src/codegen/loong64/macro-assembler-loong64.cc",
|
||||
"src/compiler/backend/loong64/code-generator-loong64.cc",
|
||||
"src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
|
||||
"src/compiler/backend/loong64/instruction-selector-loong64.cc",
|
||||
"src/deoptimizer/loong64/deoptimizer-loong64.cc",
|
||||
"src/diagnostics/loong64/disasm-loong64.cc",
|
||||
"src/diagnostics/loong64/unwinder-loong64.cc",
|
||||
"src/execution/loong64/frame-constants-loong64.cc",
|
||||
"src/execution/loong64/simulator-loong64.cc",
|
||||
"src/regexp/loong64/regexp-macro-assembler-loong64.cc",
|
||||
]
|
||||
} else if (v8_current_cpu == "ppc") {
|
||||
sources += [ ### gcmole(arch:ppc) ###
|
||||
"src/codegen/ppc/assembler-ppc.cc",
|
||||
@ -5040,6 +5088,8 @@ v8_source_set("v8_cppgc_shared") {
|
||||
sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ]
|
||||
} else if (current_cpu == "mips64el") {
|
||||
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
|
||||
} else if (current_cpu == "loong64") {
|
||||
sources += [ "src/heap/base/asm/loong64/push_registers_asm.cc" ]
|
||||
} else if (current_cpu == "riscv64") {
|
||||
sources += [ "src/heap/base/asm/riscv64/push_registers_asm.cc" ]
|
||||
}
|
||||
|
3
LOONG_OWNERS
Normal file
3
LOONG_OWNERS
Normal file
@ -0,0 +1,3 @@
|
||||
liuyu@loongson.cn
|
||||
yuyin-hf@loongson.cn
|
||||
zhaojiazhong-hf@loongson.cn
|
1
OWNERS
1
OWNERS
@ -27,6 +27,7 @@ per-file codereview.settings=file:INFRA_OWNERS
|
||||
per-file AUTHORS=file:COMMON_OWNERS
|
||||
per-file WATCHLISTS=file:COMMON_OWNERS
|
||||
|
||||
per-file ...-loong64*=file:LOONG_OWNERS
|
||||
per-file ...-mips*=file:MIPS_OWNERS
|
||||
per-file ...-mips64*=file:MIPS_OWNERS
|
||||
per-file ...-ppc*=file:PPC_OWNERS
|
||||
|
@ -84,7 +84,7 @@ if (v8_snapshot_toolchain == "") {
|
||||
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
|
||||
_cpus = v8_current_cpu
|
||||
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
|
||||
v8_current_cpu == "riscv64") {
|
||||
v8_current_cpu == "riscv64" || v8_current_cpu == "loong64") {
|
||||
if (is_win && v8_current_cpu == "arm64") {
|
||||
# set _cpus to blank for Windows ARM64 so host_toolchain could be
|
||||
# selected as snapshot toolchain later.
|
||||
|
@ -17,9 +17,10 @@ struct CalleeSavedRegisters {
|
||||
void* arm_r9;
|
||||
void* arm_r10;
|
||||
};
|
||||
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
|
||||
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
|
||||
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390
|
||||
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
|
||||
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
|
||||
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \
|
||||
V8_TARGET_ARCH_LOONG64
|
||||
struct CalleeSavedRegisters {};
|
||||
#else
|
||||
#error Target architecture was not detected as supported by v8
|
||||
|
@ -33,6 +33,9 @@
|
||||
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
|
||||
#define V8_HOST_ARCH_MIPS 1
|
||||
#define V8_HOST_ARCH_32_BIT 1
|
||||
#elif defined(__loongarch64)
|
||||
#define V8_HOST_ARCH_LOONG64 1
|
||||
#define V8_HOST_ARCH_64_BIT 1
|
||||
#elif defined(__PPC64__) || defined(_ARCH_PPC64)
|
||||
#define V8_HOST_ARCH_PPC64 1
|
||||
#define V8_HOST_ARCH_64_BIT 1
|
||||
@ -83,7 +86,7 @@
|
||||
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
|
||||
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
|
||||
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
|
||||
!V8_TARGET_ARCH_RISCV64
|
||||
!V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
|
||||
#if defined(_M_X64) || defined(__x86_64__)
|
||||
#define V8_TARGET_ARCH_X64 1
|
||||
#elif defined(_M_IX86) || defined(__i386__)
|
||||
@ -128,6 +131,8 @@
|
||||
#define V8_TARGET_ARCH_32_BIT 1
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#define V8_TARGET_ARCH_64_BIT 1
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#define V8_TARGET_ARCH_64_BIT 1
|
||||
#elif V8_TARGET_ARCH_PPC
|
||||
#define V8_TARGET_ARCH_32_BIT 1
|
||||
#elif V8_TARGET_ARCH_PPC64
|
||||
@ -171,6 +176,9 @@
|
||||
#if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
|
||||
#error Target architecture riscv64 is only supported on riscv64 and x64 host
|
||||
#endif
|
||||
#if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64))
|
||||
#error Target architecture loong64 is only supported on loong64 and x64 host
|
||||
#endif
|
||||
|
||||
// Determine architecture endianness.
|
||||
#if V8_TARGET_ARCH_IA32
|
||||
@ -181,6 +189,8 @@
|
||||
#define V8_TARGET_LITTLE_ENDIAN 1
|
||||
#elif V8_TARGET_ARCH_ARM64
|
||||
#define V8_TARGET_LITTLE_ENDIAN 1
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#define V8_TARGET_LITTLE_ENDIAN 1
|
||||
#elif V8_TARGET_ARCH_MIPS
|
||||
#if defined(__MIPSEB__)
|
||||
#define V8_TARGET_BIG_ENDIAN 1
|
||||
|
@ -341,6 +341,10 @@ void* OS::GetRandomMmapAddr() {
|
||||
// TODO(RISCV): We need more information from the kernel to correctly mask
|
||||
// this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
|
||||
raw_addr &= uint64_t{0xFFFFFF0000};
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
|
||||
// to fulfill request.
|
||||
raw_addr &= uint64_t{0xFFFFFF0000};
|
||||
#else
|
||||
raw_addr &= 0x3FFFF000;
|
||||
|
||||
@ -544,6 +548,8 @@ void OS::DebugBreak() {
|
||||
asm("break");
|
||||
#elif V8_HOST_ARCH_MIPS64
|
||||
asm("break");
|
||||
#elif V8_HOST_ARCH_LOONG64
|
||||
asm("break 0");
|
||||
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
|
||||
asm("twge 2,2");
|
||||
#elif V8_HOST_ARCH_IA32
|
||||
|
@ -34,6 +34,8 @@
|
||||
#include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
|
||||
#elif V8_TARGET_ARCH_MIPS
|
||||
#include "src/baseline/mips/baseline-assembler-mips-inl.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/baseline/loong64/baseline-assembler-loong64-inl.h"
|
||||
#else
|
||||
#error Unsupported target architecture.
|
||||
#endif
|
||||
|
@ -48,6 +48,8 @@
|
||||
#include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
|
||||
#elif V8_TARGET_ARCH_MIPS
|
||||
#include "src/baseline/mips/baseline-compiler-mips-inl.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/baseline/loong64/baseline-compiler-loong64-inl.h"
|
||||
#else
|
||||
#error Unsupported target architecture.
|
||||
#endif
|
||||
|
501
src/baseline/loong64/baseline-assembler-loong64-inl.h
Normal file
501
src/baseline/loong64/baseline-assembler-loong64-inl.h
Normal file
@ -0,0 +1,501 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
|
||||
#define V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
|
||||
|
||||
#include "src/baseline/baseline-assembler.h"
|
||||
#include "src/codegen/interface-descriptors.h"
|
||||
#include "src/codegen/loong64/assembler-loong64-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace baseline {
|
||||
|
||||
class BaselineAssembler::ScratchRegisterScope {
|
||||
public:
|
||||
explicit ScratchRegisterScope(BaselineAssembler* assembler)
|
||||
: assembler_(assembler),
|
||||
prev_scope_(assembler->scratch_register_scope_),
|
||||
wrapped_scope_(assembler->masm()) {
|
||||
if (!assembler_->scratch_register_scope_) {
|
||||
// If we haven't opened a scratch scope yet, for the first one add a
|
||||
// couple of extra registers.
|
||||
wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit());
|
||||
}
|
||||
assembler_->scratch_register_scope_ = this;
|
||||
}
|
||||
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
|
||||
|
||||
Register AcquireScratch() { return wrapped_scope_.Acquire(); }
|
||||
|
||||
private:
|
||||
BaselineAssembler* assembler_;
|
||||
ScratchRegisterScope* prev_scope_;
|
||||
UseScratchRegisterScope wrapped_scope_;
|
||||
};
|
||||
|
||||
enum class Condition : uint32_t {
|
||||
kEqual = eq,
|
||||
kNotEqual = ne,
|
||||
|
||||
kLessThan = lt,
|
||||
kGreaterThan = gt,
|
||||
kLessThanEqual = le,
|
||||
kGreaterThanEqual = ge,
|
||||
|
||||
kUnsignedLessThan = Uless,
|
||||
kUnsignedGreaterThan = Ugreater,
|
||||
kUnsignedLessThanEqual = Uless_equal,
|
||||
kUnsignedGreaterThanEqual = Ugreater_equal,
|
||||
|
||||
kOverflow = overflow,
|
||||
kNoOverflow = no_overflow,
|
||||
|
||||
kZero = eq,
|
||||
kNotZero = ne,
|
||||
};
|
||||
|
||||
inline internal::Condition AsMasmCondition(Condition cond) {
|
||||
STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
|
||||
return static_cast<internal::Condition>(cond);
|
||||
}
|
||||
|
||||
namespace detail {
|
||||
|
||||
#ifdef DEBUG
|
||||
inline bool Clobbers(Register target, MemOperand op) {
|
||||
return op.base() == target || op.index() == target;
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace detail
|
||||
|
||||
#define __ masm_->
|
||||
|
||||
MemOperand BaselineAssembler::RegisterFrameOperand(
|
||||
interpreter::Register interpreter_register) {
|
||||
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
|
||||
}
|
||||
MemOperand BaselineAssembler::FeedbackVectorOperand() {
|
||||
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
|
||||
}
|
||||
|
||||
void BaselineAssembler::Bind(Label* label) { __ bind(label); }
|
||||
|
||||
void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
|
||||
|
||||
void BaselineAssembler::JumpTarget() {
|
||||
// NOP.
|
||||
}
|
||||
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
|
||||
__ Branch(target);
|
||||
}
|
||||
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
|
||||
Label* target, Label::Distance) {
|
||||
__ JumpIfRoot(value, index, target);
|
||||
}
|
||||
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
|
||||
Label* target, Label::Distance) {
|
||||
__ JumpIfNotRoot(value, index, target);
|
||||
}
|
||||
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
|
||||
Label::Distance) {
|
||||
ScratchRegisterScope temps(this);
|
||||
Register temp = temps.AcquireScratch();
|
||||
__ JumpIfSmi(value, target, temp);
|
||||
}
|
||||
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
|
||||
Label::Distance) {
|
||||
ScratchRegisterScope temps(this);
|
||||
Register temp = temps.AcquireScratch();
|
||||
__ JumpIfNotSmi(value, target, temp);
|
||||
}
|
||||
|
||||
void BaselineAssembler::CallBuiltin(Builtin builtin) {
|
||||
ASM_CODE_COMMENT_STRING(masm_,
|
||||
__ CommentForOffHeapTrampoline("call", builtin));
|
||||
Register temp = t7;
|
||||
__ LoadEntryFromBuiltin(builtin, temp);
|
||||
__ Call(temp);
|
||||
}
|
||||
|
||||
void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
|
||||
ASM_CODE_COMMENT_STRING(masm_,
|
||||
__ CommentForOffHeapTrampoline("tail call", builtin));
|
||||
Register temp = t7;
|
||||
__ LoadEntryFromBuiltin(builtin, temp);
|
||||
__ Jump(temp);
|
||||
}
|
||||
|
||||
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
|
||||
Label* target, Label::Distance) {
|
||||
ScratchRegisterScope temps(this);
|
||||
Register scratch = temps.AcquireScratch();
|
||||
__ And(scratch, value, Operand(mask));
|
||||
__ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
|
||||
}
|
||||
|
||||
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
|
||||
Label* target, Label::Distance) {
|
||||
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
|
||||
}
|
||||
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
|
||||
InstanceType instance_type,
|
||||
Register map, Label* target,
|
||||
Label::Distance) {
|
||||
ScratchRegisterScope temps(this);
|
||||
Register type = temps.AcquireScratch();
|
||||
__ GetObjectType(object, map, type);
|
||||
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
|
||||
}
|
||||
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
|
||||
InstanceType instance_type,
|
||||
Label* target, Label::Distance) {
|
||||
ScratchRegisterScope temps(this);
|
||||
Register type = temps.AcquireScratch();
|
||||
if (FLAG_debug_code) {
|
||||
__ AssertNotSmi(map);
|
||||
__ GetObjectType(map, type, type);
|
||||
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
|
||||
}
|
||||
__ Ld_d(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
|
||||
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
|
||||
}
|
||||
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
|
||||
Label* target, Label::Distance) {
|
||||
ScratchRegisterScope temps(this);
|
||||
Register scratch = temps.AcquireScratch();
|
||||
__ li(scratch, Operand(smi));
|
||||
__ SmiUntag(scratch);
|
||||
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
|
||||
}
|
||||
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
|
||||
Label* target, Label::Distance) {
|
||||
__ AssertSmi(lhs);
|
||||
__ AssertSmi(rhs);
|
||||
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
|
||||
}
|
||||
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
|
||||
MemOperand operand, Label* target,
|
||||
Label::Distance) {
|
||||
ScratchRegisterScope temps(this);
|
||||
Register scratch = temps.AcquireScratch();
|
||||
__ Ld_d(scratch, operand);
|
||||
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
|
||||
}
|
||||
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
|
||||
Register value, Label* target,
|
||||
Label::Distance) {
|
||||
ScratchRegisterScope temps(this);
|
||||
Register scratch = temps.AcquireScratch();
|
||||
__ Ld_d(scratch, operand);
|
||||
__ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
|
||||
}
|
||||
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
|
||||
Label* target, Label::Distance) {
|
||||
__ Branch(target, AsMasmCondition(cc), value, Operand(byte));
|
||||
}
|
||||
void BaselineAssembler::Move(interpreter::Register output, Register source) {
|
||||
Move(RegisterFrameOperand(output), source);
|
||||
}
|
||||
void BaselineAssembler::Move(Register output, TaggedIndex value) {
|
||||
__ li(output, Operand(value.ptr()));
|
||||
}
|
||||
void BaselineAssembler::Move(MemOperand output, Register source) {
|
||||
__ St_d(source, output);
|
||||
}
|
||||
void BaselineAssembler::Move(Register output, ExternalReference reference) {
|
||||
__ li(output, Operand(reference));
|
||||
}
|
||||
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
|
||||
__ li(output, Operand(value));
|
||||
}
|
||||
void BaselineAssembler::Move(Register output, int32_t value) {
|
||||
__ li(output, Operand(value));
|
||||
}
|
||||
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
|
||||
__ Move(output, source);
|
||||
}
|
||||
void BaselineAssembler::MoveSmi(Register output, Register source) {
|
||||
__ Move(output, source);
|
||||
}
|
||||
|
||||
namespace detail {
|
||||
|
||||
template <typename Arg>
|
||||
inline Register ToRegister(BaselineAssembler* basm,
|
||||
BaselineAssembler::ScratchRegisterScope* scope,
|
||||
Arg arg) {
|
||||
Register reg = scope->AcquireScratch();
|
||||
basm->Move(reg, arg);
|
||||
return reg;
|
||||
}
|
||||
inline Register ToRegister(BaselineAssembler* basm,
|
||||
BaselineAssembler::ScratchRegisterScope* scope,
|
||||
Register reg) {
|
||||
return reg;
|
||||
}
|
||||
|
||||
template <typename... Args>
|
||||
struct PushAllHelper;
|
||||
template <>
|
||||
struct PushAllHelper<> {
|
||||
static int Push(BaselineAssembler* basm) { return 0; }
|
||||
static int PushReverse(BaselineAssembler* basm) { return 0; }
|
||||
};
|
||||
// TODO(ishell): try to pack sequence of pushes into one instruction by
|
||||
// looking at regiser codes. For example, Push(r1, r2, r5, r0, r3, r4)
|
||||
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
|
||||
template <typename Arg>
|
||||
struct PushAllHelper<Arg> {
|
||||
static int Push(BaselineAssembler* basm, Arg arg) {
|
||||
BaselineAssembler::ScratchRegisterScope scope(basm);
|
||||
basm->masm()->Push(ToRegister(basm, &scope, arg));
|
||||
return 1;
|
||||
}
|
||||
static int PushReverse(BaselineAssembler* basm, Arg arg) {
|
||||
return Push(basm, arg);
|
||||
}
|
||||
};
|
||||
// TODO(ishell): try to pack sequence of pushes into one instruction by
|
||||
// looking at regiser codes. For example, Push(r1, r2, r5, r0, r3, r4)
|
||||
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
|
||||
template <typename Arg, typename... Args>
|
||||
struct PushAllHelper<Arg, Args...> {
|
||||
static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
|
||||
PushAllHelper<Arg>::Push(basm, arg);
|
||||
return 1 + PushAllHelper<Args...>::Push(basm, args...);
|
||||
}
|
||||
static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
|
||||
int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
|
||||
PushAllHelper<Arg>::Push(basm, arg);
|
||||
return nargs + 1;
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct PushAllHelper<interpreter::RegisterList> {
|
||||
static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
|
||||
for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
|
||||
PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
|
||||
}
|
||||
return list.register_count();
|
||||
}
|
||||
static int PushReverse(BaselineAssembler* basm,
|
||||
interpreter::RegisterList list) {
|
||||
for (int reg_index = list.register_count() - 1; reg_index >= 0;
|
||||
--reg_index) {
|
||||
PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
|
||||
}
|
||||
return list.register_count();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename... T>
|
||||
struct PopAllHelper;
|
||||
template <>
|
||||
struct PopAllHelper<> {
|
||||
static void Pop(BaselineAssembler* basm) {}
|
||||
};
|
||||
// TODO(ishell): try to pack sequence of pops into one instruction by
|
||||
// looking at regiser codes. For example, Pop(r1, r2, r5, r0, r3, r4)
|
||||
// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
|
||||
template <>
|
||||
struct PopAllHelper<Register> {
|
||||
static void Pop(BaselineAssembler* basm, Register reg) {
|
||||
basm->masm()->Pop(reg);
|
||||
}
|
||||
};
|
||||
template <typename... T>
|
||||
struct PopAllHelper<Register, T...> {
|
||||
static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
|
||||
PopAllHelper<Register>::Pop(basm, reg);
|
||||
PopAllHelper<T...>::Pop(basm, tail...);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
template <typename... T>
|
||||
int BaselineAssembler::Push(T... vals) {
|
||||
return detail::PushAllHelper<T...>::Push(this, vals...);
|
||||
}
|
||||
|
||||
template <typename... T>
|
||||
void BaselineAssembler::PushReverse(T... vals) {
|
||||
detail::PushAllHelper<T...>::PushReverse(this, vals...);
|
||||
}
|
||||
|
||||
template <typename... T>
|
||||
void BaselineAssembler::Pop(T... registers) {
|
||||
detail::PopAllHelper<T...>::Pop(this, registers...);
|
||||
}
|
||||
|
||||
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
|
||||
int offset) {
|
||||
__ Ld_d(output, FieldMemOperand(source, offset));
|
||||
}
|
||||
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
|
||||
int offset) {
|
||||
__ Ld_d(output, FieldMemOperand(source, offset));
|
||||
}
|
||||
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
|
||||
int offset) {
|
||||
__ Ld_d(output, FieldMemOperand(source, offset));
|
||||
}
|
||||
void BaselineAssembler::LoadByteField(Register output, Register source,
|
||||
int offset) {
|
||||
__ Ld_b(output, FieldMemOperand(source, offset));
|
||||
}
|
||||
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
|
||||
Smi value) {
|
||||
ASM_CODE_COMMENT(masm_);
|
||||
ScratchRegisterScope temps(this);
|
||||
Register scratch = temps.AcquireScratch();
|
||||
__ li(scratch, Operand(value));
|
||||
__ St_d(scratch, FieldMemOperand(target, offset));
|
||||
}
|
||||
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
|
||||
int offset,
|
||||
Register value) {
|
||||
ASM_CODE_COMMENT(masm_);
|
||||
__ St_d(value, FieldMemOperand(target, offset));
|
||||
ScratchRegisterScope temps(this);
|
||||
__ RecordWriteField(target, offset, value, kRAHasNotBeenSaved,
|
||||
SaveFPRegsMode::kIgnore);
|
||||
}
|
||||
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
|
||||
int offset,
|
||||
Register value) {
|
||||
__ St_d(value, FieldMemOperand(target, offset));
|
||||
}
|
||||
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
|
||||
int32_t weight, Label* skip_interrupt_label) {
|
||||
ASM_CODE_COMMENT(masm_);
|
||||
ScratchRegisterScope scratch_scope(this);
|
||||
Register feedback_cell = scratch_scope.AcquireScratch();
|
||||
LoadFunction(feedback_cell);
|
||||
LoadTaggedPointerField(feedback_cell, feedback_cell,
|
||||
JSFunction::kFeedbackCellOffset);
|
||||
|
||||
Register interrupt_budget = scratch_scope.AcquireScratch();
|
||||
__ Ld_w(interrupt_budget,
|
||||
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
|
||||
__ Add_w(interrupt_budget, interrupt_budget, weight);
|
||||
__ St_w(interrupt_budget,
|
||||
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
|
||||
if (skip_interrupt_label) {
|
||||
DCHECK_LT(weight, 0);
|
||||
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
|
||||
}
|
||||
}
|
||||
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
|
||||
Register weight, Label* skip_interrupt_label) {
|
||||
ASM_CODE_COMMENT(masm_);
|
||||
ScratchRegisterScope scratch_scope(this);
|
||||
Register feedback_cell = scratch_scope.AcquireScratch();
|
||||
LoadFunction(feedback_cell);
|
||||
LoadTaggedPointerField(feedback_cell, feedback_cell,
|
||||
JSFunction::kFeedbackCellOffset);
|
||||
|
||||
Register interrupt_budget = scratch_scope.AcquireScratch();
|
||||
__ Ld_w(interrupt_budget,
|
||||
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
|
||||
__ Add_w(interrupt_budget, interrupt_budget, weight);
|
||||
__ St_w(interrupt_budget,
|
||||
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
|
||||
if (skip_interrupt_label)
|
||||
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
|
||||
}
|
||||
|
||||
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
|
||||
__ Add_d(lhs, lhs, Operand(rhs));
|
||||
}
|
||||
|
||||
void BaselineAssembler::Switch(Register reg, int case_value_base,
|
||||
Label** labels, int num_labels) {
|
||||
ASM_CODE_COMMENT(masm_);
|
||||
Label fallthrough;
|
||||
if (case_value_base > 0) {
|
||||
__ Sub_d(reg, reg, Operand(case_value_base));
|
||||
}
|
||||
|
||||
ScratchRegisterScope scope(this);
|
||||
Register scratch = scope.AcquireScratch();
|
||||
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
|
||||
reg, Operand(num_labels));
|
||||
int entry_size_log2 = 2;
|
||||
__ pcaddi(scratch, 3);
|
||||
__ Alsl_d(scratch, reg, scratch, entry_size_log2);
|
||||
__ Jump(scratch);
|
||||
{
|
||||
TurboAssembler::BlockTrampolinePoolScope(masm());
|
||||
__ BlockTrampolinePoolFor(num_labels * kInstrSize);
|
||||
for (int i = 0; i < num_labels; ++i) {
|
||||
__ Branch(labels[i]);
|
||||
}
|
||||
__ bind(&fallthrough);
|
||||
}
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
#define __ basm.
|
||||
|
||||
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
|
||||
ASM_CODE_COMMENT(masm);
|
||||
BaselineAssembler basm(masm);
|
||||
|
||||
Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
|
||||
Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
|
||||
|
||||
{
|
||||
ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");
|
||||
|
||||
Label skip_interrupt_label;
|
||||
__ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
|
||||
__ masm()->SmiTag(params_size);
|
||||
__ masm()->Push(params_size, kInterpreterAccumulatorRegister);
|
||||
|
||||
__ LoadContext(kContextRegister);
|
||||
__ LoadFunction(kJSFunctionRegister);
|
||||
__ masm()->Push(kJSFunctionRegister);
|
||||
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
|
||||
|
||||
__ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
|
||||
__ masm()->SmiUntag(params_size);
|
||||
__ Bind(&skip_interrupt_label);
|
||||
}
|
||||
|
||||
BaselineAssembler::ScratchRegisterScope temps(&basm);
|
||||
Register actual_params_size = temps.AcquireScratch();
|
||||
// Compute the size of the actual parameters + receiver (in bytes).
|
||||
__ Move(actual_params_size,
|
||||
MemOperand(fp, StandardFrameConstants::kArgCOffset));
|
||||
|
||||
// If actual is bigger than formal, then we should use it to free up the stack
|
||||
// arguments.
|
||||
Label corrected_args_count;
|
||||
__ masm()->Branch(&corrected_args_count, ge, params_size,
|
||||
Operand(actual_params_size));
|
||||
__ masm()->Move(params_size, actual_params_size);
|
||||
__ Bind(&corrected_args_count);
|
||||
|
||||
// Leave the frame (also dropping the register file).
|
||||
__ masm()->LeaveFrame(StackFrame::BASELINE);
|
||||
|
||||
// Drop receiver + arguments.
|
||||
__ masm()->Add_d(params_size, params_size, 1); // Include the receiver.
|
||||
__ masm()->Alsl_d(sp, params_size, sp, kPointerSizeLog2);
|
||||
__ masm()->Ret();
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
} // namespace baseline
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
|
77
src/baseline/loong64/baseline-compiler-loong64-inl.h
Normal file
77
src/baseline/loong64/baseline-compiler-loong64-inl.h
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
|
||||
#define V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
|
||||
|
||||
#include "src/base/logging.h"
|
||||
#include "src/baseline/baseline-compiler.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace baseline {
|
||||
|
||||
#define __ basm_.
|
||||
|
||||
void BaselineCompiler::Prologue() {
|
||||
ASM_CODE_COMMENT(&masm_);
|
||||
__ masm()->EnterFrame(StackFrame::BASELINE);
|
||||
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
|
||||
int max_frame_size =
|
||||
bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
|
||||
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
|
||||
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
|
||||
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
|
||||
|
||||
PrologueFillFrame();
|
||||
}
|
||||
|
||||
void BaselineCompiler::PrologueFillFrame() {
|
||||
ASM_CODE_COMMENT(&masm_);
|
||||
// Inlined register frame fill
|
||||
interpreter::Register new_target_or_generator_register =
|
||||
bytecode_->incoming_new_target_or_generator_register();
|
||||
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
|
||||
int register_count = bytecode_->register_count();
|
||||
// Magic value
|
||||
const int kLoopUnrollSize = 8;
|
||||
const int new_target_index = new_target_or_generator_register.index();
|
||||
const bool has_new_target = new_target_index != kMaxInt;
|
||||
if (has_new_target) {
|
||||
DCHECK_LE(new_target_index, register_count);
|
||||
__ masm()->Add_d(sp, sp, Operand(-(kPointerSize * new_target_index)));
|
||||
for (int i = 0; i < new_target_index; i++) {
|
||||
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
|
||||
}
|
||||
// Push new_target_or_generator.
|
||||
__ Push(kJavaScriptCallNewTargetRegister);
|
||||
register_count -= new_target_index + 1;
|
||||
}
|
||||
if (register_count < 2 * kLoopUnrollSize) {
|
||||
// If the frame is small enough, just unroll the frame fill completely.
|
||||
__ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
|
||||
for (int i = 0; i < register_count; ++i) {
|
||||
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
|
||||
}
|
||||
} else {
|
||||
__ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
|
||||
for (int i = 0; i < register_count; ++i) {
|
||||
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void BaselineCompiler::VerifyFrameSize() {
|
||||
ASM_CODE_COMMENT(&masm_);
|
||||
__ masm()->Add_d(t0, sp,
|
||||
Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
|
||||
bytecode_->frame_size()));
|
||||
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, t0, Operand(fp));
|
||||
}
|
||||
|
||||
} // namespace baseline
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
|
3738
src/builtins/loong64/builtins-loong64.cc
Normal file
3738
src/builtins/loong64/builtins-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
@ -21,6 +21,8 @@
|
||||
#include "src/codegen/mips/assembler-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/codegen/mips64/assembler-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/codegen/loong64/assembler-loong64.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#include "src/codegen/s390/assembler-s390.h"
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -21,6 +21,8 @@
|
||||
#include "src/codegen/mips/assembler-mips-inl.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/codegen/mips64/assembler-mips64-inl.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/codegen/loong64/assembler-loong64-inl.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#include "src/codegen/s390/assembler-s390-inl.h"
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -276,8 +276,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
|
||||
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
|
||||
|
||||
int pc_offset_for_safepoint() {
|
||||
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
|
||||
// Mips needs it's own implementation to avoid trampoline's influence.
|
||||
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
|
||||
defined(V8_TARGET_ARCH_LOONG64)
|
||||
// MIPS and LOONG need to use their own implementation to avoid trampoline's
|
||||
// influence.
|
||||
UNREACHABLE();
|
||||
#else
|
||||
return pc_offset();
|
||||
|
@ -15,6 +15,8 @@
|
||||
#include "src/codegen/mips/constants-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/codegen/mips64/constants-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/codegen/loong64/constants-loong64.h"
|
||||
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
|
||||
#include "src/codegen/ppc/constants-ppc.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
|
@ -51,6 +51,9 @@ enum CpuFeature {
|
||||
MIPSr6,
|
||||
MIPS_SIMD, // MSA instructions
|
||||
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
FPU,
|
||||
|
||||
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
|
||||
PPC_6_PLUS,
|
||||
PPC_7_PLUS,
|
||||
|
@ -688,6 +688,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
|
||||
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -27,6 +27,8 @@
|
||||
#include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
|
||||
#elif V8_TARGET_ARCH_MIPS
|
||||
#include "src/codegen/mips/interface-descriptors-mips-inl.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
|
||||
#else
|
||||
@ -318,9 +320,10 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
|
||||
// static
|
||||
constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
|
||||
// TODO(v8:11421): Implement on other platforms.
|
||||
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
|
||||
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
|
||||
V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
|
||||
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
|
||||
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
|
||||
V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \
|
||||
V8_TARGET_ARCH_LOONG64
|
||||
return RegisterArray(
|
||||
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
|
||||
kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
|
||||
@ -341,7 +344,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
|
||||
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
|
||||
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
|
||||
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
|
||||
V8_TARGET_ARCH_MIPS
|
||||
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
|
||||
return RegisterArray(ParamsSizeRegister(), WeightRegister());
|
||||
#else
|
||||
return DefaultRegisterArray();
|
||||
|
249
src/codegen/loong64/assembler-loong64-inl.h
Normal file
249
src/codegen/loong64/assembler-loong64-inl.h
Normal file
@ -0,0 +1,249 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
|
||||
#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
|
||||
|
||||
#include "src/codegen/assembler.h"
|
||||
#include "src/codegen/loong64/assembler-loong64.h"
|
||||
#include "src/debug/debug.h"
|
||||
#include "src/objects/objects-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Operand and MemOperand.
|
||||
|
||||
bool Operand::is_reg() const { return rm_.is_valid(); }
|
||||
|
||||
int64_t Operand::immediate() const {
|
||||
DCHECK(!is_reg());
|
||||
DCHECK(!IsHeapObjectRequest());
|
||||
return value_.immediate;
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// RelocInfo.
|
||||
|
||||
void RelocInfo::apply(intptr_t delta) {
|
||||
if (IsInternalReference(rmode_)) {
|
||||
// Absolute code pointer inside code object moves with the code object.
|
||||
Assembler::RelocateInternalReference(rmode_, pc_, delta);
|
||||
} else {
|
||||
DCHECK(IsRelativeCodeTarget(rmode_));
|
||||
Assembler::RelocateRelativeReference(rmode_, pc_, delta);
|
||||
}
|
||||
}
|
||||
|
||||
Address RelocInfo::target_address() {
|
||||
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
|
||||
IsWasmCall(rmode_));
|
||||
return Assembler::target_address_at(pc_, constant_pool_);
|
||||
}
|
||||
|
||||
Address RelocInfo::target_address_address() {
|
||||
DCHECK(HasTargetAddressAddress());
|
||||
// Read the address of the word containing the target_address in an
|
||||
// instruction stream.
|
||||
// The only architecture-independent user of this function is the serializer.
|
||||
// The serializer uses it to find out how many raw bytes of instruction to
|
||||
// output before the next target.
|
||||
// For an instruction like LUI/ORI where the target bits are mixed into the
|
||||
// instruction bits, the size of the target will be zero, indicating that the
|
||||
// serializer should not step forward in memory after a target is resolved
|
||||
// and written. In this case the target_address_address function should
|
||||
// return the end of the instructions to be patched, allowing the
|
||||
// deserializer to deserialize the instructions as raw bytes and put them in
|
||||
// place, ready to be patched with the target. After jump optimization,
|
||||
// that is the address of the instruction that follows J/JAL/JR/JALR
|
||||
// instruction.
|
||||
return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
|
||||
}
|
||||
|
||||
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
|
||||
|
||||
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
|
||||
|
||||
void Assembler::deserialization_set_special_target_at(
|
||||
Address instruction_payload, Code code, Address target) {
|
||||
set_target_address_at(instruction_payload,
|
||||
!code.is_null() ? code.constant_pool() : kNullAddress,
|
||||
target);
|
||||
}
|
||||
|
||||
int Assembler::deserialization_special_target_size(
|
||||
Address instruction_payload) {
|
||||
return kSpecialTargetSize;
|
||||
}
|
||||
|
||||
void Assembler::deserialization_set_target_internal_reference_at(
|
||||
Address pc, Address target, RelocInfo::Mode mode) {
|
||||
WriteUnalignedValue<Address>(pc, target);
|
||||
}
|
||||
|
||||
HeapObject RelocInfo::target_object() {
|
||||
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
|
||||
IsDataEmbeddedObject(rmode_));
|
||||
if (IsDataEmbeddedObject(rmode_)) {
|
||||
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
|
||||
}
|
||||
return HeapObject::cast(
|
||||
Object(Assembler::target_address_at(pc_, constant_pool_)));
|
||||
}
|
||||
|
||||
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
|
||||
return target_object();
|
||||
}
|
||||
|
||||
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
|
||||
if (IsDataEmbeddedObject(rmode_)) {
|
||||
return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
|
||||
} else if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
|
||||
return Handle<HeapObject>(reinterpret_cast<Address*>(
|
||||
Assembler::target_address_at(pc_, constant_pool_)));
|
||||
} else {
|
||||
DCHECK(IsRelativeCodeTarget(rmode_));
|
||||
return origin->relative_code_target_object_handle_at(pc_);
|
||||
}
|
||||
}
|
||||
|
||||
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
|
||||
WriteBarrierMode write_barrier_mode,
|
||||
ICacheFlushMode icache_flush_mode) {
|
||||
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
|
||||
IsDataEmbeddedObject(rmode_));
|
||||
if (IsDataEmbeddedObject(rmode_)) {
|
||||
WriteUnalignedValue(pc_, target.ptr());
|
||||
// No need to flush icache since no instructions were changed.
|
||||
} else {
|
||||
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
|
||||
icache_flush_mode);
|
||||
}
|
||||
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
|
||||
!FLAG_disable_write_barriers) {
|
||||
WriteBarrierForCode(host(), this, target);
|
||||
}
|
||||
}
|
||||
|
||||
Address RelocInfo::target_external_reference() {
|
||||
DCHECK(rmode_ == EXTERNAL_REFERENCE);
|
||||
return Assembler::target_address_at(pc_, constant_pool_);
|
||||
}
|
||||
|
||||
void RelocInfo::set_target_external_reference(
|
||||
Address target, ICacheFlushMode icache_flush_mode) {
|
||||
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
|
||||
Assembler::set_target_address_at(pc_, constant_pool_, target,
|
||||
icache_flush_mode);
|
||||
}
|
||||
|
||||
Address RelocInfo::target_internal_reference() {
|
||||
if (rmode_ == INTERNAL_REFERENCE) {
|
||||
return Memory<Address>(pc_);
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
Address RelocInfo::target_internal_reference_address() {
|
||||
DCHECK(rmode_ == INTERNAL_REFERENCE);
|
||||
return pc_;
|
||||
}
|
||||
|
||||
Handle<Code> Assembler::relative_code_target_object_handle_at(
|
||||
Address pc) const {
|
||||
Instr instr = Assembler::instr_at(pc);
|
||||
int32_t code_target_index = instr & kImm26Mask;
|
||||
code_target_index = ((code_target_index & 0x3ff) << 22 >> 6) |
|
||||
((code_target_index >> 10) & kImm16Mask);
|
||||
return GetCodeTarget(code_target_index);
|
||||
}
|
||||
|
||||
Address RelocInfo::target_runtime_entry(Assembler* origin) {
|
||||
DCHECK(IsRuntimeEntry(rmode_));
|
||||
return target_address();
|
||||
}
|
||||
|
||||
void RelocInfo::set_target_runtime_entry(Address target,
|
||||
WriteBarrierMode write_barrier_mode,
|
||||
ICacheFlushMode icache_flush_mode) {
|
||||
DCHECK(IsRuntimeEntry(rmode_));
|
||||
if (target_address() != target)
|
||||
set_target_address(target, write_barrier_mode, icache_flush_mode);
|
||||
}
|
||||
|
||||
Address RelocInfo::target_off_heap_target() {
|
||||
DCHECK(IsOffHeapTarget(rmode_));
|
||||
return Assembler::target_address_at(pc_, constant_pool_);
|
||||
}
|
||||
|
||||
void RelocInfo::WipeOut() {
|
||||
DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
|
||||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
|
||||
IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
|
||||
if (IsInternalReference(rmode_)) {
|
||||
Memory<Address>(pc_) = kNullAddress;
|
||||
} else {
|
||||
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Assembler.
|
||||
|
||||
void Assembler::CheckBuffer() {
|
||||
if (buffer_space() <= kGap) {
|
||||
GrowBuffer();
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::EmitHelper(Instr x) {
|
||||
*reinterpret_cast<Instr*>(pc_) = x;
|
||||
pc_ += kInstrSize;
|
||||
CheckTrampolinePoolQuick();
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void Assembler::EmitHelper(uint8_t x);
|
||||
|
||||
template <typename T>
|
||||
void Assembler::EmitHelper(T x) {
|
||||
*reinterpret_cast<T*>(pc_) = x;
|
||||
pc_ += sizeof(x);
|
||||
CheckTrampolinePoolQuick();
|
||||
}
|
||||
|
||||
template <>
|
||||
void Assembler::EmitHelper(uint8_t x) {
|
||||
*reinterpret_cast<uint8_t*>(pc_) = x;
|
||||
pc_ += sizeof(x);
|
||||
if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
|
||||
CheckTrampolinePoolQuick();
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::emit(Instr x) {
|
||||
if (!is_buffer_growth_blocked()) {
|
||||
CheckBuffer();
|
||||
}
|
||||
EmitHelper(x);
|
||||
}
|
||||
|
||||
void Assembler::emit(uint64_t data) {
|
||||
// CheckForEmitInForbiddenSlot();
|
||||
if (!is_buffer_growth_blocked()) {
|
||||
CheckBuffer();
|
||||
}
|
||||
EmitHelper(data);
|
||||
}
|
||||
|
||||
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
|
2405
src/codegen/loong64/assembler-loong64.cc
Normal file
2405
src/codegen/loong64/assembler-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
1129
src/codegen/loong64/assembler-loong64.h
Normal file
1129
src/codegen/loong64/assembler-loong64.h
Normal file
File diff suppressed because it is too large
Load Diff
100
src/codegen/loong64/constants-loong64.cc
Normal file
100
src/codegen/loong64/constants-loong64.cc
Normal file
@ -0,0 +1,100 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#if V8_TARGET_ARCH_LOONG64
|
||||
|
||||
#include "src/codegen/loong64/constants-loong64.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Registers.
|
||||
|
||||
// These register names are defined in a way to match the native disassembler
|
||||
// formatting. See for example the command "objdump -d <binary file>".
|
||||
const char* Registers::names_[kNumSimuRegisters] = {
|
||||
"zero_reg", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
|
||||
"a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "x_reg",
|
||||
"fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "pc"};
|
||||
|
||||
// List of alias names which can be used when referring to registers.
|
||||
const Registers::RegisterAlias Registers::aliases_[] = {
|
||||
{0, "zero"}, {30, "cp"}, {kInvalidRegister, nullptr}};
|
||||
|
||||
const char* Registers::Name(int reg) {
|
||||
const char* result;
|
||||
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
|
||||
result = names_[reg];
|
||||
} else {
|
||||
result = "noreg";
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int Registers::Number(const char* name) {
|
||||
// Look through the canonical names.
|
||||
for (int i = 0; i < kNumSimuRegisters; i++) {
|
||||
if (strcmp(names_[i], name) == 0) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
// Look through the alias names.
|
||||
int i = 0;
|
||||
while (aliases_[i].reg != kInvalidRegister) {
|
||||
if (strcmp(aliases_[i].name, name) == 0) {
|
||||
return aliases_[i].reg;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
||||
// No register with the reguested name found.
|
||||
return kInvalidRegister;
|
||||
}
|
||||
|
||||
const char* FPURegisters::names_[kNumFPURegisters] = {
|
||||
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
|
||||
"f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
|
||||
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
|
||||
|
||||
// List of alias names which can be used when referring to LoongArch registers.
|
||||
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
|
||||
{kInvalidRegister, nullptr}};
|
||||
|
||||
const char* FPURegisters::Name(int creg) {
|
||||
const char* result;
|
||||
if ((0 <= creg) && (creg < kNumFPURegisters)) {
|
||||
result = names_[creg];
|
||||
} else {
|
||||
result = "nocreg";
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int FPURegisters::Number(const char* name) {
|
||||
// Look through the canonical names.
|
||||
for (int i = 0; i < kNumFPURegisters; i++) {
|
||||
if (strcmp(names_[i], name) == 0) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
// Look through the alias names.
|
||||
int i = 0;
|
||||
while (aliases_[i].creg != kInvalidRegister) {
|
||||
if (strcmp(aliases_[i].name, name) == 0) {
|
||||
return aliases_[i].creg;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
||||
// No Cregister with the reguested name found.
|
||||
return kInvalidFPURegister;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_TARGET_ARCH_LOONG64
|
1291
src/codegen/loong64/constants-loong64.h
Normal file
1291
src/codegen/loong64/constants-loong64.h
Normal file
File diff suppressed because it is too large
Load Diff
38
src/codegen/loong64/cpu-loong64.cc
Normal file
38
src/codegen/loong64/cpu-loong64.cc
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// CPU specific code for LoongArch independent of OS goes here.
|
||||
|
||||
#include <sys/syscall.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#if V8_TARGET_ARCH_LOONG64
|
||||
|
||||
#include "src/codegen/cpu-features.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
void CpuFeatures::FlushICache(void* start, size_t size) {
|
||||
#if defined(V8_HOST_ARCH_LOONG64)
|
||||
// Nothing to do, flushing no instructions.
|
||||
if (size == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
#if defined(ANDROID) && !defined(__LP64__)
|
||||
// Bionic cacheflush can typically run in userland, avoiding kernel call.
|
||||
char* end = reinterpret_cast<char*>(start) + size;
|
||||
cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
|
||||
0);
|
||||
#else // ANDROID
|
||||
asm("ibar 0\n");
|
||||
#endif // ANDROID
|
||||
#endif // V8_HOST_ARCH_LOONG64
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_TARGET_ARCH_LOONG64
|
278
src/codegen/loong64/interface-descriptors-loong64-inl.h
Normal file
278
src/codegen/loong64/interface-descriptors-loong64-inl.h
Normal file
@ -0,0 +1,278 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
|
||||
#define V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
|
||||
|
||||
#if V8_TARGET_ARCH_LOONG64
|
||||
|
||||
#include "src/codegen/interface-descriptors.h"
|
||||
#include "src/execution/frames.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
|
||||
auto registers = RegisterArray(a0, a1, a2, a3, a4);
|
||||
STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
|
||||
return registers;
|
||||
}
|
||||
|
||||
#if DEBUG
|
||||
template <typename DerivedDescriptor>
|
||||
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
|
||||
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
|
||||
RegList allocatable_regs = data->allocatable_registers();
|
||||
if (argc >= 1) DCHECK(allocatable_regs | a0.bit());
|
||||
if (argc >= 2) DCHECK(allocatable_regs | a1.bit());
|
||||
if (argc >= 3) DCHECK(allocatable_regs | a2.bit());
|
||||
if (argc >= 4) DCHECK(allocatable_regs | a3.bit());
|
||||
if (argc >= 5) DCHECK(allocatable_regs | a4.bit());
|
||||
if (argc >= 6) DCHECK(allocatable_regs | a5.bit());
|
||||
if (argc >= 7) DCHECK(allocatable_regs | a6.bit());
|
||||
if (argc >= 8) DCHECK(allocatable_regs | a7.bit());
|
||||
// Additional arguments are passed on the stack.
|
||||
}
|
||||
#endif // DEBUG
|
||||
|
||||
// static
|
||||
constexpr auto WriteBarrierDescriptor::registers() {
|
||||
return RegisterArray(a1, a5, a4, a2, a0, a3);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto DynamicCheckMapsDescriptor::registers() {
|
||||
STATIC_ASSERT(kReturnRegister0 == a0);
|
||||
return RegisterArray(a0, a1, a2, a3, cp);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
|
||||
STATIC_ASSERT(kReturnRegister0 == a0);
|
||||
return RegisterArray(a0, a1, a2, a3, cp);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
|
||||
// static
|
||||
constexpr Register LoadDescriptor::NameRegister() { return a2; }
|
||||
// static
|
||||
constexpr Register LoadDescriptor::SlotRegister() { return a0; }
|
||||
|
||||
// static
|
||||
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
|
||||
|
||||
// static
|
||||
constexpr Register
|
||||
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
|
||||
return a4;
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
|
||||
// static
|
||||
constexpr Register StoreDescriptor::NameRegister() { return a2; }
|
||||
// static
|
||||
constexpr Register StoreDescriptor::ValueRegister() { return a0; }
|
||||
// static
|
||||
constexpr Register StoreDescriptor::SlotRegister() { return a4; }
|
||||
|
||||
// static
|
||||
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
|
||||
|
||||
// static
|
||||
constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
|
||||
|
||||
// static
|
||||
constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
|
||||
// static
|
||||
constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
|
||||
|
||||
// static
|
||||
constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
|
||||
// static
|
||||
constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
|
||||
|
||||
// static
|
||||
constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
|
||||
return a2;
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
|
||||
|
||||
// static
|
||||
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
|
||||
|
||||
// static
|
||||
constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
|
||||
|
||||
// static
|
||||
constexpr auto CallTrampolineDescriptor::registers() {
|
||||
// a1: target
|
||||
// a0: number of arguments
|
||||
return RegisterArray(a1, a0);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto CallVarargsDescriptor::registers() {
|
||||
// a0 : number of arguments (on the stack, not including receiver)
|
||||
// a1 : the target to call
|
||||
// a4 : arguments list length (untagged)
|
||||
// a2 : arguments list (FixedArray)
|
||||
return RegisterArray(a1, a0, a4, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto CallForwardVarargsDescriptor::registers() {
|
||||
// a1: the target to call
|
||||
// a0: number of arguments
|
||||
// a2: start index (to support rest parameters)
|
||||
return RegisterArray(a1, a0, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto CallFunctionTemplateDescriptor::registers() {
|
||||
// a1 : function template info
|
||||
// a0 : number of arguments (on the stack, not including receiver)
|
||||
return RegisterArray(a1, a0);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto CallWithSpreadDescriptor::registers() {
|
||||
// a0 : number of arguments (on the stack, not including receiver)
|
||||
// a1 : the target to call
|
||||
// a2 : the object to spread
|
||||
return RegisterArray(a1, a0, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto CallWithArrayLikeDescriptor::registers() {
|
||||
// a1 : the target to call
|
||||
// a2 : the arguments list
|
||||
return RegisterArray(a1, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto ConstructVarargsDescriptor::registers() {
|
||||
// a0 : number of arguments (on the stack, not including receiver)
|
||||
// a1 : the target to call
|
||||
// a3 : the new target
|
||||
// a4 : arguments list length (untagged)
|
||||
// a2 : arguments list (FixedArray)
|
||||
return RegisterArray(a1, a3, a0, a4, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto ConstructForwardVarargsDescriptor::registers() {
|
||||
// a1: the target to call
|
||||
// a3: new target
|
||||
// a0: number of arguments
|
||||
// a2: start index (to support rest parameters)
|
||||
return RegisterArray(a1, a3, a0, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto ConstructWithSpreadDescriptor::registers() {
|
||||
// a0 : number of arguments (on the stack, not including receiver)
|
||||
// a1 : the target to call
|
||||
// a3 : the new target
|
||||
// a2 : the object to spread
|
||||
return RegisterArray(a1, a3, a0, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto ConstructWithArrayLikeDescriptor::registers() {
|
||||
// a1 : the target to call
|
||||
// a3 : the new target
|
||||
// a2 : the arguments list
|
||||
return RegisterArray(a1, a3, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto ConstructStubDescriptor::registers() {
|
||||
// a1: target
|
||||
// a3: new target
|
||||
// a0: number of arguments
|
||||
// a2: allocation site or undefined
|
||||
return RegisterArray(a1, a3, a0, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
|
||||
|
||||
// static
|
||||
constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
|
||||
|
||||
// static
|
||||
constexpr auto Compare_BaselineDescriptor::registers() {
|
||||
// a1: left operand
|
||||
// a0: right operand
|
||||
// a2: feedback slot
|
||||
return RegisterArray(a1, a0, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
|
||||
|
||||
// static
|
||||
constexpr auto BinaryOp_BaselineDescriptor::registers() {
|
||||
// a1: left operand
|
||||
// a0: right operand
|
||||
// a2: feedback slot
|
||||
return RegisterArray(a1, a0, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto ApiCallbackDescriptor::registers() {
|
||||
// a1 : kApiFunctionAddress
|
||||
// a2 : kArgc
|
||||
// a3 : kCallData
|
||||
// a0 : kHolder
|
||||
return RegisterArray(a1, a2, a3, a0);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto InterpreterDispatchDescriptor::registers() {
|
||||
return RegisterArray(
|
||||
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
|
||||
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
|
||||
// a0 : argument count (not including receiver)
|
||||
// a2 : address of first argument
|
||||
// a1 : the target callable to be call
|
||||
return RegisterArray(a0, a2, a1);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
|
||||
// a0 : argument count (not including receiver)
|
||||
// a4 : address of the first argument
|
||||
// a1 : constructor to call
|
||||
// a3 : new target
|
||||
// a2 : allocation site feedback if available, undefined otherwise
|
||||
return RegisterArray(a0, a4, a1, a3, a2);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto ResumeGeneratorDescriptor::registers() {
|
||||
// v0 : the value to pass to the generator
|
||||
// a1 : the JSGeneratorObject to resume
|
||||
return RegisterArray(a0, a1);
|
||||
}
|
||||
|
||||
// static
|
||||
constexpr auto RunMicrotasksEntryDescriptor::registers() {
|
||||
return RegisterArray(a0, a1);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_TARGET_ARCH_LOONG64
|
||||
|
||||
#endif // V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
|
4105
src/codegen/loong64/macro-assembler-loong64.cc
Normal file
4105
src/codegen/loong64/macro-assembler-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
1067
src/codegen/loong64/macro-assembler-loong64.h
Normal file
1067
src/codegen/loong64/macro-assembler-loong64.h
Normal file
File diff suppressed because it is too large
Load Diff
288
src/codegen/loong64/register-loong64.h
Normal file
288
src/codegen/loong64/register-loong64.h
Normal file
@ -0,0 +1,288 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
|
||||
#define V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
|
||||
|
||||
#include "src/codegen/loong64/constants-loong64.h"
|
||||
#include "src/codegen/register.h"
|
||||
#include "src/codegen/reglist.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// clang-format off
|
||||
#define GENERAL_REGISTERS(V) \
|
||||
V(zero_reg) V(ra) V(tp) V(sp) \
|
||||
V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
|
||||
V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) V(t8) \
|
||||
V(x_reg) V(fp) \
|
||||
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) \
|
||||
|
||||
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
|
||||
V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
|
||||
V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) \
|
||||
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s7) V(s8)
|
||||
|
||||
#define DOUBLE_REGISTERS(V) \
|
||||
V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
|
||||
V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
|
||||
V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
|
||||
V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
|
||||
|
||||
#define FLOAT_REGISTERS DOUBLE_REGISTERS
|
||||
#define SIMD128_REGISTERS(V) \
|
||||
V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
|
||||
V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
|
||||
V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
|
||||
V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
|
||||
|
||||
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
|
||||
V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
|
||||
V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) V(f16) \
|
||||
V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23)
|
||||
// clang-format on
|
||||
|
||||
// Note that the bit values must match those used in actual instruction
|
||||
// encoding.
|
||||
const int kNumRegs = 32;
|
||||
|
||||
const RegList kJSCallerSaved = 1 << 4 | // a0
|
||||
1 << 5 | // a1
|
||||
1 << 6 | // a2
|
||||
1 << 7 | // a3
|
||||
1 << 8 | // a4
|
||||
1 << 9 | // a5
|
||||
1 << 10 | // a6
|
||||
1 << 11 | // a7
|
||||
1 << 12 | // t0
|
||||
1 << 13 | // t1
|
||||
1 << 14 | // t2
|
||||
1 << 15 | // t3
|
||||
1 << 16 | // t4
|
||||
1 << 17 | // t5
|
||||
1 << 20; // t8
|
||||
|
||||
const int kNumJSCallerSaved = 15;
|
||||
|
||||
// Callee-saved registers preserved when switching from C to JavaScript.
|
||||
const RegList kCalleeSaved = 1 << 22 | // fp
|
||||
1 << 23 | // s0
|
||||
1 << 24 | // s1
|
||||
1 << 25 | // s2
|
||||
1 << 26 | // s3
|
||||
1 << 27 | // s4
|
||||
1 << 28 | // s5
|
||||
1 << 29 | // s6 (roots in Javascript code)
|
||||
1 << 30 | // s7 (cp in Javascript code)
|
||||
1 << 31; // s8
|
||||
|
||||
const int kNumCalleeSaved = 10;
|
||||
|
||||
const RegList kCalleeSavedFPU = 1 << 24 | // f24
|
||||
1 << 25 | // f25
|
||||
1 << 26 | // f26
|
||||
1 << 27 | // f27
|
||||
1 << 28 | // f28
|
||||
1 << 29 | // f29
|
||||
1 << 30 | // f30
|
||||
1 << 31; // f31
|
||||
|
||||
const int kNumCalleeSavedFPU = 8;
|
||||
|
||||
const RegList kCallerSavedFPU = 1 << 0 | // f0
|
||||
1 << 1 | // f1
|
||||
1 << 2 | // f2
|
||||
1 << 3 | // f3
|
||||
1 << 4 | // f4
|
||||
1 << 5 | // f5
|
||||
1 << 6 | // f6
|
||||
1 << 7 | // f7
|
||||
1 << 8 | // f8
|
||||
1 << 9 | // f9
|
||||
1 << 10 | // f10
|
||||
1 << 11 | // f11
|
||||
1 << 12 | // f12
|
||||
1 << 13 | // f13
|
||||
1 << 14 | // f14
|
||||
1 << 15 | // f15
|
||||
1 << 16 | // f16
|
||||
1 << 17 | // f17
|
||||
1 << 18 | // f18
|
||||
1 << 19 | // f19
|
||||
1 << 20 | // f20
|
||||
1 << 21 | // f21
|
||||
1 << 22 | // f22
|
||||
1 << 23; // f23
|
||||
|
||||
// CPU Registers.
|
||||
//
|
||||
// 1) We would prefer to use an enum, but enum values are assignment-
|
||||
// compatible with int, which has caused code-generation bugs.
|
||||
//
|
||||
// 2) We would prefer to use a class instead of a struct but we don't like
|
||||
// the register initialization to depend on the particular initialization
|
||||
// order (which appears to be different on OS X, Linux, and Windows for the
|
||||
// installed versions of C++ we tried). Using a struct permits C-style
|
||||
// "initialization". Also, the Register objects cannot be const as this
|
||||
// forces initialization stubs in MSVC, making us dependent on initialization
|
||||
// order.
|
||||
//
|
||||
// 3) By not using an enum, we are possibly preventing the compiler from
|
||||
// doing certain constant folds, which may significantly reduce the
|
||||
// code generated for some assembly instructions (because they boil down
|
||||
// to a few constants). If this is a problem, we could change the code
|
||||
// such that we use an enum in optimized mode, and the struct in debug
|
||||
// mode. This way we get the compile-time error checking in debug mode
|
||||
// and best performance in optimized code.
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Implementation of Register and FPURegister.
|
||||
|
||||
enum RegisterCode {
|
||||
#define REGISTER_CODE(R) kRegCode_##R,
|
||||
GENERAL_REGISTERS(REGISTER_CODE)
|
||||
#undef REGISTER_CODE
|
||||
kRegAfterLast
|
||||
};
|
||||
|
||||
class Register : public RegisterBase<Register, kRegAfterLast> {
|
||||
public:
|
||||
static constexpr int kMantissaOffset = 0;
|
||||
static constexpr int kExponentOffset = 4;
|
||||
|
||||
private:
|
||||
friend class RegisterBase;
|
||||
explicit constexpr Register(int code) : RegisterBase(code) {}
|
||||
};
|
||||
|
||||
// s7: context register
|
||||
// s3: scratch register
|
||||
// s4: scratch register 2
|
||||
#define DECLARE_REGISTER(R) \
|
||||
constexpr Register R = Register::from_code(kRegCode_##R);
|
||||
GENERAL_REGISTERS(DECLARE_REGISTER)
|
||||
#undef DECLARE_REGISTER
|
||||
|
||||
constexpr Register no_reg = Register::no_reg();
|
||||
|
||||
int ToNumber(Register reg);
|
||||
|
||||
Register ToRegister(int num);
|
||||
|
||||
// Returns the number of padding slots needed for stack pointer alignment.
|
||||
constexpr int ArgumentPaddingSlots(int argument_count) {
|
||||
// No argument padding required.
|
||||
return 0;
|
||||
}
|
||||
|
||||
constexpr bool kSimpleFPAliasing = true;
|
||||
constexpr bool kSimdMaskRegisters = false;
|
||||
|
||||
enum DoubleRegisterCode {
|
||||
#define REGISTER_CODE(R) kDoubleCode_##R,
|
||||
DOUBLE_REGISTERS(REGISTER_CODE)
|
||||
#undef REGISTER_CODE
|
||||
kDoubleAfterLast
|
||||
};
|
||||
|
||||
// FPURegister register.
|
||||
class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
|
||||
public:
|
||||
FPURegister low() const { return FPURegister::from_code(code()); }
|
||||
|
||||
private:
|
||||
friend class RegisterBase;
|
||||
explicit constexpr FPURegister(int code) : RegisterBase(code) {}
|
||||
};
|
||||
|
||||
// Condition Flag Register
|
||||
enum CFRegister { FCC0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };
|
||||
|
||||
using FloatRegister = FPURegister;
|
||||
|
||||
using DoubleRegister = FPURegister;
|
||||
|
||||
using Simd128Register = FPURegister;
|
||||
|
||||
#define DECLARE_DOUBLE_REGISTER(R) \
|
||||
constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
|
||||
DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
|
||||
#undef DECLARE_DOUBLE_REGISTER
|
||||
|
||||
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
|
||||
|
||||
// Register aliases.
|
||||
// cp is assumed to be a callee saved register.
|
||||
constexpr Register kRootRegister = s6;
|
||||
constexpr Register cp = s7;
|
||||
constexpr Register kScratchReg = s3;
|
||||
constexpr Register kScratchReg2 = s4;
|
||||
constexpr DoubleRegister kScratchDoubleReg = f30;
|
||||
constexpr DoubleRegister kScratchDoubleReg1 = f30;
|
||||
constexpr DoubleRegister kScratchDoubleReg2 = f31;
|
||||
// FPU zero reg is often used to hold 0.0, but it's not hardwired to 0.0.
|
||||
constexpr DoubleRegister kDoubleRegZero = f29;
|
||||
|
||||
struct FPUControlRegister {
|
||||
bool is_valid() const { return (reg_code >> 2) == 0; }
|
||||
bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
|
||||
int code() const {
|
||||
DCHECK(is_valid());
|
||||
return reg_code;
|
||||
}
|
||||
int bit() const {
|
||||
DCHECK(is_valid());
|
||||
return 1 << reg_code;
|
||||
}
|
||||
void setcode(int f) {
|
||||
reg_code = f;
|
||||
DCHECK(is_valid());
|
||||
}
|
||||
// Unfortunately we can't make this private in a struct.
|
||||
int reg_code;
|
||||
};
|
||||
|
||||
constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
|
||||
constexpr FPUControlRegister FCSR = {kFCSRRegister};
|
||||
constexpr FPUControlRegister FCSR0 = {kFCSRRegister};
|
||||
constexpr FPUControlRegister FCSR1 = {kFCSRRegister + 1};
|
||||
constexpr FPUControlRegister FCSR2 = {kFCSRRegister + 2};
|
||||
constexpr FPUControlRegister FCSR3 = {kFCSRRegister + 3};
|
||||
|
||||
// Define {RegisterName} methods for the register types.
|
||||
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
|
||||
DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
|
||||
|
||||
// Give alias names to registers for calling conventions.
|
||||
constexpr Register kReturnRegister0 = a0;
|
||||
constexpr Register kReturnRegister1 = a1;
|
||||
constexpr Register kReturnRegister2 = a2;
|
||||
constexpr Register kJSFunctionRegister = a1;
|
||||
constexpr Register kContextRegister = s7;
|
||||
constexpr Register kAllocateSizeRegister = a0;
|
||||
constexpr Register kInterpreterAccumulatorRegister = a0;
|
||||
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
|
||||
constexpr Register kInterpreterBytecodeArrayRegister = t1;
|
||||
constexpr Register kInterpreterDispatchTableRegister = t2;
|
||||
|
||||
constexpr Register kJavaScriptCallArgCountRegister = a0;
|
||||
constexpr Register kJavaScriptCallCodeStartRegister = a2;
|
||||
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
|
||||
constexpr Register kJavaScriptCallNewTargetRegister = a3;
|
||||
constexpr Register kJavaScriptCallExtraArg1Register = a2;
|
||||
|
||||
constexpr Register kOffHeapTrampolineRegister = t7;
|
||||
constexpr Register kRuntimeCallFunctionRegister = a1;
|
||||
constexpr Register kRuntimeCallArgCountRegister = a0;
|
||||
constexpr Register kRuntimeCallArgvRegister = a2;
|
||||
constexpr Register kWasmInstanceRegister = a0;
|
||||
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
|
||||
|
||||
constexpr DoubleRegister kFPReturnRegister0 = f0;
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
|
@ -57,6 +57,9 @@ enum class SmiCheck { kOmit, kInline };
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/codegen/mips64/constants-mips64.h"
|
||||
#include "src/codegen/mips64/macro-assembler-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/codegen/loong64/constants-loong64.h"
|
||||
#include "src/codegen/loong64/macro-assembler-loong64.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#include "src/codegen/s390/constants-s390.h"
|
||||
#include "src/codegen/s390/macro-assembler-s390.h"
|
||||
|
@ -22,6 +22,8 @@
|
||||
#include "src/codegen/mips/register-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/codegen/mips64/register-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/codegen/loong64/register-loong64.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#include "src/codegen/s390/register-s390.h"
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -60,6 +60,8 @@ static int get_num_allocatable_double_registers() {
|
||||
kMaxAllocatableDoubleRegisterCount;
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
kMaxAllocatableDoubleRegisterCount;
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
kMaxAllocatableDoubleRegisterCount;
|
||||
#elif V8_TARGET_ARCH_PPC
|
||||
kMaxAllocatableDoubleRegisterCount;
|
||||
#elif V8_TARGET_ARCH_PPC64
|
||||
|
@ -320,7 +320,7 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
|
||||
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
|
||||
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
|
||||
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \
|
||||
defined(V8_TARGET_ARCH_RISCV64)
|
||||
defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
@ -62,6 +62,9 @@ constexpr int GB = MB * 1024;
|
||||
#if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
|
||||
#define USE_SIMULATOR 1
|
||||
#endif
|
||||
#if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64)
|
||||
#define USE_SIMULATOR 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Determine whether the architecture uses an embedded constant pool
|
||||
|
@ -17,6 +17,8 @@
|
||||
#include "src/compiler/backend/mips/instruction-codes-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/compiler/backend/loong64/instruction-codes-loong64.h"
|
||||
#elif V8_TARGET_ARCH_X64
|
||||
#include "src/compiler/backend/x64/instruction-codes-x64.h"
|
||||
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
|
||||
|
@ -2711,7 +2711,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
|
||||
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
|
||||
|
||||
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
|
||||
!V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
|
||||
!V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && \
|
||||
!V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
|
||||
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
|
||||
|
||||
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
|
||||
@ -2737,7 +2738,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
|
||||
}
|
||||
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
|
||||
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
|
||||
// !V8_TARGET_ARCH_RISCV64
|
||||
// !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
|
||||
|
||||
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
|
||||
// This is only needed on 32-bit to split the 64-bit value into two operands.
|
||||
@ -2751,11 +2752,11 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
|
||||
|
||||
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
|
||||
#if !V8_TARGET_ARCH_ARM64
|
||||
#if !V8_TARGET_ARCH_MIPS64
|
||||
#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64
|
||||
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
|
||||
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
|
||||
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
|
||||
#endif // !V8_TARGET_ARCH_MIPS64
|
||||
#endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64
|
||||
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
|
||||
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
|
||||
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
|
||||
|
2630
src/compiler/backend/loong64/code-generator-loong64.cc
Normal file
2630
src/compiler/backend/loong64/code-generator-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
396
src/compiler/backend/loong64/instruction-codes-loong64.h
Normal file
396
src/compiler/backend/loong64/instruction-codes-loong64.h
Normal file
@ -0,0 +1,396 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
|
||||
#define V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace compiler {
|
||||
|
||||
// LOONG64-specific opcodes that specify which assembly sequence to emit.
|
||||
// Most opcodes specify a single instruction.
|
||||
#define TARGET_ARCH_OPCODE_LIST(V) \
|
||||
V(Loong64Add_d) \
|
||||
V(Loong64Add_w) \
|
||||
V(Loong64AddOvf_d) \
|
||||
V(Loong64Sub_d) \
|
||||
V(Loong64Sub_w) \
|
||||
V(Loong64SubOvf_d) \
|
||||
V(Loong64Mul_d) \
|
||||
V(Loong64MulOvf_w) \
|
||||
V(Loong64Mulh_d) \
|
||||
V(Loong64Mulh_w) \
|
||||
V(Loong64Mulh_wu) \
|
||||
V(Loong64Mul_w) \
|
||||
V(Loong64Div_d) \
|
||||
V(Loong64Div_w) \
|
||||
V(Loong64Div_du) \
|
||||
V(Loong64Div_wu) \
|
||||
V(Loong64Mod_d) \
|
||||
V(Loong64Mod_w) \
|
||||
V(Loong64Mod_du) \
|
||||
V(Loong64Mod_wu) \
|
||||
V(Loong64And) \
|
||||
V(Loong64And32) \
|
||||
V(Loong64Or) \
|
||||
V(Loong64Or32) \
|
||||
V(Loong64Nor) \
|
||||
V(Loong64Nor32) \
|
||||
V(Loong64Xor) \
|
||||
V(Loong64Xor32) \
|
||||
V(Loong64Alsl_d) \
|
||||
V(Loong64Alsl_w) \
|
||||
V(Loong64Sll_d) \
|
||||
V(Loong64Sll_w) \
|
||||
V(Loong64Srl_d) \
|
||||
V(Loong64Srl_w) \
|
||||
V(Loong64Sra_d) \
|
||||
V(Loong64Sra_w) \
|
||||
V(Loong64Rotr_d) \
|
||||
V(Loong64Rotr_w) \
|
||||
V(Loong64Bstrpick_d) \
|
||||
V(Loong64Bstrpick_w) \
|
||||
V(Loong64Bstrins_d) \
|
||||
V(Loong64Bstrins_w) \
|
||||
V(Loong64ByteSwap64) \
|
||||
V(Loong64ByteSwap32) \
|
||||
V(Loong64Clz_d) \
|
||||
V(Loong64Clz_w) \
|
||||
V(Loong64Mov) \
|
||||
V(Loong64Tst) \
|
||||
V(Loong64Cmp) \
|
||||
V(Loong64Float32Cmp) \
|
||||
V(Loong64Float32Add) \
|
||||
V(Loong64Float32Sub) \
|
||||
V(Loong64Float32Mul) \
|
||||
V(Loong64Float32Div) \
|
||||
V(Loong64Float32Abs) \
|
||||
V(Loong64Float32Neg) \
|
||||
V(Loong64Float32Sqrt) \
|
||||
V(Loong64Float32Max) \
|
||||
V(Loong64Float32Min) \
|
||||
V(Loong64Float32ToFloat64) \
|
||||
V(Loong64Float32RoundDown) \
|
||||
V(Loong64Float32RoundUp) \
|
||||
V(Loong64Float32RoundTruncate) \
|
||||
V(Loong64Float32RoundTiesEven) \
|
||||
V(Loong64Float32ToInt32) \
|
||||
V(Loong64Float32ToInt64) \
|
||||
V(Loong64Float32ToUint32) \
|
||||
V(Loong64Float32ToUint64) \
|
||||
V(Loong64Float64Cmp) \
|
||||
V(Loong64Float64Add) \
|
||||
V(Loong64Float64Sub) \
|
||||
V(Loong64Float64Mul) \
|
||||
V(Loong64Float64Div) \
|
||||
V(Loong64Float64Mod) \
|
||||
V(Loong64Float64Abs) \
|
||||
V(Loong64Float64Neg) \
|
||||
V(Loong64Float64Sqrt) \
|
||||
V(Loong64Float64Max) \
|
||||
V(Loong64Float64Min) \
|
||||
V(Loong64Float64ToFloat32) \
|
||||
V(Loong64Float64RoundDown) \
|
||||
V(Loong64Float64RoundUp) \
|
||||
V(Loong64Float64RoundTruncate) \
|
||||
V(Loong64Float64RoundTiesEven) \
|
||||
V(Loong64Float64ToInt32) \
|
||||
V(Loong64Float64ToInt64) \
|
||||
V(Loong64Float64ToUint32) \
|
||||
V(Loong64Float64ToUint64) \
|
||||
V(Loong64Int32ToFloat32) \
|
||||
V(Loong64Int32ToFloat64) \
|
||||
V(Loong64Int64ToFloat32) \
|
||||
V(Loong64Int64ToFloat64) \
|
||||
V(Loong64Uint32ToFloat32) \
|
||||
V(Loong64Uint32ToFloat64) \
|
||||
V(Loong64Uint64ToFloat32) \
|
||||
V(Loong64Uint64ToFloat64) \
|
||||
V(Loong64Float64ExtractLowWord32) \
|
||||
V(Loong64Float64ExtractHighWord32) \
|
||||
V(Loong64Float64InsertLowWord32) \
|
||||
V(Loong64Float64InsertHighWord32) \
|
||||
V(Loong64BitcastDL) \
|
||||
V(Loong64BitcastLD) \
|
||||
V(Loong64Float64SilenceNaN) \
|
||||
V(Loong64Ld_b) \
|
||||
V(Loong64Ld_bu) \
|
||||
V(Loong64St_b) \
|
||||
V(Loong64Ld_h) \
|
||||
V(Loong64Ld_hu) \
|
||||
V(Loong64St_h) \
|
||||
V(Loong64Ld_w) \
|
||||
V(Loong64Ld_wu) \
|
||||
V(Loong64St_w) \
|
||||
V(Loong64Ld_d) \
|
||||
V(Loong64St_d) \
|
||||
V(Loong64Fld_s) \
|
||||
V(Loong64Fst_s) \
|
||||
V(Loong64Fld_d) \
|
||||
V(Loong64Fst_d) \
|
||||
V(Loong64Push) \
|
||||
V(Loong64Peek) \
|
||||
V(Loong64Poke) \
|
||||
V(Loong64StackClaim) \
|
||||
V(Loong64Ext_w_b) \
|
||||
V(Loong64Ext_w_h) \
|
||||
V(Loong64Dbar) \
|
||||
V(Loong64S128Const) \
|
||||
V(Loong64S128Zero) \
|
||||
V(Loong64S128AllOnes) \
|
||||
V(Loong64I32x4Splat) \
|
||||
V(Loong64I32x4ExtractLane) \
|
||||
V(Loong64I32x4ReplaceLane) \
|
||||
V(Loong64I32x4Add) \
|
||||
V(Loong64I32x4Sub) \
|
||||
V(Loong64F64x2Abs) \
|
||||
V(Loong64F64x2Neg) \
|
||||
V(Loong64F32x4Splat) \
|
||||
V(Loong64F32x4ExtractLane) \
|
||||
V(Loong64F32x4ReplaceLane) \
|
||||
V(Loong64F32x4SConvertI32x4) \
|
||||
V(Loong64F32x4UConvertI32x4) \
|
||||
V(Loong64I32x4Mul) \
|
||||
V(Loong64I32x4MaxS) \
|
||||
V(Loong64I32x4MinS) \
|
||||
V(Loong64I32x4Eq) \
|
||||
V(Loong64I32x4Ne) \
|
||||
V(Loong64I32x4Shl) \
|
||||
V(Loong64I32x4ShrS) \
|
||||
V(Loong64I32x4ShrU) \
|
||||
V(Loong64I32x4MaxU) \
|
||||
V(Loong64I32x4MinU) \
|
||||
V(Loong64F64x2Sqrt) \
|
||||
V(Loong64F64x2Add) \
|
||||
V(Loong64F64x2Sub) \
|
||||
V(Loong64F64x2Mul) \
|
||||
V(Loong64F64x2Div) \
|
||||
V(Loong64F64x2Min) \
|
||||
V(Loong64F64x2Max) \
|
||||
V(Loong64F64x2Eq) \
|
||||
V(Loong64F64x2Ne) \
|
||||
V(Loong64F64x2Lt) \
|
||||
V(Loong64F64x2Le) \
|
||||
V(Loong64F64x2Splat) \
|
||||
V(Loong64F64x2ExtractLane) \
|
||||
V(Loong64F64x2ReplaceLane) \
|
||||
V(Loong64F64x2Pmin) \
|
||||
V(Loong64F64x2Pmax) \
|
||||
V(Loong64F64x2Ceil) \
|
||||
V(Loong64F64x2Floor) \
|
||||
V(Loong64F64x2Trunc) \
|
||||
V(Loong64F64x2NearestInt) \
|
||||
V(Loong64F64x2ConvertLowI32x4S) \
|
||||
V(Loong64F64x2ConvertLowI32x4U) \
|
||||
V(Loong64F64x2PromoteLowF32x4) \
|
||||
V(Loong64I64x2Splat) \
|
||||
V(Loong64I64x2ExtractLane) \
|
||||
V(Loong64I64x2ReplaceLane) \
|
||||
V(Loong64I64x2Add) \
|
||||
V(Loong64I64x2Sub) \
|
||||
V(Loong64I64x2Mul) \
|
||||
V(Loong64I64x2Neg) \
|
||||
V(Loong64I64x2Shl) \
|
||||
V(Loong64I64x2ShrS) \
|
||||
V(Loong64I64x2ShrU) \
|
||||
V(Loong64I64x2BitMask) \
|
||||
V(Loong64I64x2Eq) \
|
||||
V(Loong64I64x2Ne) \
|
||||
V(Loong64I64x2GtS) \
|
||||
V(Loong64I64x2GeS) \
|
||||
V(Loong64I64x2Abs) \
|
||||
V(Loong64I64x2SConvertI32x4Low) \
|
||||
V(Loong64I64x2SConvertI32x4High) \
|
||||
V(Loong64I64x2UConvertI32x4Low) \
|
||||
V(Loong64I64x2UConvertI32x4High) \
|
||||
V(Loong64ExtMulLow) \
|
||||
V(Loong64ExtMulHigh) \
|
||||
V(Loong64ExtAddPairwise) \
|
||||
V(Loong64F32x4Abs) \
|
||||
V(Loong64F32x4Neg) \
|
||||
V(Loong64F32x4Sqrt) \
|
||||
V(Loong64F32x4RecipApprox) \
|
||||
V(Loong64F32x4RecipSqrtApprox) \
|
||||
V(Loong64F32x4Add) \
|
||||
V(Loong64F32x4Sub) \
|
||||
V(Loong64F32x4Mul) \
|
||||
V(Loong64F32x4Div) \
|
||||
V(Loong64F32x4Max) \
|
||||
V(Loong64F32x4Min) \
|
||||
V(Loong64F32x4Eq) \
|
||||
V(Loong64F32x4Ne) \
|
||||
V(Loong64F32x4Lt) \
|
||||
V(Loong64F32x4Le) \
|
||||
V(Loong64F32x4Pmin) \
|
||||
V(Loong64F32x4Pmax) \
|
||||
V(Loong64F32x4Ceil) \
|
||||
V(Loong64F32x4Floor) \
|
||||
V(Loong64F32x4Trunc) \
|
||||
V(Loong64F32x4NearestInt) \
|
||||
V(Loong64F32x4DemoteF64x2Zero) \
|
||||
V(Loong64I32x4SConvertF32x4) \
|
||||
V(Loong64I32x4UConvertF32x4) \
|
||||
V(Loong64I32x4Neg) \
|
||||
V(Loong64I32x4GtS) \
|
||||
V(Loong64I32x4GeS) \
|
||||
V(Loong64I32x4GtU) \
|
||||
V(Loong64I32x4GeU) \
|
||||
V(Loong64I32x4Abs) \
|
||||
V(Loong64I32x4BitMask) \
|
||||
V(Loong64I32x4DotI16x8S) \
|
||||
V(Loong64I32x4TruncSatF64x2SZero) \
|
||||
V(Loong64I32x4TruncSatF64x2UZero) \
|
||||
V(Loong64I16x8Splat) \
|
||||
V(Loong64I16x8ExtractLaneU) \
|
||||
V(Loong64I16x8ExtractLaneS) \
|
||||
V(Loong64I16x8ReplaceLane) \
|
||||
V(Loong64I16x8Neg) \
|
||||
V(Loong64I16x8Shl) \
|
||||
V(Loong64I16x8ShrS) \
|
||||
V(Loong64I16x8ShrU) \
|
||||
V(Loong64I16x8Add) \
|
||||
V(Loong64I16x8AddSatS) \
|
||||
V(Loong64I16x8Sub) \
|
||||
V(Loong64I16x8SubSatS) \
|
||||
V(Loong64I16x8Mul) \
|
||||
V(Loong64I16x8MaxS) \
|
||||
V(Loong64I16x8MinS) \
|
||||
V(Loong64I16x8Eq) \
|
||||
V(Loong64I16x8Ne) \
|
||||
V(Loong64I16x8GtS) \
|
||||
V(Loong64I16x8GeS) \
|
||||
V(Loong64I16x8AddSatU) \
|
||||
V(Loong64I16x8SubSatU) \
|
||||
V(Loong64I16x8MaxU) \
|
||||
V(Loong64I16x8MinU) \
|
||||
V(Loong64I16x8GtU) \
|
||||
V(Loong64I16x8GeU) \
|
||||
V(Loong64I16x8RoundingAverageU) \
|
||||
V(Loong64I16x8Abs) \
|
||||
V(Loong64I16x8BitMask) \
|
||||
V(Loong64I16x8Q15MulRSatS) \
|
||||
V(Loong64I8x16Splat) \
|
||||
V(Loong64I8x16ExtractLaneU) \
|
||||
V(Loong64I8x16ExtractLaneS) \
|
||||
V(Loong64I8x16ReplaceLane) \
|
||||
V(Loong64I8x16Neg) \
|
||||
V(Loong64I8x16Shl) \
|
||||
V(Loong64I8x16ShrS) \
|
||||
V(Loong64I8x16Add) \
|
||||
V(Loong64I8x16AddSatS) \
|
||||
V(Loong64I8x16Sub) \
|
||||
V(Loong64I8x16SubSatS) \
|
||||
V(Loong64I8x16MaxS) \
|
||||
V(Loong64I8x16MinS) \
|
||||
V(Loong64I8x16Eq) \
|
||||
V(Loong64I8x16Ne) \
|
||||
V(Loong64I8x16GtS) \
|
||||
V(Loong64I8x16GeS) \
|
||||
V(Loong64I8x16ShrU) \
|
||||
V(Loong64I8x16AddSatU) \
|
||||
V(Loong64I8x16SubSatU) \
|
||||
V(Loong64I8x16MaxU) \
|
||||
V(Loong64I8x16MinU) \
|
||||
V(Loong64I8x16GtU) \
|
||||
V(Loong64I8x16GeU) \
|
||||
V(Loong64I8x16RoundingAverageU) \
|
||||
V(Loong64I8x16Abs) \
|
||||
V(Loong64I8x16Popcnt) \
|
||||
V(Loong64I8x16BitMask) \
|
||||
V(Loong64S128And) \
|
||||
V(Loong64S128Or) \
|
||||
V(Loong64S128Xor) \
|
||||
V(Loong64S128Not) \
|
||||
V(Loong64S128Select) \
|
||||
V(Loong64S128AndNot) \
|
||||
V(Loong64I64x2AllTrue) \
|
||||
V(Loong64I32x4AllTrue) \
|
||||
V(Loong64I16x8AllTrue) \
|
||||
V(Loong64I8x16AllTrue) \
|
||||
V(Loong64V128AnyTrue) \
|
||||
V(Loong64S32x4InterleaveRight) \
|
||||
V(Loong64S32x4InterleaveLeft) \
|
||||
V(Loong64S32x4PackEven) \
|
||||
V(Loong64S32x4PackOdd) \
|
||||
V(Loong64S32x4InterleaveEven) \
|
||||
V(Loong64S32x4InterleaveOdd) \
|
||||
V(Loong64S32x4Shuffle) \
|
||||
V(Loong64S16x8InterleaveRight) \
|
||||
V(Loong64S16x8InterleaveLeft) \
|
||||
V(Loong64S16x8PackEven) \
|
||||
V(Loong64S16x8PackOdd) \
|
||||
V(Loong64S16x8InterleaveEven) \
|
||||
V(Loong64S16x8InterleaveOdd) \
|
||||
V(Loong64S16x4Reverse) \
|
||||
V(Loong64S16x2Reverse) \
|
||||
V(Loong64S8x16InterleaveRight) \
|
||||
V(Loong64S8x16InterleaveLeft) \
|
||||
V(Loong64S8x16PackEven) \
|
||||
V(Loong64S8x16PackOdd) \
|
||||
V(Loong64S8x16InterleaveEven) \
|
||||
V(Loong64S8x16InterleaveOdd) \
|
||||
V(Loong64I8x16Shuffle) \
|
||||
V(Loong64I8x16Swizzle) \
|
||||
V(Loong64S8x16Concat) \
|
||||
V(Loong64S8x8Reverse) \
|
||||
V(Loong64S8x4Reverse) \
|
||||
V(Loong64S8x2Reverse) \
|
||||
V(Loong64S128LoadSplat) \
|
||||
V(Loong64S128Load8x8S) \
|
||||
V(Loong64S128Load8x8U) \
|
||||
V(Loong64S128Load16x4S) \
|
||||
V(Loong64S128Load16x4U) \
|
||||
V(Loong64S128Load32x2S) \
|
||||
V(Loong64S128Load32x2U) \
|
||||
V(Loong64S128Load32Zero) \
|
||||
V(Loong64S128Load64Zero) \
|
||||
V(Loong64LoadLane) \
|
||||
V(Loong64StoreLane) \
|
||||
V(Loong64I32x4SConvertI16x8Low) \
|
||||
V(Loong64I32x4SConvertI16x8High) \
|
||||
V(Loong64I32x4UConvertI16x8Low) \
|
||||
V(Loong64I32x4UConvertI16x8High) \
|
||||
V(Loong64I16x8SConvertI8x16Low) \
|
||||
V(Loong64I16x8SConvertI8x16High) \
|
||||
V(Loong64I16x8SConvertI32x4) \
|
||||
V(Loong64I16x8UConvertI32x4) \
|
||||
V(Loong64I16x8UConvertI8x16Low) \
|
||||
V(Loong64I16x8UConvertI8x16High) \
|
||||
V(Loong64I8x16SConvertI16x8) \
|
||||
V(Loong64I8x16UConvertI16x8) \
|
||||
V(Loong64Word64AtomicLoadUint32) \
|
||||
V(Loong64Word64AtomicLoadUint64) \
|
||||
V(Loong64Word64AtomicStoreWord64) \
|
||||
V(Loong64Word64AtomicAddUint64) \
|
||||
V(Loong64Word64AtomicSubUint64) \
|
||||
V(Loong64Word64AtomicAndUint64) \
|
||||
V(Loong64Word64AtomicOrUint64) \
|
||||
V(Loong64Word64AtomicXorUint64) \
|
||||
V(Loong64Word64AtomicExchangeUint64) \
|
||||
V(Loong64Word64AtomicCompareExchangeUint64)
|
||||
|
||||
// Addressing modes represent the "shape" of inputs to an instruction.
|
||||
// Many instructions support multiple addressing modes. Addressing modes
|
||||
// are encoded into the InstructionCode of the instruction and tell the
|
||||
// code generator after register allocation which assembler method to call.
|
||||
//
|
||||
// We use the following local notation for addressing modes:
|
||||
//
|
||||
// R = register
|
||||
// O = register or stack slot
|
||||
// D = double register
|
||||
// I = immediate (handle, external, int32)
|
||||
// MRI = [register + immediate]
|
||||
// MRR = [register + register]
|
||||
#define TARGET_ADDRESSING_MODE_LIST(V) \
|
||||
V(MRI) /* [%r0 + K] */ \
|
||||
V(MRR) /* [%r0 + %r1] */ \
|
||||
V(Root) /* [%rr + K] */
|
||||
|
||||
} // namespace compiler
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
|
@ -0,0 +1,26 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/codegen/macro-assembler.h"
|
||||
#include "src/compiler/backend/instruction-scheduler.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace compiler {
|
||||
|
||||
// TODO(LOONG_dev): LOONG64 Support instruction scheduler.
|
||||
bool InstructionScheduler::SchedulerSupported() { return false; }
|
||||
|
||||
int InstructionScheduler::GetTargetInstructionFlags(
|
||||
const Instruction* instr) const {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
} // namespace compiler
|
||||
} // namespace internal
|
||||
} // namespace v8
|
3112
src/compiler/backend/loong64/instruction-selector-loong64.cc
Normal file
3112
src/compiler/backend/loong64/instruction-selector-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
@ -100,6 +100,18 @@ namespace {
|
||||
#define CALLEE_SAVE_FP_REGISTERS \
|
||||
f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
|
||||
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
// ===========================================================================
|
||||
// == loong64 ================================================================
|
||||
// ===========================================================================
|
||||
#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
|
||||
#define CALLEE_SAVE_REGISTERS \
|
||||
s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
|
||||
s7.bit() | s8.bit() | fp.bit()
|
||||
#define CALLEE_SAVE_FP_REGISTERS \
|
||||
f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \
|
||||
f30.bit() | f31.bit()
|
||||
|
||||
#elif V8_TARGET_ARCH_PPC64
|
||||
// ===========================================================================
|
||||
// == ppc & ppc64 ============================================================
|
||||
|
42
src/deoptimizer/loong64/deoptimizer-loong64.cc
Normal file
42
src/deoptimizer/loong64/deoptimizer-loong64.cc
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/deoptimizer/deoptimizer.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
|
||||
const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
|
||||
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
|
||||
const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
|
||||
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
|
||||
kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
|
||||
// TODO(LOONG_dev): LOONG64 Is the PcOffset right?
|
||||
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
|
||||
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
|
||||
kInstrSize + kSystemPointerSize;
|
||||
|
||||
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
|
||||
return Float32::FromBits(
|
||||
static_cast<uint32_t>(double_registers_[n].get_bits()));
|
||||
}
|
||||
|
||||
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
|
||||
SetFrameSlot(offset, value);
|
||||
}
|
||||
|
||||
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
|
||||
SetFrameSlot(offset, value);
|
||||
}
|
||||
|
||||
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
|
||||
// No embedded constant pool support.
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
@ -1080,6 +1080,8 @@ class DebugInfoSection : public DebugSection {
|
||||
UNIMPLEMENTED();
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
UNIMPLEMENTED();
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
UNIMPLEMENTED();
|
||||
#elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
|
||||
w->Write<uint8_t>(DW_OP_reg31); // The frame pointer is here on PPC64.
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
|
1702
src/diagnostics/loong64/disasm-loong64.cc
Normal file
1702
src/diagnostics/loong64/disasm-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
14
src/diagnostics/loong64/unwinder-loong64.cc
Normal file
14
src/diagnostics/loong64/unwinder-loong64.cc
Normal file
@ -0,0 +1,14 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/diagnostics/unwinder.h"
|
||||
|
||||
namespace v8 {
|
||||
|
||||
struct RegisterState;
|
||||
|
||||
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
|
||||
RegisterState* register_state) {}
|
||||
|
||||
} // namespace v8
|
@ -87,6 +87,7 @@ class PerfJitLogger : public CodeEventLogger {
|
||||
static const uint32_t kElfMachARM = 40;
|
||||
static const uint32_t kElfMachMIPS = 8;
|
||||
static const uint32_t kElfMachMIPS64 = 8;
|
||||
static const uint32_t kElfMachLOONG64 = 258;
|
||||
static const uint32_t kElfMachARM64 = 183;
|
||||
static const uint32_t kElfMachS390x = 22;
|
||||
static const uint32_t kElfMachPPC64 = 21;
|
||||
@ -103,6 +104,8 @@ class PerfJitLogger : public CodeEventLogger {
|
||||
return kElfMachMIPS;
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
return kElfMachMIPS64;
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
return kElfMachLOONG64;
|
||||
#elif V8_TARGET_ARCH_ARM64
|
||||
return kElfMachARM64;
|
||||
#elif V8_TARGET_ARCH_S390X
|
||||
|
@ -403,6 +403,8 @@ inline static int FrameSlotToFPOffset(int slot) {
|
||||
#include "src/execution/mips/frame-constants-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/execution/mips64/frame-constants-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/execution/loong64/frame-constants-loong64.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#include "src/execution/s390/frame-constants-s390.h"
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
32
src/execution/loong64/frame-constants-loong64.cc
Normal file
32
src/execution/loong64/frame-constants-loong64.cc
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#if V8_TARGET_ARCH_LOONG64
|
||||
|
||||
#include "src/execution/loong64/frame-constants-loong64.h"
|
||||
|
||||
#include "src/codegen/loong64/assembler-loong64-inl.h"
|
||||
#include "src/execution/frame-constants.h"
|
||||
#include "src/execution/frames.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
|
||||
Register JavaScriptFrame::context_register() { return cp; }
|
||||
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
|
||||
|
||||
int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
|
||||
return register_count;
|
||||
}
|
||||
|
||||
int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
|
||||
USE(register_count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_TARGET_ARCH_LOONG64
|
76
src/execution/loong64/frame-constants-loong64.h
Normal file
76
src/execution/loong64/frame-constants-loong64.h
Normal file
@ -0,0 +1,76 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
|
||||
#define V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
|
||||
|
||||
#include "src/base/bits.h"
|
||||
#include "src/base/macros.h"
|
||||
#include "src/execution/frame-constants.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
class EntryFrameConstants : public AllStatic {
|
||||
public:
|
||||
// This is the offset to where JSEntry pushes the current value of
|
||||
// Isolate::c_entry_fp onto the stack.
|
||||
static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
|
||||
};
|
||||
|
||||
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
|
||||
public:
|
||||
static constexpr int kNumberOfSavedGpParamRegs = 7;
|
||||
static constexpr int kNumberOfSavedFpParamRegs = 8;
|
||||
static constexpr int kNumberOfSavedAllParamRegs = 15;
|
||||
|
||||
// FP-relative.
|
||||
// See Generate_WasmCompileLazy in builtins-loong64.cc.
|
||||
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(6);
|
||||
static constexpr int kFixedFrameSizeFromFp =
|
||||
TypedFrameConstants::kFixedFrameSizeFromFp +
|
||||
kNumberOfSavedGpParamRegs * kPointerSize +
|
||||
kNumberOfSavedFpParamRegs * kDoubleSize;
|
||||
};
|
||||
|
||||
// Frame constructed by the {WasmDebugBreak} builtin.
|
||||
// After pushing the frame type marker, the builtin pushes all Liftoff cache
|
||||
// registers (see liftoff-assembler-defs.h).
|
||||
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
|
||||
public:
|
||||
// {a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, t6, t7, t8, s7}
|
||||
static constexpr uint32_t kPushedGpRegs = 0b1000000000111111111111111110000;
|
||||
// {f0, f1, f2, ... f27, f28}
|
||||
static constexpr uint32_t kPushedFpRegs = 0x1fffffff;
|
||||
|
||||
static constexpr int kNumPushedGpRegisters =
|
||||
base::bits::CountPopulation(kPushedGpRegs);
|
||||
static constexpr int kNumPushedFpRegisters =
|
||||
base::bits::CountPopulation(kPushedFpRegs);
|
||||
|
||||
static constexpr int kLastPushedGpRegisterOffset =
|
||||
-kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
|
||||
static constexpr int kLastPushedFpRegisterOffset =
|
||||
kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
|
||||
|
||||
// Offsets are fp-relative.
|
||||
static int GetPushedGpRegisterOffset(int reg_code) {
|
||||
DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
|
||||
uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
|
||||
return kLastPushedGpRegisterOffset +
|
||||
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
|
||||
}
|
||||
|
||||
static int GetPushedFpRegisterOffset(int reg_code) {
|
||||
DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
|
||||
uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
|
||||
return kLastPushedFpRegisterOffset +
|
||||
base::bits::CountPopulation(lower_regs) * kDoubleSize;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
|
5589
src/execution/loong64/simulator-loong64.cc
Normal file
5589
src/execution/loong64/simulator-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
647
src/execution/loong64/simulator-loong64.h
Normal file
647
src/execution/loong64/simulator-loong64.h
Normal file
@ -0,0 +1,647 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Declares a Simulator for loongisa instructions if we are not generating a
|
||||
// native loongisa binary. This Simulator allows us to run and debug loongisa
|
||||
// code generation on regular desktop machines. V8 calls into generated code via
|
||||
// the GeneratedCode wrapper, which will start execution in the Simulator or
|
||||
// forwards to the real entry on a loongisa HW platform.
|
||||
|
||||
#ifndef V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
|
||||
#define V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
|
||||
|
||||
// globals.h defines USE_SIMULATOR.
|
||||
#include "src/common/globals.h"
|
||||
|
||||
template <typename T>
|
||||
int Compare(const T& a, const T& b) {
|
||||
if (a == b)
|
||||
return 0;
|
||||
else if (a < b)
|
||||
return -1;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Returns the negative absolute value of its argument.
|
||||
template <typename T,
|
||||
typename = typename std::enable_if<std::is_signed<T>::value>::type>
|
||||
T Nabs(T a) {
|
||||
return a < 0 ? a : -a;
|
||||
}
|
||||
|
||||
#if defined(USE_SIMULATOR)
|
||||
// Running with a simulator.
|
||||
|
||||
#include "src/base/hashmap.h"
|
||||
#include "src/base/strings.h"
|
||||
#include "src/codegen/assembler.h"
|
||||
#include "src/codegen/loong64/constants-loong64.h"
|
||||
#include "src/execution/simulator-base.h"
|
||||
#include "src/utils/allocation.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Utility functions
|
||||
|
||||
class CachePage {
|
||||
public:
|
||||
static const int LINE_VALID = 0;
|
||||
static const int LINE_INVALID = 1;
|
||||
|
||||
static const int kPageShift = 12;
|
||||
static const int kPageSize = 1 << kPageShift;
|
||||
static const int kPageMask = kPageSize - 1;
|
||||
static const int kLineShift = 2; // The cache line is only 4 bytes right now.
|
||||
static const int kLineLength = 1 << kLineShift;
|
||||
static const int kLineMask = kLineLength - 1;
|
||||
|
||||
CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
|
||||
|
||||
char* ValidityByte(int offset) {
|
||||
return &validity_map_[offset >> kLineShift];
|
||||
}
|
||||
|
||||
char* CachedData(int offset) { return &data_[offset]; }
|
||||
|
||||
private:
|
||||
char data_[kPageSize]; // The cached data.
|
||||
static const int kValidityMapSize = kPageSize >> kLineShift;
|
||||
char validity_map_[kValidityMapSize]; // One byte per line.
|
||||
};
|
||||
|
||||
class SimInstructionBase : public InstructionBase {
|
||||
public:
|
||||
Type InstructionType() const { return type_; }
|
||||
inline Instruction* instr() const { return instr_; }
|
||||
inline int32_t operand() const { return operand_; }
|
||||
|
||||
protected:
|
||||
SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
|
||||
explicit SimInstructionBase(Instruction* instr) {}
|
||||
|
||||
int32_t operand_;
|
||||
Instruction* instr_;
|
||||
Type type_;
|
||||
|
||||
private:
|
||||
DISALLOW_ASSIGN(SimInstructionBase);
|
||||
};
|
||||
|
||||
class SimInstruction : public InstructionGetters<SimInstructionBase> {
|
||||
public:
|
||||
SimInstruction() {}
|
||||
|
||||
explicit SimInstruction(Instruction* instr) { *this = instr; }
|
||||
|
||||
SimInstruction& operator=(Instruction* instr) {
|
||||
operand_ = *reinterpret_cast<const int32_t*>(instr);
|
||||
instr_ = instr;
|
||||
type_ = InstructionBase::InstructionType();
|
||||
DCHECK(reinterpret_cast<void*>(&operand_) == this);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
class Simulator : public SimulatorBase {
|
||||
public:
|
||||
friend class Loong64Debugger;
|
||||
|
||||
// Registers are declared in order.
|
||||
enum Register {
|
||||
no_reg = -1,
|
||||
zero_reg = 0,
|
||||
ra,
|
||||
gp,
|
||||
sp,
|
||||
a0,
|
||||
a1,
|
||||
a2,
|
||||
a3,
|
||||
a4,
|
||||
a5,
|
||||
a6,
|
||||
a7,
|
||||
t0,
|
||||
t1,
|
||||
t2,
|
||||
t3,
|
||||
t4,
|
||||
t5,
|
||||
t6,
|
||||
t7,
|
||||
t8,
|
||||
tp,
|
||||
fp,
|
||||
s0,
|
||||
s1,
|
||||
s2,
|
||||
s3,
|
||||
s4,
|
||||
s5,
|
||||
s6,
|
||||
s7,
|
||||
s8,
|
||||
pc, // pc must be the last register.
|
||||
kNumSimuRegisters,
|
||||
// aliases
|
||||
v0 = a0,
|
||||
v1 = a1
|
||||
};
|
||||
|
||||
// Condition flag registers.
|
||||
enum CFRegister {
|
||||
fcc0,
|
||||
fcc1,
|
||||
fcc2,
|
||||
fcc3,
|
||||
fcc4,
|
||||
fcc5,
|
||||
fcc6,
|
||||
fcc7,
|
||||
kNumCFRegisters
|
||||
};
|
||||
|
||||
// Floating point registers.
|
||||
enum FPURegister {
|
||||
f0,
|
||||
f1,
|
||||
f2,
|
||||
f3,
|
||||
f4,
|
||||
f5,
|
||||
f6,
|
||||
f7,
|
||||
f8,
|
||||
f9,
|
||||
f10,
|
||||
f11,
|
||||
f12,
|
||||
f13,
|
||||
f14,
|
||||
f15,
|
||||
f16,
|
||||
f17,
|
||||
f18,
|
||||
f19,
|
||||
f20,
|
||||
f21,
|
||||
f22,
|
||||
f23,
|
||||
f24,
|
||||
f25,
|
||||
f26,
|
||||
f27,
|
||||
f28,
|
||||
f29,
|
||||
f30,
|
||||
f31,
|
||||
kNumFPURegisters
|
||||
};
|
||||
|
||||
explicit Simulator(Isolate* isolate);
|
||||
~Simulator();
|
||||
|
||||
// The currently executing Simulator instance. Potentially there can be one
|
||||
// for each native thread.
|
||||
V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
|
||||
|
||||
// Accessors for register state. Reading the pc value adheres to the LOONG64
|
||||
// architecture specification and is off by a 8 from the currently executing
|
||||
// instruction.
|
||||
void set_register(int reg, int64_t value);
|
||||
void set_register_word(int reg, int32_t value);
|
||||
void set_dw_register(int dreg, const int* dbl);
|
||||
V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
|
||||
double get_double_from_register_pair(int reg);
|
||||
// Same for FPURegisters.
|
||||
void set_fpu_register(int fpureg, int64_t value);
|
||||
void set_fpu_register_word(int fpureg, int32_t value);
|
||||
void set_fpu_register_hi_word(int fpureg, int32_t value);
|
||||
void set_fpu_register_float(int fpureg, float value);
|
||||
void set_fpu_register_double(int fpureg, double value);
|
||||
void set_fpu_register_invalid_result64(float original, float rounded);
|
||||
void set_fpu_register_invalid_result(float original, float rounded);
|
||||
void set_fpu_register_word_invalid_result(float original, float rounded);
|
||||
void set_fpu_register_invalid_result64(double original, double rounded);
|
||||
void set_fpu_register_invalid_result(double original, double rounded);
|
||||
void set_fpu_register_word_invalid_result(double original, double rounded);
|
||||
int64_t get_fpu_register(int fpureg) const;
|
||||
int32_t get_fpu_register_word(int fpureg) const;
|
||||
int32_t get_fpu_register_signed_word(int fpureg) const;
|
||||
int32_t get_fpu_register_hi_word(int fpureg) const;
|
||||
float get_fpu_register_float(int fpureg) const;
|
||||
double get_fpu_register_double(int fpureg) const;
|
||||
void set_cf_register(int cfreg, bool value);
|
||||
bool get_cf_register(int cfreg) const;
|
||||
void set_fcsr_rounding_mode(FPURoundingMode mode);
|
||||
unsigned int get_fcsr_rounding_mode();
|
||||
void set_fcsr_bit(uint32_t cc, bool value);
|
||||
bool test_fcsr_bit(uint32_t cc);
|
||||
bool set_fcsr_round_error(double original, double rounded);
|
||||
bool set_fcsr_round64_error(double original, double rounded);
|
||||
bool set_fcsr_round_error(float original, float rounded);
|
||||
bool set_fcsr_round64_error(float original, float rounded);
|
||||
void round_according_to_fcsr(double toRound, double* rounded,
|
||||
int32_t* rounded_int);
|
||||
void round64_according_to_fcsr(double toRound, double* rounded,
|
||||
int64_t* rounded_int);
|
||||
void round_according_to_fcsr(float toRound, float* rounded,
|
||||
int32_t* rounded_int);
|
||||
void round64_according_to_fcsr(float toRound, float* rounded,
|
||||
int64_t* rounded_int);
|
||||
// Special case of set_register and get_register to access the raw PC value.
|
||||
void set_pc(int64_t value);
|
||||
V8_EXPORT_PRIVATE int64_t get_pc() const;
|
||||
|
||||
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
|
||||
|
||||
// Accessor to the internal simulator stack area.
|
||||
uintptr_t StackLimit(uintptr_t c_limit) const;
|
||||
|
||||
// Executes LOONG64 instructions until the PC reaches end_sim_pc.
|
||||
void Execute();
|
||||
|
||||
template <typename Return, typename... Args>
|
||||
Return Call(Address entry, Args... args) {
|
||||
return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
|
||||
}
|
||||
|
||||
// Alternative: call a 2-argument double function.
|
||||
double CallFP(Address entry, double d0, double d1);
|
||||
|
||||
// Push an address onto the JS stack.
|
||||
uintptr_t PushAddress(uintptr_t address);
|
||||
|
||||
// Pop an address from the JS stack.
|
||||
uintptr_t PopAddress();
|
||||
|
||||
// Debugger input.
|
||||
void set_last_debugger_input(char* input);
|
||||
char* last_debugger_input() { return last_debugger_input_; }
|
||||
|
||||
// Redirection support.
|
||||
static void SetRedirectInstruction(Instruction* instruction);
|
||||
|
||||
// ICache checking.
|
||||
static bool ICacheMatch(void* one, void* two);
|
||||
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
|
||||
size_t size);
|
||||
|
||||
// Returns true if pc register contains one of the 'special_values' defined
|
||||
// below (bad_ra, end_sim_pc).
|
||||
bool has_bad_pc() const;
|
||||
|
||||
private:
|
||||
enum special_values {
|
||||
// Known bad pc value to ensure that the simulator does not execute
|
||||
// without being properly setup.
|
||||
bad_ra = -1,
|
||||
// A pc value used to signal the simulator to stop execution. Generally
|
||||
// the ra is set to this value on transition from native C code to
|
||||
// simulated execution, so that the simulator can "return" to the native
|
||||
// C code.
|
||||
end_sim_pc = -2,
|
||||
// Unpredictable value.
|
||||
Unpredictable = 0xbadbeaf
|
||||
};
|
||||
|
||||
V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
|
||||
const intptr_t* arguments);
|
||||
|
||||
// Unsupported instructions use Format to print an error and stop execution.
|
||||
void Format(Instruction* instr, const char* format);
|
||||
|
||||
// Helpers for data value tracing.
|
||||
enum TraceType {
|
||||
BYTE,
|
||||
HALF,
|
||||
WORD,
|
||||
DWORD,
|
||||
FLOAT,
|
||||
DOUBLE,
|
||||
FLOAT_DOUBLE,
|
||||
WORD_DWORD
|
||||
};
|
||||
|
||||
// Read and write memory.
|
||||
inline uint32_t ReadBU(int64_t addr);
|
||||
inline int32_t ReadB(int64_t addr);
|
||||
inline void WriteB(int64_t addr, uint8_t value);
|
||||
inline void WriteB(int64_t addr, int8_t value);
|
||||
|
||||
inline uint16_t ReadHU(int64_t addr, Instruction* instr);
|
||||
inline int16_t ReadH(int64_t addr, Instruction* instr);
|
||||
// Note: Overloaded on the sign of the value.
|
||||
inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
|
||||
inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
|
||||
|
||||
inline uint32_t ReadWU(int64_t addr, Instruction* instr);
|
||||
inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
|
||||
inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
|
||||
void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
|
||||
int32_t rt_reg);
|
||||
inline int64_t Read2W(int64_t addr, Instruction* instr);
|
||||
inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
|
||||
inline void WriteConditional2W(int64_t addr, int64_t value,
|
||||
Instruction* instr, int32_t rt_reg);
|
||||
|
||||
inline double ReadD(int64_t addr, Instruction* instr);
|
||||
inline void WriteD(int64_t addr, double value, Instruction* instr);
|
||||
|
||||
template <typename T>
|
||||
T ReadMem(int64_t addr, Instruction* instr);
|
||||
template <typename T>
|
||||
void WriteMem(int64_t addr, T value, Instruction* instr);
|
||||
|
||||
// Helper for debugging memory access.
|
||||
inline void DieOrDebug();
|
||||
|
||||
void TraceRegWr(int64_t value, TraceType t = DWORD);
|
||||
void TraceMemWr(int64_t addr, int64_t value, TraceType t);
|
||||
void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
|
||||
template <typename T>
|
||||
void TraceMemRd(int64_t addr, T value);
|
||||
template <typename T>
|
||||
void TraceMemWr(int64_t addr, T value);
|
||||
|
||||
SimInstruction instr_;
|
||||
|
||||
// Executing is handled based on the instruction type.
|
||||
void DecodeTypeOp6();
|
||||
void DecodeTypeOp7();
|
||||
void DecodeTypeOp8();
|
||||
void DecodeTypeOp10();
|
||||
void DecodeTypeOp12();
|
||||
void DecodeTypeOp14();
|
||||
void DecodeTypeOp17();
|
||||
void DecodeTypeOp22();
|
||||
|
||||
inline int32_t rj_reg() const { return instr_.RjValue(); }
|
||||
inline int64_t rj() const { return get_register(rj_reg()); }
|
||||
inline uint64_t rj_u() const {
|
||||
return static_cast<uint64_t>(get_register(rj_reg()));
|
||||
}
|
||||
inline int32_t rk_reg() const { return instr_.RkValue(); }
|
||||
inline int64_t rk() const { return get_register(rk_reg()); }
|
||||
inline uint64_t rk_u() const {
|
||||
return static_cast<uint64_t>(get_register(rk_reg()));
|
||||
}
|
||||
inline int32_t rd_reg() const { return instr_.RdValue(); }
|
||||
inline int64_t rd() const { return get_register(rd_reg()); }
|
||||
inline uint64_t rd_u() const {
|
||||
return static_cast<uint64_t>(get_register(rd_reg()));
|
||||
}
|
||||
inline int32_t fa_reg() const { return instr_.FaValue(); }
|
||||
inline float fa_float() const { return get_fpu_register_float(fa_reg()); }
|
||||
inline double fa_double() const { return get_fpu_register_double(fa_reg()); }
|
||||
inline int32_t fj_reg() const { return instr_.FjValue(); }
|
||||
inline float fj_float() const { return get_fpu_register_float(fj_reg()); }
|
||||
inline double fj_double() const { return get_fpu_register_double(fj_reg()); }
|
||||
inline int32_t fk_reg() const { return instr_.FkValue(); }
|
||||
inline float fk_float() const { return get_fpu_register_float(fk_reg()); }
|
||||
inline double fk_double() const { return get_fpu_register_double(fk_reg()); }
|
||||
inline int32_t fd_reg() const { return instr_.FdValue(); }
|
||||
inline float fd_float() const { return get_fpu_register_float(fd_reg()); }
|
||||
inline double fd_double() const { return get_fpu_register_double(fd_reg()); }
|
||||
inline int32_t cj_reg() const { return instr_.CjValue(); }
|
||||
inline bool cj() const { return get_cf_register(cj_reg()); }
|
||||
inline int32_t cd_reg() const { return instr_.CdValue(); }
|
||||
inline bool cd() const { return get_cf_register(cd_reg()); }
|
||||
inline int32_t ca_reg() const { return instr_.CaValue(); }
|
||||
inline bool ca() const { return get_cf_register(ca_reg()); }
|
||||
inline uint32_t sa2() const { return instr_.Sa2Value(); }
|
||||
inline uint32_t sa3() const { return instr_.Sa3Value(); }
|
||||
inline uint32_t ui5() const { return instr_.Ui5Value(); }
|
||||
inline uint32_t ui6() const { return instr_.Ui6Value(); }
|
||||
inline uint32_t lsbw() const { return instr_.LsbwValue(); }
|
||||
inline uint32_t msbw() const { return instr_.MsbwValue(); }
|
||||
inline uint32_t lsbd() const { return instr_.LsbdValue(); }
|
||||
inline uint32_t msbd() const { return instr_.MsbdValue(); }
|
||||
inline uint32_t cond() const { return instr_.CondValue(); }
|
||||
inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; }
|
||||
inline uint32_t ui12() const { return instr_.Ui12Value(); }
|
||||
inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; }
|
||||
inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; }
|
||||
inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; }
|
||||
|
||||
inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
|
||||
set_register(rd_reg, alu_out);
|
||||
TraceRegWr(alu_out);
|
||||
}
|
||||
|
||||
inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
|
||||
set_fpu_register_word(fd_reg, alu_out);
|
||||
TraceRegWr(get_fpu_register(fd_reg), WORD);
|
||||
}
|
||||
|
||||
inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
|
||||
set_fpu_register_word(fd_reg, alu_out);
|
||||
TraceRegWr(get_fpu_register(fd_reg));
|
||||
}
|
||||
|
||||
inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
|
||||
set_fpu_register(fd_reg, alu_out);
|
||||
TraceRegWr(get_fpu_register(fd_reg));
|
||||
}
|
||||
|
||||
inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
|
||||
set_fpu_register(fd_reg, alu_out);
|
||||
TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
|
||||
}
|
||||
|
||||
inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
|
||||
set_fpu_register_float(fd_reg, alu_out);
|
||||
TraceRegWr(get_fpu_register(fd_reg), FLOAT);
|
||||
}
|
||||
|
||||
inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
|
||||
set_fpu_register_double(fd_reg, alu_out);
|
||||
TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
|
||||
}
|
||||
|
||||
// Used for breakpoints.
|
||||
void SoftwareInterrupt();
|
||||
|
||||
// Stop helper functions.
|
||||
bool IsWatchpoint(uint64_t code);
|
||||
void PrintWatchpoint(uint64_t code);
|
||||
void HandleStop(uint64_t code, Instruction* instr);
|
||||
bool IsStopInstruction(Instruction* instr);
|
||||
bool IsEnabledStop(uint64_t code);
|
||||
void EnableStop(uint64_t code);
|
||||
void DisableStop(uint64_t code);
|
||||
void IncreaseStopCounter(uint64_t code);
|
||||
void PrintStopInfo(uint64_t code);
|
||||
|
||||
// Executes one instruction.
|
||||
void InstructionDecode(Instruction* instr);
|
||||
// Execute one instruction placed in a branch delay slot.
|
||||
|
||||
// ICache.
|
||||
static void CheckICache(base::CustomMatcherHashMap* i_cache,
|
||||
Instruction* instr);
|
||||
static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
|
||||
size_t size);
|
||||
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
|
||||
void* page);
|
||||
|
||||
enum Exception {
|
||||
none,
|
||||
kIntegerOverflow,
|
||||
kIntegerUnderflow,
|
||||
kDivideByZero,
|
||||
kNumExceptions
|
||||
};
|
||||
|
||||
// Exceptions.
|
||||
void SignalException(Exception e);
|
||||
|
||||
// Handle arguments and return value for runtime FP functions.
|
||||
void GetFpArgs(double* x, double* y, int32_t* z);
|
||||
void SetFpResult(const double& result);
|
||||
|
||||
void CallInternal(Address entry);
|
||||
|
||||
// Architecture state.
|
||||
// Registers.
|
||||
int64_t registers_[kNumSimuRegisters];
|
||||
// Floating point Registers.
|
||||
int64_t FPUregisters_[kNumFPURegisters];
|
||||
// Condition flags Registers.
|
||||
bool CFregisters_[kNumCFRegisters];
|
||||
// FPU control register.
|
||||
uint32_t FCSR_;
|
||||
|
||||
// Simulator support.
|
||||
// Allocate 1MB for stack.
|
||||
size_t stack_size_;
|
||||
char* stack_;
|
||||
bool pc_modified_;
|
||||
int64_t icount_;
|
||||
int break_count_;
|
||||
base::EmbeddedVector<char, 128> trace_buf_;
|
||||
|
||||
// Debugger input.
|
||||
char* last_debugger_input_;
|
||||
|
||||
v8::internal::Isolate* isolate_;
|
||||
|
||||
// Registered breakpoints.
|
||||
Instruction* break_pc_;
|
||||
Instr break_instr_;
|
||||
|
||||
// Stop is disabled if bit 31 is set.
|
||||
static const uint32_t kStopDisabledBit = 1 << 31;
|
||||
|
||||
// A stop is enabled, meaning the simulator will stop when meeting the
|
||||
// instruction, if bit 31 of watched_stops_[code].count is unset.
|
||||
// The value watched_stops_[code].count & ~(1 << 31) indicates how many times
|
||||
// the breakpoint was hit or gone through.
|
||||
struct StopCountAndDesc {
|
||||
uint32_t count;
|
||||
char* desc;
|
||||
};
|
||||
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
|
||||
|
||||
// Synchronization primitives.
|
||||
enum class MonitorAccess {
|
||||
Open,
|
||||
RMW,
|
||||
};
|
||||
|
||||
enum class TransactionSize {
|
||||
None = 0,
|
||||
Word = 4,
|
||||
DoubleWord = 8,
|
||||
};
|
||||
|
||||
// The least-significant bits of the address are ignored. The number of bits
|
||||
// is implementation-defined, between 3 and minimum page size.
|
||||
static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
|
||||
|
||||
class LocalMonitor {
|
||||
public:
|
||||
LocalMonitor();
|
||||
|
||||
// These functions manage the state machine for the local monitor, but do
|
||||
// not actually perform loads and stores. NotifyStoreConditional only
|
||||
// returns true if the store conditional is allowed; the global monitor will
|
||||
// still have to be checked to see whether the memory should be updated.
|
||||
void NotifyLoad();
|
||||
void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
|
||||
void NotifyStore();
|
||||
bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
|
||||
|
||||
private:
|
||||
void Clear();
|
||||
|
||||
MonitorAccess access_state_;
|
||||
uintptr_t tagged_addr_;
|
||||
TransactionSize size_;
|
||||
};
|
||||
|
||||
class GlobalMonitor {
|
||||
public:
|
||||
class LinkedAddress {
|
||||
public:
|
||||
LinkedAddress();
|
||||
|
||||
private:
|
||||
friend class GlobalMonitor;
|
||||
// These functions manage the state machine for the global monitor, but do
|
||||
// not actually perform loads and stores.
|
||||
void Clear_Locked();
|
||||
void NotifyLoadLinked_Locked(uintptr_t addr);
|
||||
void NotifyStore_Locked();
|
||||
bool NotifyStoreConditional_Locked(uintptr_t addr,
|
||||
bool is_requesting_thread);
|
||||
|
||||
MonitorAccess access_state_;
|
||||
uintptr_t tagged_addr_;
|
||||
LinkedAddress* next_;
|
||||
LinkedAddress* prev_;
|
||||
// A scd can fail due to background cache evictions. Rather than
|
||||
// simulating this, we'll just occasionally introduce cases where an
|
||||
// store conditional fails. This will happen once after every
|
||||
// kMaxFailureCounter exclusive stores.
|
||||
static const int kMaxFailureCounter = 5;
|
||||
int failure_counter_;
|
||||
};
|
||||
|
||||
// Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
|
||||
base::Mutex mutex;
|
||||
|
||||
void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
|
||||
void NotifyStore_Locked(LinkedAddress* linked_address);
|
||||
bool NotifyStoreConditional_Locked(uintptr_t addr,
|
||||
LinkedAddress* linked_address);
|
||||
|
||||
// Called when the simulator is destroyed.
|
||||
void RemoveLinkedAddress(LinkedAddress* linked_address);
|
||||
|
||||
static GlobalMonitor* Get();
|
||||
|
||||
private:
|
||||
// Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
|
||||
GlobalMonitor() = default;
|
||||
friend class base::LeakyObject<GlobalMonitor>;
|
||||
|
||||
bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
|
||||
void PrependProcessor_Locked(LinkedAddress* linked_address);
|
||||
|
||||
LinkedAddress* head_ = nullptr;
|
||||
};
|
||||
|
||||
LocalMonitor local_monitor_;
|
||||
GlobalMonitor::LinkedAddress global_monitor_thread_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // defined(USE_SIMULATOR)
|
||||
#endif // V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
|
@ -88,9 +88,9 @@ class SimulatorBase {
|
||||
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
|
||||
ConvertArg(T arg) {
|
||||
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
|
||||
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
|
||||
// The MIPS64 and RISCV64 calling convention is to sign extend all values,
|
||||
// even unsigned ones.
|
||||
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
|
||||
// The MIPS64, LOONG64 and RISCV64 calling convention is to sign extend all
|
||||
// values, even unsigned ones.
|
||||
using signed_t = typename std::make_signed<T>::type;
|
||||
return static_cast<intptr_t>(static_cast<signed_t>(arg));
|
||||
#else
|
||||
|
@ -24,6 +24,8 @@
|
||||
#include "src/execution/mips/simulator-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/execution/mips64/simulator-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/execution/loong64/simulator-loong64.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#include "src/execution/s390/simulator-s390.h"
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -189,7 +189,7 @@ struct MaybeBoolFlag {
|
||||
|
||||
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
|
||||
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
|
||||
V8_TARGET_ARCH_MIPS
|
||||
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
|
||||
#define ENABLE_SPARKPLUG true
|
||||
#else
|
||||
// TODO(v8:11421): Enable Sparkplug for other architectures
|
||||
@ -1577,8 +1577,9 @@ DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
|
||||
DEFINE_BOOL(check_icache, false,
|
||||
"Check icache flushes in ARM and MIPS simulator")
|
||||
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
|
||||
#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
|
||||
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64)
|
||||
#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
|
||||
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64) || \
|
||||
defined(V8_TARGET_ARCH_LOONG64)
|
||||
DEFINE_INT(sim_stack_alignment, 16,
|
||||
"Stack alignment in bytes in simulator. This must be a power of two "
|
||||
"and it must be at least 16. 16 is default.")
|
||||
|
48
src/heap/base/asm/loong64/push_registers_asm.cc
Normal file
48
src/heap/base/asm/loong64/push_registers_asm.cc
Normal file
@ -0,0 +1,48 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Push all callee-saved registers to get them on the stack for conservative
|
||||
// stack scanning.
|
||||
//
|
||||
// See asm/x64/push_registers_clang.cc for why the function is not generated
|
||||
// using clang.
|
||||
//
|
||||
// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
|
||||
// GN toolchain (e.g. ChromeOS) and not provide them.
|
||||
asm(".text \n"
|
||||
".global PushAllRegistersAndIterateStack \n"
|
||||
".type PushAllRegistersAndIterateStack, %function \n"
|
||||
".hidden PushAllRegistersAndIterateStack \n"
|
||||
"PushAllRegistersAndIterateStack: \n"
|
||||
// Push all callee-saved registers and save return address.
|
||||
" addi.d $sp, $sp, -96 \n"
|
||||
" st.d $ra, $sp, 88 \n"
|
||||
" st.d $s8, $sp, 80 \n"
|
||||
" st.d $sp, $sp, 72 \n"
|
||||
" st.d $fp, $sp, 64 \n"
|
||||
" st.d $s7, $sp, 56 \n"
|
||||
" st.d $s6, $sp, 48 \n"
|
||||
" st.d $s5, $sp, 40 \n"
|
||||
" st.d $s4, $sp, 32 \n"
|
||||
" st.d $s3, $sp, 24 \n"
|
||||
" st.d $s2, $sp, 16 \n"
|
||||
" st.d $s1, $sp, 8 \n"
|
||||
" st.d $s0, $sp, 0 \n"
|
||||
// Maintain frame pointer.
|
||||
" addi.d $s8, $sp, 0 \n"
|
||||
// Pass 1st parameter (a0) unchanged (Stack*).
|
||||
// Pass 2nd parameter (a1) unchanged (StackVisitor*).
|
||||
// Save 3rd parameter (a2; IterateStackCallback).
|
||||
" addi.d $a3, $a2, 0 \n"
|
||||
// Call the callback.
|
||||
// Pass 3rd parameter as sp (stack pointer).
|
||||
" addi.d $a2, $sp, 0 \n"
|
||||
" jirl $ra, $a3, 0 \n"
|
||||
// Load return address.
|
||||
" ld.d $ra, $sp, 88 \n"
|
||||
// Restore frame pointer.
|
||||
" ld.d $s8, $sp, 80 \n"
|
||||
// Discard all callee-saved registers.
|
||||
" addi.d $sp, $sp, 96 \n"
|
||||
" jirl $zero, $ra, 0 \n");
|
@ -1357,7 +1357,7 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
|
||||
return false;
|
||||
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
|
||||
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \
|
||||
V8_TARGET_ARCH_PPC64
|
||||
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
|
||||
return true;
|
||||
#else
|
||||
#error "Unknown Architecture"
|
||||
|
@ -412,6 +412,10 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
|
||||
state->pc = reinterpret_cast<void*>(mcontext.pc);
|
||||
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
|
||||
state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
|
||||
#elif V8_HOST_ARCH_LOONG64
|
||||
state->pc = reinterpret_cast<void*>(mcontext.__pc);
|
||||
state->sp = reinterpret_cast<void*>(mcontext.__gregs[3]);
|
||||
state->fp = reinterpret_cast<void*>(mcontext.__gregs[22]);
|
||||
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
|
||||
#if V8_LIBC_GLIBC
|
||||
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
|
||||
|
@ -614,6 +614,8 @@ void LowLevelLogger::LogCodeInfo() {
|
||||
const char arch[] = "ppc64";
|
||||
#elif V8_TARGET_ARCH_MIPS
|
||||
const char arch[] = "mips";
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
const char arch[] = "loong64";
|
||||
#elif V8_TARGET_ARCH_ARM64
|
||||
const char arch[] = "arm64";
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
|
@ -39,8 +39,8 @@ constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
|
||||
|
||||
#endif // V8_ENABLE_WEBASSEMBLY
|
||||
|
||||
#if V8_TARGET_ARCH_MIPS64
|
||||
// MIPS64 has a user space of 2^40 bytes on most processors,
|
||||
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
|
||||
// MIPS64 and LOONG64 has a user space of 2^40 bytes on most processors,
|
||||
// address space limits needs to be smaller.
|
||||
constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -333,7 +333,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
|
||||
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
|
||||
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
|
||||
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
|
||||
defined(V8_TARGET_ARCH_RISCV64)
|
||||
defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
|
||||
for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
|
||||
// On these platforms we emit relative builtin-to-builtin
|
||||
// jumps for isolate independent builtins in the snapshot. They are later
|
||||
|
@ -544,6 +544,8 @@ class Code : public HeapObject {
|
||||
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
static constexpr int kHeaderPaddingSize = 24;
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
static constexpr int kHeaderPaddingSize = 24;
|
||||
#elif V8_TARGET_ARCH_X64
|
||||
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
|
||||
#elif V8_TARGET_ARCH_ARM
|
||||
|
@ -105,7 +105,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
|
||||
state->sp = reinterpret_cast<void*>(simulator->sp());
|
||||
state->fp = reinterpret_cast<void*>(simulator->fp());
|
||||
state->lr = reinterpret_cast<void*>(simulator->lr());
|
||||
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
|
||||
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
|
||||
if (!simulator->has_bad_pc()) {
|
||||
state->pc = reinterpret_cast<void*>(simulator->get_pc());
|
||||
}
|
||||
|
1250
src/regexp/loong64/regexp-macro-assembler-loong64.cc
Normal file
1250
src/regexp/loong64/regexp-macro-assembler-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
215
src/regexp/loong64/regexp-macro-assembler-loong64.h
Normal file
215
src/regexp/loong64/regexp-macro-assembler-loong64.h
Normal file
@ -0,0 +1,215 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
|
||||
#define V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
|
||||
|
||||
#include "src/base/strings.h"
|
||||
#include "src/codegen/loong64/assembler-loong64.h"
|
||||
#include "src/codegen/macro-assembler.h"
|
||||
#include "src/regexp/regexp-macro-assembler.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
|
||||
: public NativeRegExpMacroAssembler {
|
||||
public:
|
||||
RegExpMacroAssemblerLOONG64(Isolate* isolate, Zone* zone, Mode mode,
|
||||
int registers_to_save);
|
||||
virtual ~RegExpMacroAssemblerLOONG64();
|
||||
virtual int stack_limit_slack();
|
||||
virtual void AdvanceCurrentPosition(int by);
|
||||
virtual void AdvanceRegister(int reg, int by);
|
||||
virtual void Backtrack();
|
||||
virtual void Bind(Label* label);
|
||||
virtual void CheckAtStart(int cp_offset, Label* on_at_start);
|
||||
virtual void CheckCharacter(uint32_t c, Label* on_equal);
|
||||
virtual void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
|
||||
Label* on_equal);
|
||||
virtual void CheckCharacterGT(base::uc16 limit, Label* on_greater);
|
||||
virtual void CheckCharacterLT(base::uc16 limit, Label* on_less);
|
||||
// A "greedy loop" is a loop that is both greedy and with a simple
|
||||
// body. It has a particularly simple implementation.
|
||||
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
|
||||
virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
|
||||
virtual void CheckNotBackReference(int start_reg, bool read_backward,
|
||||
Label* on_no_match);
|
||||
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
|
||||
bool read_backward, bool unicode,
|
||||
Label* on_no_match);
|
||||
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
|
||||
virtual void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
|
||||
Label* on_not_equal);
|
||||
virtual void CheckNotCharacterAfterMinusAnd(base::uc16 c, base::uc16 minus,
|
||||
base::uc16 mask,
|
||||
Label* on_not_equal);
|
||||
virtual void CheckCharacterInRange(base::uc16 from, base::uc16 to,
|
||||
Label* on_in_range);
|
||||
virtual void CheckCharacterNotInRange(base::uc16 from, base::uc16 to,
|
||||
Label* on_not_in_range);
|
||||
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
|
||||
|
||||
// Checks whether the given offset from the current position is before
|
||||
// the end of the string.
|
||||
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
|
||||
virtual bool CheckSpecialCharacterClass(base::uc16 type, Label* on_no_match);
|
||||
virtual void Fail();
|
||||
virtual Handle<HeapObject> GetCode(Handle<String> source);
|
||||
virtual void GoTo(Label* label);
|
||||
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
|
||||
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
|
||||
virtual void IfRegisterEqPos(int reg, Label* if_eq);
|
||||
virtual IrregexpImplementation Implementation();
|
||||
virtual void LoadCurrentCharacterUnchecked(int cp_offset,
|
||||
int character_count);
|
||||
virtual void PopCurrentPosition();
|
||||
virtual void PopRegister(int register_index);
|
||||
virtual void PushBacktrack(Label* label);
|
||||
virtual void PushCurrentPosition();
|
||||
virtual void PushRegister(int register_index,
|
||||
StackCheckFlag check_stack_limit);
|
||||
virtual void ReadCurrentPositionFromRegister(int reg);
|
||||
virtual void ReadStackPointerFromRegister(int reg);
|
||||
virtual void SetCurrentPositionFromEnd(int by);
|
||||
virtual void SetRegister(int register_index, int to);
|
||||
virtual bool Succeed();
|
||||
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
|
||||
virtual void ClearRegisters(int reg_from, int reg_to);
|
||||
virtual void WriteStackPointerToRegister(int reg);
|
||||
virtual bool CanReadUnaligned();
|
||||
|
||||
// Called from RegExp if the stack-guard is triggered.
|
||||
// If the code object is relocated, the return address is fixed before
|
||||
// returning.
|
||||
// {raw_code} is an Address because this is called via ExternalReference.
|
||||
static int64_t CheckStackGuardState(Address* return_address, Address raw_code,
|
||||
Address re_frame);
|
||||
|
||||
void print_regexp_frame_constants();
|
||||
|
||||
private:
|
||||
// Offsets from frame_pointer() of function parameters and stored registers.
|
||||
static const int kFramePointer = 0;
|
||||
|
||||
// Above the frame pointer - Stored registers and stack passed parameters.
|
||||
// Registers s0 to s7, fp, and ra.
|
||||
static const int kStoredRegisters = kFramePointer;
|
||||
// Return address (stored from link register, read into pc on return).
|
||||
|
||||
// TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
|
||||
|
||||
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
|
||||
// Stack frame header.
|
||||
static const int kStackFrameHeader = kReturnAddress;
|
||||
// Stack parameters placed by caller.
|
||||
static const int kIsolate = kStackFrameHeader + kPointerSize;
|
||||
|
||||
// Below the frame pointer.
|
||||
// Register parameters stored by setup code.
|
||||
static const int kDirectCall = kFramePointer - kPointerSize;
|
||||
static const int kStackHighEnd = kDirectCall - kPointerSize;
|
||||
static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
|
||||
static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
|
||||
static const int kInputEnd = kRegisterOutput - kPointerSize;
|
||||
static const int kInputStart = kInputEnd - kPointerSize;
|
||||
static const int kStartIndex = kInputStart - kPointerSize;
|
||||
static const int kInputString = kStartIndex - kPointerSize;
|
||||
// When adding local variables remember to push space for them in
|
||||
// the frame in GetCode.
|
||||
static const int kSuccessfulCaptures = kInputString - kPointerSize;
|
||||
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
|
||||
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
|
||||
// First register address. Following registers are below it on the stack.
|
||||
static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
|
||||
|
||||
// Initial size of code buffer.
|
||||
static const int kRegExpCodeSize = 1024;
|
||||
|
||||
// Check whether preemption has been requested.
|
||||
void CheckPreemption();
|
||||
|
||||
// Check whether we are exceeding the stack limit on the backtrack stack.
|
||||
void CheckStackLimit();
|
||||
|
||||
// Generate a call to CheckStackGuardState.
|
||||
void CallCheckStackGuardState(Register scratch);
|
||||
|
||||
// The ebp-relative location of a regexp register.
|
||||
MemOperand register_location(int register_index);
|
||||
|
||||
// Register holding the current input position as negative offset from
|
||||
// the end of the string.
|
||||
inline Register current_input_offset() { return a6; }
|
||||
|
||||
// The register containing the current character after LoadCurrentCharacter.
|
||||
inline Register current_character() { return a7; }
|
||||
|
||||
// Register holding address of the end of the input string.
|
||||
inline Register end_of_input_address() { return t2; }
|
||||
|
||||
// Register holding the frame address. Local variables, parameters and
|
||||
// regexp registers are addressed relative to this.
|
||||
inline Register frame_pointer() { return fp; }
|
||||
|
||||
// The register containing the backtrack stack top. Provides a meaningful
|
||||
// name to the register.
|
||||
inline Register backtrack_stackpointer() { return t0; }
|
||||
|
||||
// Register holding pointer to the current code object.
|
||||
inline Register code_pointer() { return a5; }
|
||||
|
||||
// Byte size of chars in the string to match (decided by the Mode argument).
|
||||
inline int char_size() { return static_cast<int>(mode_); }
|
||||
|
||||
// Equivalent to a conditional branch to the label, unless the label
|
||||
// is nullptr, in which case it is a conditional Backtrack.
|
||||
void BranchOrBacktrack(Label* to, Condition condition, Register rs,
|
||||
const Operand& rt);
|
||||
|
||||
// Call and return internally in the generated code in a way that
|
||||
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
|
||||
inline void SafeCall(Label* to, Condition cond, Register rs,
|
||||
const Operand& rt);
|
||||
inline void SafeReturn();
|
||||
inline void SafeCallTarget(Label* name);
|
||||
|
||||
// Pushes the value of a register on the backtrack stack. Decrements the
|
||||
// stack pointer by a word size and stores the register's value there.
|
||||
inline void Push(Register source);
|
||||
|
||||
// Pops a value from the backtrack stack. Reads the word at the stack pointer
|
||||
// and increments it by a word size.
|
||||
inline void Pop(Register target);
|
||||
|
||||
Isolate* isolate() const { return masm_->isolate(); }
|
||||
|
||||
MacroAssembler* masm_;
|
||||
|
||||
// Which mode to generate code for (Latin1 or UC16).
|
||||
Mode mode_;
|
||||
|
||||
// One greater than maximal register index actually used.
|
||||
int num_registers_;
|
||||
|
||||
// Number of registers to output at the end (the saved registers
|
||||
// are always 0..num_saved_registers_-1).
|
||||
int num_saved_registers_;
|
||||
|
||||
// Labels used internally.
|
||||
Label entry_label_;
|
||||
Label start_label_;
|
||||
Label success_label_;
|
||||
Label backtrack_label_;
|
||||
Label exit_label_;
|
||||
Label check_preempt_label_;
|
||||
Label stack_overflow_label_;
|
||||
Label internal_failure_label_;
|
||||
Label fallback_label_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
#endif // V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
|
@ -21,6 +21,8 @@
|
||||
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/regexp/loong64/regexp-macro-assembler-loong64.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -45,6 +45,7 @@ class RegExpMacroAssembler {
|
||||
V(ARM) \
|
||||
V(ARM64) \
|
||||
V(MIPS) \
|
||||
V(LOONG64) \
|
||||
V(RISCV) \
|
||||
V(S390) \
|
||||
V(PPC) \
|
||||
|
@ -868,6 +868,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
macro_assembler.reset(new RegExpMacroAssemblerRISCV(isolate, zone, mode,
|
||||
output_register_count));
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
macro_assembler.reset(new RegExpMacroAssemblerLOONG64(
|
||||
isolate, zone, mode, output_register_count));
|
||||
#else
|
||||
#error "Unsupported architecture"
|
||||
#endif
|
||||
|
@ -20,7 +20,7 @@ namespace internal {
|
||||
// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
|
||||
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
|
||||
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
|
||||
V8_TARGET_ARCH_RISCV64
|
||||
V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
|
||||
|
||||
namespace {
|
||||
|
||||
@ -606,6 +606,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
|
||||
|
||||
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
|
||||
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
|
||||
// || V8_TARGET_ARCH_RISCV64
|
||||
// || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
@ -218,7 +218,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
|
||||
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
|
||||
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
|
||||
defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390) || \
|
||||
defined(V8_TARGET_ARCH_RISCV64)
|
||||
defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
|
||||
// On these platforms we emit relative builtin-to-builtin
|
||||
// jumps for isolate independent builtins in the snapshot. This fixes up the
|
||||
// relative jumps to the right offsets in the snapshot.
|
||||
@ -246,7 +246,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
|
||||
// indirection through the root register.
|
||||
CHECK(on_heap_it.done());
|
||||
CHECK(off_heap_it.done());
|
||||
#endif // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -152,8 +152,9 @@ int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
|
||||
|
||||
DataDirective PlatformEmbeddedFileWriterGeneric::ByteChunkDataDirective()
|
||||
const {
|
||||
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
|
||||
// MIPS uses a fixed 4 byte instruction set, using .long
|
||||
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
|
||||
defined(V8_TARGET_ARCH_LOONG64)
|
||||
// MIPS and LOONG64 uses a fixed 4 byte instruction set, using .long
|
||||
// to prevent any unnecessary padding.
|
||||
return kLong;
|
||||
#else
|
||||
|
@ -46,6 +46,16 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
|
||||
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
|
||||
f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
|
||||
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
|
||||
constexpr RegList kLiftoffAssemblerGpCacheRegs = Register::ListOf(
|
||||
a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, t6, t7, t8, s7);
|
||||
|
||||
// f29: zero, f30-f31: macro-assembler scratch float Registers.
|
||||
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
|
||||
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16,
|
||||
f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28);
|
||||
|
||||
#elif V8_TARGET_ARCH_ARM
|
||||
|
||||
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
|
||||
|
@ -1711,6 +1711,8 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
|
||||
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h"
|
||||
#elif V8_TARGET_ARCH_S390
|
||||
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -306,7 +306,7 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
|
||||
|
||||
// Some externally maintained architectures don't fully implement Liftoff yet.
|
||||
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
|
||||
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
|
||||
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
|
||||
return;
|
||||
#endif
|
||||
|
||||
|
2804
src/wasm/baseline/loong64/liftoff-assembler-loong64.h
Normal file
2804
src/wasm/baseline/loong64/liftoff-assembler-loong64.h
Normal file
File diff suppressed because it is too large
Load Diff
@ -268,6 +268,36 @@ void JumpTableAssembler::NopBytes(int bytes) {
|
||||
}
|
||||
}
|
||||
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
|
||||
Address lazy_compile_target) {
|
||||
DCHECK(is_int32(func_index));
|
||||
int start = pc_offset();
|
||||
li(kWasmCompileLazyFuncIndexRegister, (int32_t)func_index); // max. 2 instr
|
||||
// Jump produces max 4 instructions.
|
||||
Jump(lazy_compile_target, RelocInfo::NONE);
|
||||
int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
|
||||
DCHECK_EQ(nop_bytes % kInstrSize, 0);
|
||||
for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
|
||||
}
|
||||
bool JumpTableAssembler::EmitJumpSlot(Address target) {
|
||||
PatchAndJump(target);
|
||||
return true;
|
||||
}
|
||||
void JumpTableAssembler::EmitFarJumpSlot(Address target) {
|
||||
JumpToInstructionStream(target);
|
||||
}
|
||||
void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
void JumpTableAssembler::NopBytes(int bytes) {
|
||||
DCHECK_LE(0, bytes);
|
||||
DCHECK_EQ(0, bytes % kInstrSize);
|
||||
for (; bytes > 0; bytes -= kInstrSize) {
|
||||
nop();
|
||||
}
|
||||
}
|
||||
|
||||
#elif V8_TARGET_ARCH_PPC64
|
||||
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
|
||||
Address lazy_compile_target) {
|
||||
|
@ -224,6 +224,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
|
||||
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
|
||||
static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
|
||||
static constexpr int kLazyCompileTableSlotSize = 10 * kInstrSize;
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
static constexpr int kJumpTableLineSize = 8 * kInstrSize;
|
||||
static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
|
||||
static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
|
||||
static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
|
||||
#else
|
||||
#error Unknown architecture.
|
||||
#endif
|
||||
|
@ -80,6 +80,15 @@ constexpr Register kGpReturnRegisters[] = {v0, v1};
|
||||
constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14};
|
||||
constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
|
||||
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
// ===========================================================================
|
||||
// == LOONG64 ================================================================
|
||||
// ===========================================================================
|
||||
constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7};
|
||||
constexpr Register kGpReturnRegisters[] = {a0, a1};
|
||||
constexpr DoubleRegister kFpParamRegisters[] = {f0, f1, f2, f3, f4, f5, f6, f7};
|
||||
constexpr DoubleRegister kFpReturnRegisters[] = {f0, f1};
|
||||
|
||||
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
|
||||
// ===========================================================================
|
||||
// == ppc & ppc64 ============================================================
|
||||
|
@ -383,6 +383,12 @@ v8_source_set("cctest_sources") {
|
||||
"test-macro-assembler-riscv64.cc",
|
||||
"test-simple-riscv64.cc",
|
||||
]
|
||||
} else if (v8_current_cpu == "loong64") {
|
||||
sources += [ ### gcmole(arch:loong64) ###
|
||||
"test-assembler-loong64.cc",
|
||||
"test-disasm-loong64.cc",
|
||||
"test-macro-assembler-loong64.cc",
|
||||
]
|
||||
}
|
||||
|
||||
if (v8_use_perfetto) {
|
||||
@ -480,7 +486,7 @@ v8_source_set("cctest_sources") {
|
||||
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
|
||||
v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
|
||||
v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64" ||
|
||||
v8_current_cpu == "riscv64") {
|
||||
v8_current_cpu == "riscv64" || v8_current_cpu == "loong64") {
|
||||
# Disable fmadd/fmsub so that expected results match generated code in
|
||||
# RunFloat64MulAndFloat64Add1 and friends.
|
||||
if (!is_win) {
|
||||
|
@ -46,6 +46,9 @@ namespace internal {
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
#define GET_STACK_POINTER_TO(sp_addr) \
|
||||
__asm__ __volatile__("add %0, sp, x0" : "=r"(sp_addr))
|
||||
#elif V8_HOST_ARCH_LOONG64
|
||||
#define GET_STACK_POINTER_TO(sp_addr) \
|
||||
__asm__ __volatile__("st.d $sp, %0" : "=m"(sp_addr))
|
||||
#else
|
||||
#error Host architecture was not detected as supported by v8
|
||||
#endif
|
||||
|
@ -458,6 +458,15 @@
|
||||
|
||||
}], # 'arch == riscv64 and simulator_run'
|
||||
|
||||
##############################################################################
|
||||
['arch == loong64', {
|
||||
# The instruction scheduler is disabled on loong64.
|
||||
'test-instruction-scheduler/DeoptInMiddleOfBasicBlock': [SKIP],
|
||||
# The uint32 values are sign-extended on loong64.
|
||||
'test-run-load-store/RunLoadStoreZeroExtend64': [SKIP],
|
||||
'test-run-load-store/RunUnalignedLoadStoreZeroExtend64': [SKIP],
|
||||
}], # 'arch == loong64'
|
||||
|
||||
##############################################################################
|
||||
['system == android', {
|
||||
# Uses too much memory.
|
||||
|
@ -4443,7 +4443,7 @@ TEST(RunTruncateFloat32ToInt32) {
|
||||
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || \
|
||||
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
|
||||
CHECK_EQ(std::numeric_limits<int32_t>::min(), m.Call(i));
|
||||
#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
|
||||
#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64
|
||||
CHECK_EQ(0, m.Call(i));
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
CHECK_EQ(std::numeric_limits<int32_t>::max(), m.Call(i));
|
||||
@ -4465,7 +4465,7 @@ TEST(RunTruncateFloat32ToInt32) {
|
||||
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || \
|
||||
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
|
||||
CHECK_EQ(std::numeric_limits<int32_t>::min(), m.Call(i));
|
||||
#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
|
||||
#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64
|
||||
CHECK_EQ(0, m.Call(i));
|
||||
#endif
|
||||
}
|
||||
|
5180
test/cctest/test-assembler-loong64.cc
Normal file
5180
test/cctest/test-assembler-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
895
test/cctest/test-disasm-loong64.cc
Normal file
895
test/cctest/test-disasm-loong64.cc
Normal file
@ -0,0 +1,895 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following
|
||||
// disclaimer in the documentation and/or other materials provided
|
||||
// with the distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived
|
||||
// from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "src/codegen/macro-assembler.h"
|
||||
#include "src/debug/debug.h"
|
||||
#include "src/diagnostics/disasm.h"
|
||||
#include "src/diagnostics/disassembler.h"
|
||||
#include "src/execution/frames-inl.h"
|
||||
#include "src/init/v8.h"
|
||||
#include "test/cctest/cctest.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// Disassembles the single instruction at |pc| and compares the textual
// output against |compare_string|.
//
// Returns true on an exact match. On mismatch, prints both the expected and
// the actual disassembly to stderr and returns false, so callers can record
// the failure and keep running the remaining checks in the test.
//
// The stale commented-out compact-branch pre-decoding inherited from the
// MIPS port has been removed: LOONG64 has no compact branches.
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
  disasm::NameConverter converter;
  disasm::Disassembler disasm(converter);
  base::EmbeddedVector<char, 128> disasm_buffer;

  disasm.InstructionDecode(disasm_buffer, pc);

  if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
    fprintf(stderr,
            "expected: \n"
            "%s\n"
            "disassembled: \n"
            "%s\n\n",
            compare_string, disasm_buffer.begin());
    return false;
  }
  return true;
}
|
||||
|
||||
// Set up V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros: a 4KB code buffer, an Assembler emitting into
// it, and a |failure| flag that COMPARE/COMPARE_PC_REL set on mismatch and
// VERIFY_RUN checks at the end of each test.
// NOTE(review): the malloc'ed buffer is never freed; the test appears to
// leak it deliberately for simplicity — confirm this matches the other
// per-architecture disassembler tests.
#define SET_UP()                                            \
  CcTest::InitializeVM();                                   \
  Isolate* isolate = CcTest::i_isolate();                   \
  HandleScope scope(isolate);                               \
  byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
  Assembler assm(AssemblerOptions{},                        \
                 ExternalAssemblerBuffer(buffer, 4 * 1024)); \
  bool failure = false;
|
||||
|
||||
// This macro assembles one instruction using the preallocated assembler and
// disassembles the generated instruction, comparing the output to the expected
// value. If the comparison fails an error message is printed, but the test
// continues to run until the end. The instruction is emitted at the
// assembler's current pc_offset, which is captured first so the matching
// bytes can be located in |buffer|.
#define COMPARE(asm_, compare_string)                                        \
  {                                                                          \
    int pc_offset = assm.pc_offset();                                        \
    byte* progcounter = &buffer[pc_offset];                                  \
    assm.asm_;                                                               \
    if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
  }
|
||||
|
||||
// Verify that all invocations of the COMPARE macro passed successfully.
// Exit with a failure if at least one of the tests failed. Must be the last
// statement in every TEST body that uses SET_UP().
#define VERIFY_RUN()                                   \
  if (failure) {                                       \
    FATAL("LOONG64 Disassembler tests failed.\n");     \
  }
|
||||
|
||||
// Like COMPARE, but for PC-relative branch instructions: the expected text
// is |compare_string| with " -> <absolute target address>" appended, where
// the target is progcounter + offset * 4 (LOONG64 instructions are 4 bytes,
// so |offset| is in instruction slots).
// NOTE(review): the printf of the raw progcounter looks like leftover debug
// output — consider removing it.
#define COMPARE_PC_REL(asm_, compare_string, offset)                          \
  {                                                                           \
    int pc_offset = assm.pc_offset();                                         \
    byte* progcounter = &buffer[pc_offset];                                   \
    char str_with_address[100];                                               \
    printf("%p\n", static_cast<void*>(progcounter));                          \
    snprintf(str_with_address, sizeof(str_with_address), "%s -> %p",          \
             compare_string, static_cast<void*>(progcounter + (offset * 4))); \
    assm.asm_;                                                                \
    if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
  }
|
||||
|
||||
// Op6-format instruction: jirl (jump and link register). Checks the zero,
// maximum (32767) and minimum (-32768) values of the 16-bit offset field.
TEST(TypeOp6) {
  SET_UP();

  COMPARE(jirl(ra, t7, 0), "4c000261 jirl ra, t7, 0");
  COMPARE(jirl(ra, t7, 32767), "4dfffe61 jirl ra, t7, 32767");
  COMPARE(jirl(ra, t7, -32768), "4e000261 jirl ra, t7, -32768");

  VERIFY_RUN();
}
|
||||
|
||||
// PC-relative branches. Each group checks the maximum, minimum and zero
// values of the branch-offset field; COMPARE_PC_REL appends the computed
// absolute target address to the expected disassembly text.
TEST(TypeOp6PC) {
  SET_UP();

  // 21-bit offsets (compare-to-zero branches).
  COMPARE_PC_REL(beqz(t7, 1048575), "43fffe6f beqz t7, 1048575", 1048575);
  COMPARE_PC_REL(beqz(t0, -1048576), "40000190 beqz t0, -1048576", -1048576);
  COMPARE_PC_REL(beqz(t1, 0), "400001a0 beqz t1, 0", 0);

  COMPARE_PC_REL(bnez(a2, 1048575), "47fffccf bnez a2, 1048575", 1048575);
  COMPARE_PC_REL(bnez(s3, -1048576), "44000350 bnez s3, -1048576", -1048576);
  COMPARE_PC_REL(bnez(t8, 0), "44000280 bnez t8, 0", 0);

  // FP condition-flag branches.
  COMPARE_PC_REL(bceqz(FCC0, 1048575), "4bfffc0f bceqz fcc0, 1048575", 1048575);
  COMPARE_PC_REL(bceqz(FCC0, -1048576), "48000010 bceqz fcc0, -1048576", -1048576);
  COMPARE_PC_REL(bceqz(FCC0, 0), "48000000 bceqz fcc0, 0", 0);

  COMPARE_PC_REL(bcnez(FCC0, 1048575), "4bfffd0f bcnez fcc0, 1048575", 1048575);
  COMPARE_PC_REL(bcnez(FCC0, -1048576), "48000110 bcnez fcc0, -1048576", -1048576);
  COMPARE_PC_REL(bcnez(FCC0, 0), "48000100 bcnez fcc0, 0", 0);

  // 26-bit offset (unconditional branch).
  COMPARE_PC_REL(b(33554431), "53fffdff b 33554431", 33554431);
  COMPARE_PC_REL(b(-33554432), "50000200 b -33554432", -33554432);
  COMPARE_PC_REL(b(0), "50000000 b 0", 0);

  // 16-bit offsets (two-register compare branches).
  COMPARE_PC_REL(beq(t0, a6, 32767), "59fffd8a beq t0, a6, 32767", 32767);
  COMPARE_PC_REL(beq(t1, a0, -32768), "5a0001a4 beq t1, a0, -32768", -32768);
  COMPARE_PC_REL(beq(a4, t1, 0), "5800010d beq a4, t1, 0", 0);

  COMPARE_PC_REL(bne(a3, a4, 32767), "5dfffce8 bne a3, a4, 32767", 32767);
  COMPARE_PC_REL(bne(a6, a5, -32768), "5e000149 bne a6, a5, -32768", -32768);
  COMPARE_PC_REL(bne(a4, a5, 0), "5c000109 bne a4, a5, 0", 0);

  COMPARE_PC_REL(blt(a4, a6, 32767), "61fffd0a blt a4, a6, 32767", 32767);
  COMPARE_PC_REL(blt(a4, a5, -32768), "62000109 blt a4, a5, -32768", -32768);
  COMPARE_PC_REL(blt(a4, a6, 0), "6000010a blt a4, a6, 0", 0);

  COMPARE_PC_REL(bge(s7, a5, 32767), "65ffffc9 bge s7, a5, 32767", 32767);
  COMPARE_PC_REL(bge(a1, a3, -32768), "660000a7 bge a1, a3, -32768", -32768);
  COMPARE_PC_REL(bge(a5, s3, 0), "6400013a bge a5, s3, 0", 0);

  COMPARE_PC_REL(bltu(a5, s7, 32767), "69fffd3e bltu a5, s7, 32767", 32767);
  COMPARE_PC_REL(bltu(a4, a5, -32768), "6a000109 bltu a4, a5, -32768", -32768);
  COMPARE_PC_REL(bltu(a4, t6, 0), "68000112 bltu a4, t6, 0", 0);

  COMPARE_PC_REL(bgeu(a7, a6, 32767), "6dfffd6a bgeu a7, a6, 32767", 32767);
  COMPARE_PC_REL(bgeu(a5, a3, -32768), "6e000127 bgeu a5, a3, -32768", -32768);
  COMPARE_PC_REL(bgeu(t2, t1, 0), "6c0001cd bgeu t2, t1, 0", 0);

  VERIFY_RUN();
}
|
||||
|
||||
// Op7-format instructions with a signed 20-bit immediate (si20, range
// -524288..524287): lu12i.w / lu32i.d and the PC-relative address builders.
// Each group exercises the maximum, minimum and zero immediate.
TEST(TypeOp7) {
  SET_UP();

  COMPARE(lu12i_w(a4, 524287), "14ffffe8 lu12i.w a4, 524287");
  COMPARE(lu12i_w(a5, -524288), "15000009 lu12i.w a5, -524288");
  COMPARE(lu12i_w(a6, 0), "1400000a lu12i.w a6, 0");

  COMPARE(lu32i_d(a7, 524287), "16ffffeb lu32i.d a7, 524287");
  // Fixed: the original passed +524288, which is outside the si20 range;
  // the intended (and disassembled) minimum value is -524288.
  COMPARE(lu32i_d(t0, -524288), "1700000c lu32i.d t0, -524288");
  COMPARE(lu32i_d(t1, 0), "1600000d lu32i.d t1, 0");

  COMPARE(pcaddi(t1, 1), "1800002d pcaddi t1, 1");
  COMPARE(pcaddi(t2, 524287), "18ffffee pcaddi t2, 524287");
  COMPARE(pcaddi(t3, -524288), "1900000f pcaddi t3, -524288");
  COMPARE(pcaddi(t4, 0), "18000010 pcaddi t4, 0");

  COMPARE(pcalau12i(t5, 524287), "1afffff1 pcalau12i t5, 524287");
  COMPARE(pcalau12i(t6, -524288), "1b000012 pcalau12i t6, -524288");
  COMPARE(pcalau12i(a4, 0), "1a000008 pcalau12i a4, 0");

  COMPARE(pcaddu12i(a5, 524287), "1cffffe9 pcaddu12i a5, 524287");
  COMPARE(pcaddu12i(a6, -524288), "1d00000a pcaddu12i a6, -524288");
  COMPARE(pcaddu12i(a7, 0), "1c00000b pcaddu12i a7, 0");

  COMPARE(pcaddu18i(t0, 524287), "1effffec pcaddu18i t0, 524287");
  COMPARE(pcaddu18i(t1, -524288), "1f00000d pcaddu18i t1, -524288");
  COMPARE(pcaddu18i(t2, 0), "1e00000e pcaddu18i t2, 0");

  VERIFY_RUN();
}
|
||||
|
||||
// Op8-format instructions with a 14-bit scaled offset (word-aligned, hence
// the 32764 maximum): LL/SC atomics and the ldptr/stptr family. Each group
// checks maximum, minimum and zero offsets.
TEST(TypeOp8) {
  SET_UP();

  COMPARE(ll_w(t2, t3, 32764), "207ffdee ll.w t2, t3, 32764");
  COMPARE(ll_w(t3, t4, -32768), "2080020f ll.w t3, t4, -32768");
  COMPARE(ll_w(t5, t6, 0), "20000251 ll.w t5, t6, 0");

  COMPARE(sc_w(a6, a7, 32764), "217ffd6a sc.w a6, a7, 32764");
  COMPARE(sc_w(t0, t1, -32768), "218001ac sc.w t0, t1, -32768");
  COMPARE(sc_w(t2, t3, 0), "210001ee sc.w t2, t3, 0");

  COMPARE(ll_d(a0, a1, 32764), "227ffca4 ll.d a0, a1, 32764");
  COMPARE(ll_d(a2, a3, -32768), "228000e6 ll.d a2, a3, -32768");
  COMPARE(ll_d(a4, a5, 0), "22000128 ll.d a4, a5, 0");

  COMPARE(sc_d(t4, t5, 32764), "237ffe30 sc.d t4, t5, 32764");
  COMPARE(sc_d(t6, a0, -32768), "23800092 sc.d t6, a0, -32768");
  COMPARE(sc_d(a1, a2, 0), "230000c5 sc.d a1, a2, 0");

  COMPARE(ldptr_w(a4, a5, 32764), "247ffd28 ldptr.w a4, a5, 32764");
  COMPARE(ldptr_w(a6, a7, -32768), "2480016a ldptr.w a6, a7, -32768");
  COMPARE(ldptr_w(t0, t1, 0), "240001ac ldptr.w t0, t1, 0");

  COMPARE(stptr_w(a4, a5, 32764), "257ffd28 stptr.w a4, a5, 32764");
  COMPARE(stptr_w(a6, a7, -32768), "2580016a stptr.w a6, a7, -32768");
  COMPARE(stptr_w(t0, t1, 0), "250001ac stptr.w t0, t1, 0");

  COMPARE(ldptr_d(t2, t3, 32764), "267ffdee ldptr.d t2, t3, 32764");
  COMPARE(ldptr_d(t4, t5, -32768), "26800230 ldptr.d t4, t5, -32768");
  COMPARE(ldptr_d(t6, a4, 0), "26000112 ldptr.d t6, a4, 0");

  COMPARE(stptr_d(a5, a6, 32764), "277ffd49 stptr.d a5, a6, 32764");
  COMPARE(stptr_d(a7, t0, -32768), "2780018b stptr.d a7, t0, -32768");
  COMPARE(stptr_d(t1, t2, 0), "270001cd stptr.d t1, t2, 0");

  VERIFY_RUN();
}
|
||||
|
||||
// Op10-format instructions: bit-field insert/extract, 12-bit-immediate
// arithmetic/logic, and register+si12 loads/stores (integer and FP).
// Immediate groups check the maximum (2047 / 0xfff), minimum (-2048) and,
// where present, zero values of the 12-bit field.
TEST(TypeOp10) {
  SET_UP();

  COMPARE(bstrins_w(a4, a5, 31, 16), "007f4128 bstrins.w a4, a5, 31, 16");
  COMPARE(bstrins_w(a6, a7, 5, 0), "0065016a bstrins.w a6, a7, 5, 0");

  COMPARE(bstrins_d(a3, zero_reg, 17, 0),
          "00910007 bstrins.d a3, zero_reg, 17, 0");
  COMPARE(bstrins_d(t1, zero_reg, 17, 0),
          "0091000d bstrins.d t1, zero_reg, 17, 0");

  COMPARE(bstrpick_w(t0, t1, 31, 29), "007ff5ac bstrpick.w t0, t1, 31, 29");
  COMPARE(bstrpick_w(a4, a5, 16, 0), "00708128 bstrpick.w a4, a5, 16, 0");

  COMPARE(bstrpick_d(a5, a5, 31, 0), "00df0129 bstrpick.d a5, a5, 31, 0");
  COMPARE(bstrpick_d(a4, a4, 25, 2), "00d90908 bstrpick.d a4, a4, 25, 2");

  COMPARE(slti(t2, a5, 2047), "021ffd2e slti t2, a5, 2047");
  COMPARE(slti(a7, a1, -2048), "022000ab slti a7, a1, -2048");

  COMPARE(sltui(a7, a7, 2047), "025ffd6b sltui a7, a7, 2047");
  COMPARE(sltui(t1, t1, -2048), "026001ad sltui t1, t1, -2048");

  COMPARE(addi_w(t0, t2, 2047), "029ffdcc addi.w t0, t2, 2047");
  COMPARE(addi_w(a0, a0, -2048), "02a00084 addi.w a0, a0, -2048");

  COMPARE(addi_d(a0, zero_reg, 2047), "02dffc04 addi.d a0, zero_reg, 2047");
  COMPARE(addi_d(t7, t7, -2048), "02e00273 addi.d t7, t7, -2048");

  COMPARE(lu52i_d(a0, a0, 2047), "031ffc84 lu52i.d a0, a0, 2047");
  COMPARE(lu52i_d(a1, a1, -2048), "032000a5 lu52i.d a1, a1, -2048");

  // Logical immediates are zero-extended and printed in hex.
  COMPARE(andi(s3, a3, 0xfff), "037ffcfa andi s3, a3, 0xfff");
  COMPARE(andi(a4, a4, 0), "03400108 andi a4, a4, 0x0");

  COMPARE(ori(t6, t6, 0xfff), "03bffe52 ori t6, t6, 0xfff");
  COMPARE(ori(t6, t6, 0), "03800252 ori t6, t6, 0x0");

  COMPARE(xori(t1, t1, 0xfff), "03fffdad xori t1, t1, 0xfff");
  COMPARE(xori(a3, a3, 0x0), "03c000e7 xori a3, a3, 0x0");

  COMPARE(ld_b(a1, a1, 2047), "281ffca5 ld.b a1, a1, 2047");
  COMPARE(ld_b(a4, a4, -2048), "28200108 ld.b a4, a4, -2048");

  COMPARE(ld_h(a4, a0, 2047), "285ffc88 ld.h a4, a0, 2047");
  COMPARE(ld_h(a4, a3, -2048), "286000e8 ld.h a4, a3, -2048");

  COMPARE(ld_w(a6, a6, 2047), "289ffd4a ld.w a6, a6, 2047");
  COMPARE(ld_w(a5, a4, -2048), "28a00109 ld.w a5, a4, -2048");

  COMPARE(ld_d(a0, a3, 2047), "28dffce4 ld.d a0, a3, 2047");
  COMPARE(ld_d(a6, fp, -2048), "28e002ca ld.d a6, fp, -2048");
  COMPARE(ld_d(a0, a6, 0), "28c00144 ld.d a0, a6, 0");

  COMPARE(st_b(a4, a0, 2047), "291ffc88 st.b a4, a0, 2047");
  COMPARE(st_b(a6, a5, -2048), "2920012a st.b a6, a5, -2048");

  COMPARE(st_h(a4, a0, 2047), "295ffc88 st.h a4, a0, 2047");
  COMPARE(st_h(t1, t2, -2048), "296001cd st.h t1, t2, -2048");

  COMPARE(st_w(t3, a4, 2047), "299ffd0f st.w t3, a4, 2047");
  COMPARE(st_w(a3, t2, -2048), "29a001c7 st.w a3, t2, -2048");

  COMPARE(st_d(s3, sp, 2047), "29dffc7a st.d s3, sp, 2047");
  COMPARE(st_d(fp, s6, -2048), "29e003b6 st.d fp, s6, -2048");

  COMPARE(ld_bu(a6, a0, 2047), "2a1ffc8a ld.bu a6, a0, 2047");
  COMPARE(ld_bu(a7, a7, -2048), "2a20016b ld.bu a7, a7, -2048");

  COMPARE(ld_hu(a7, a7, 2047), "2a5ffd6b ld.hu a7, a7, 2047");
  COMPARE(ld_hu(a3, a3, -2048), "2a6000e7 ld.hu a3, a3, -2048");

  COMPARE(ld_wu(a3, a0, 2047), "2a9ffc87 ld.wu a3, a0, 2047");
  COMPARE(ld_wu(a3, a5, -2048), "2aa00127 ld.wu a3, a5, -2048");

  COMPARE(fld_s(f0, a3, 2047), "2b1ffce0 fld.s f0, a3, 2047");
  COMPARE(fld_s(f0, a1, -2048), "2b2000a0 fld.s f0, a1, -2048");

  COMPARE(fld_d(f0, a0, 2047), "2b9ffc80 fld.d f0, a0, 2047");
  COMPARE(fld_d(f0, fp, -2048), "2ba002c0 fld.d f0, fp, -2048");

  COMPARE(fst_d(f0, fp, 2047), "2bdffec0 fst.d f0, fp, 2047");
  COMPARE(fst_d(f0, a0, -2048), "2be00080 fst.d f0, a0, -2048");

  COMPARE(fst_s(f0, a5, 2047), "2b5ffd20 fst.s f0, a5, 2047");
  COMPARE(fst_s(f0, a3, -2048), "2b6000e0 fst.s f0, a3, -2048");

  VERIFY_RUN();
}
|
||||
|
||||
// Op12-format instructions: four-register fused multiply-add/sub family
// (fmadd/fmsub/fnmadd/fnmsub, single and double) and the fcmp.cond.s/.d
// comparisons for every condition code, written into FCC0.
TEST(TypeOp12) {
  SET_UP();

  COMPARE(fmadd_s(f0, f1, f2, f3), "08118820 fmadd.s f0, f1, f2, f3");
  COMPARE(fmadd_s(f4, f5, f6, f7), "081398a4 fmadd.s f4, f5, f6, f7");

  COMPARE(fmadd_d(f8, f9, f10, f11), "0825a928 fmadd.d f8, f9, f10, f11");
  COMPARE(fmadd_d(f12, f13, f14, f15), "0827b9ac fmadd.d f12, f13, f14, f15");

  COMPARE(fmsub_s(f0, f1, f2, f3), "08518820 fmsub.s f0, f1, f2, f3");
  COMPARE(fmsub_s(f4, f5, f6, f7), "085398a4 fmsub.s f4, f5, f6, f7");

  COMPARE(fmsub_d(f8, f9, f10, f11), "0865a928 fmsub.d f8, f9, f10, f11");
  COMPARE(fmsub_d(f12, f13, f14, f15), "0867b9ac fmsub.d f12, f13, f14, f15");

  COMPARE(fnmadd_s(f0, f1, f2, f3), "08918820 fnmadd.s f0, f1, f2, f3");
  COMPARE(fnmadd_s(f4, f5, f6, f7), "089398a4 fnmadd.s f4, f5, f6, f7");

  COMPARE(fnmadd_d(f8, f9, f10, f11), "08a5a928 fnmadd.d f8, f9, f10, f11");
  COMPARE(fnmadd_d(f12, f13, f14, f15),
          "08a7b9ac fnmadd.d f12, f13, f14, f15");

  COMPARE(fnmsub_s(f0, f1, f2, f3), "08d18820 fnmsub.s f0, f1, f2, f3");
  COMPARE(fnmsub_s(f4, f5, f6, f7), "08d398a4 fnmsub.s f4, f5, f6, f7");

  COMPARE(fnmsub_d(f8, f9, f10, f11), "08e5a928 fnmsub.d f8, f9, f10, f11");
  COMPARE(fnmsub_d(f12, f13, f14, f15),
          "08e7b9ac fnmsub.d f12, f13, f14, f15");

  // Single-precision comparisons, quiet ("c") conditions first.
  COMPARE(fcmp_cond_s(CAF, f1, f2, FCC0), "0c100820 fcmp.caf.s fcc0, f1, f2");
  COMPARE(fcmp_cond_s(CUN, f5, f6, FCC0), "0c1418a0 fcmp.cun.s fcc0, f5, f6");
  COMPARE(fcmp_cond_s(CEQ, f9, f10, FCC0),
          "0c122920 fcmp.ceq.s fcc0, f9, f10");
  COMPARE(fcmp_cond_s(CUEQ, f13, f14, FCC0),
          "0c1639a0 fcmp.cueq.s fcc0, f13, f14");

  COMPARE(fcmp_cond_s(CLT, f1, f2, FCC0), "0c110820 fcmp.clt.s fcc0, f1, f2");
  COMPARE(fcmp_cond_s(CULT, f5, f6, FCC0),
          "0c1518a0 fcmp.cult.s fcc0, f5, f6");
  COMPARE(fcmp_cond_s(CLE, f9, f10, FCC0),
          "0c132920 fcmp.cle.s fcc0, f9, f10");
  COMPARE(fcmp_cond_s(CULE, f13, f14, FCC0),
          "0c1739a0 fcmp.cule.s fcc0, f13, f14");

  COMPARE(fcmp_cond_s(CNE, f1, f2, FCC0), "0c180820 fcmp.cne.s fcc0, f1, f2");
  COMPARE(fcmp_cond_s(COR, f5, f6, FCC0), "0c1a18a0 fcmp.cor.s fcc0, f5, f6");
  COMPARE(fcmp_cond_s(CUNE, f9, f10, FCC0),
          "0c1c2920 fcmp.cune.s fcc0, f9, f10");
  // Signaling ("s") conditions.
  COMPARE(fcmp_cond_s(SAF, f13, f14, FCC0),
          "0c10b9a0 fcmp.saf.s fcc0, f13, f14");

  COMPARE(fcmp_cond_s(SUN, f1, f2, FCC0), "0c148820 fcmp.sun.s fcc0, f1, f2");
  COMPARE(fcmp_cond_s(SEQ, f5, f6, FCC0), "0c1298a0 fcmp.seq.s fcc0, f5, f6");
  COMPARE(fcmp_cond_s(SUEQ, f9, f10, FCC0),
          "0c16a920 fcmp.sueq.s fcc0, f9, f10");
  // NOTE(review): the SLT case is disabled below — presumably pending a
  // disassembler fix; confirm before re-enabling.
  // COMPARE(fcmp_cond_s(SLT, f13, f14, FCC0),
  //        "0c11b9a0 fcmp.slt.s fcc0, f13, f14");

  COMPARE(fcmp_cond_s(SULT, f1, f2, FCC0),
          "0c158820 fcmp.sult.s fcc0, f1, f2");
  COMPARE(fcmp_cond_s(SLE, f5, f6, FCC0), "0c1398a0 fcmp.sle.s fcc0, f5, f6");
  COMPARE(fcmp_cond_s(SULE, f9, f10, FCC0),
          "0c17a920 fcmp.sule.s fcc0, f9, f10");
  COMPARE(fcmp_cond_s(SNE, f13, f14, FCC0),
          "0c18b9a0 fcmp.sne.s fcc0, f13, f14");
  COMPARE(fcmp_cond_s(SOR, f13, f14, FCC0),
          "0c1ab9a0 fcmp.sor.s fcc0, f13, f14");
  COMPARE(fcmp_cond_s(SUNE, f1, f2, FCC0),
          "0c1c8820 fcmp.sune.s fcc0, f1, f2");

  // Double-precision comparisons, same condition ordering as above.
  COMPARE(fcmp_cond_d(CAF, f1, f2, FCC0), "0c200820 fcmp.caf.d fcc0, f1, f2");
  COMPARE(fcmp_cond_d(CUN, f5, f6, FCC0), "0c2418a0 fcmp.cun.d fcc0, f5, f6");
  COMPARE(fcmp_cond_d(CEQ, f9, f10, FCC0),
          "0c222920 fcmp.ceq.d fcc0, f9, f10");
  COMPARE(fcmp_cond_d(CUEQ, f13, f14, FCC0),
          "0c2639a0 fcmp.cueq.d fcc0, f13, f14");

  COMPARE(fcmp_cond_d(CLT, f1, f2, FCC0), "0c210820 fcmp.clt.d fcc0, f1, f2");
  COMPARE(fcmp_cond_d(CULT, f5, f6, FCC0),
          "0c2518a0 fcmp.cult.d fcc0, f5, f6");
  COMPARE(fcmp_cond_d(CLE, f9, f10, FCC0),
          "0c232920 fcmp.cle.d fcc0, f9, f10");
  COMPARE(fcmp_cond_d(CULE, f13, f14, FCC0),
          "0c2739a0 fcmp.cule.d fcc0, f13, f14");

  COMPARE(fcmp_cond_d(CNE, f1, f2, FCC0), "0c280820 fcmp.cne.d fcc0, f1, f2");
  COMPARE(fcmp_cond_d(COR, f5, f6, FCC0), "0c2a18a0 fcmp.cor.d fcc0, f5, f6");
  COMPARE(fcmp_cond_d(CUNE, f9, f10, FCC0),
          "0c2c2920 fcmp.cune.d fcc0, f9, f10");
  COMPARE(fcmp_cond_d(SAF, f13, f14, FCC0),
          "0c20b9a0 fcmp.saf.d fcc0, f13, f14");

  COMPARE(fcmp_cond_d(SUN, f1, f2, FCC0), "0c248820 fcmp.sun.d fcc0, f1, f2");
  COMPARE(fcmp_cond_d(SEQ, f5, f6, FCC0), "0c2298a0 fcmp.seq.d fcc0, f5, f6");
  COMPARE(fcmp_cond_d(SUEQ, f9, f10, FCC0),
          "0c26a920 fcmp.sueq.d fcc0, f9, f10");
  // NOTE(review): SLT disabled here as well — see the .s case above.
  // COMPARE(fcmp_cond_d(SLT, f13, f14, FCC0),
  //        "0c21b9a0 fcmp.slt.d fcc0, f13, f14");

  COMPARE(fcmp_cond_d(SULT, f1, f2, FCC0),
          "0c258820 fcmp.sult.d fcc0, f1, f2");
  COMPARE(fcmp_cond_d(SLE, f5, f6, FCC0), "0c2398a0 fcmp.sle.d fcc0, f5, f6");
  COMPARE(fcmp_cond_d(SULE, f9, f10, FCC0),
          "0c27a920 fcmp.sule.d fcc0, f9, f10");
  COMPARE(fcmp_cond_d(SNE, f13, f14, FCC0),
          "0c28b9a0 fcmp.sne.d fcc0, f13, f14");
  COMPARE(fcmp_cond_d(SOR, f13, f14, FCC0),
          "0c2ab9a0 fcmp.sor.d fcc0, f13, f14");
  COMPARE(fcmp_cond_d(SUNE, f1, f2, FCC0),
          "0c2c8820 fcmp.sune.d fcc0, f1, f2");

  VERIFY_RUN();
}
|
||||
|
||||
// Op14-format instructions: shifted-add (alsl, shift amount 1..4),
// bytepick, and immediate shifts/rotates. Shift groups check boundary
// shift amounts (31 or 63, and 1).
TEST(TypeOp14) {
  SET_UP();

  COMPARE(alsl_w(a0, a1, a2, 1), "000418a4 alsl.w a0, a1, a2, 1");
  COMPARE(alsl_w(a3, a4, a5, 3), "00052507 alsl.w a3, a4, a5, 3");
  COMPARE(alsl_w(a6, a7, t0, 4), "0005b16a alsl.w a6, a7, t0, 4");

  COMPARE(alsl_wu(t1, t2, t3, 1), "00063dcd alsl.wu t1, t2, t3, 1");
  COMPARE(alsl_wu(t4, t5, t6, 3), "00074a30 alsl.wu t4, t5, t6, 3");
  COMPARE(alsl_wu(a0, a1, a2, 4), "000798a4 alsl.wu a0, a1, a2, 4");

  COMPARE(alsl_d(a3, a4, a5, 1), "002c2507 alsl.d a3, a4, a5, 1");
  COMPARE(alsl_d(a6, a7, t0, 3), "002d316a alsl.d a6, a7, t0, 3");
  COMPARE(alsl_d(t1, t2, t3, 4), "002dbdcd alsl.d t1, t2, t3, 4");

  COMPARE(bytepick_w(t4, t5, t6, 0), "00084a30 bytepick.w t4, t5, t6, 0");
  COMPARE(bytepick_w(a0, a1, a2, 3), "000998a4 bytepick.w a0, a1, a2, 3");

  COMPARE(bytepick_d(a6, a7, t0, 0), "000c316a bytepick.d a6, a7, t0, 0");
  COMPARE(bytepick_d(t4, t5, t6, 7), "000fca30 bytepick.d t4, t5, t6, 7");

  COMPARE(slli_w(a3, a3, 31), "0040fce7 slli.w a3, a3, 31");
  COMPARE(slli_w(a6, a6, 1), "0040854a slli.w a6, a6, 1");

  COMPARE(slli_d(t3, t2, 63), "0041fdcf slli.d t3, t2, 63");
  COMPARE(slli_d(t4, a6, 1), "00410550 slli.d t4, a6, 1");

  COMPARE(srli_w(a7, a7, 31), "0044fd6b srli.w a7, a7, 31");
  COMPARE(srli_w(a4, a4, 1), "00448508 srli.w a4, a4, 1");

  COMPARE(srli_d(a4, a3, 63), "0045fce8 srli.d a4, a3, 63");
  COMPARE(srli_d(a4, a4, 1), "00450508 srli.d a4, a4, 1");

  COMPARE(srai_d(a0, a0, 63), "0049fc84 srai.d a0, a0, 63");
  COMPARE(srai_d(a4, a1, 1), "004904a8 srai.d a4, a1, 1");

  COMPARE(srai_w(s4, a3, 31), "0048fcfb srai.w s4, a3, 31");
  COMPARE(srai_w(s4, a5, 1), "0048853b srai.w s4, a5, 1");

  COMPARE(rotri_d(t7, t6, 1), "004d0653 rotri.d t7, t6, 1");

  VERIFY_RUN();
}
|
||||
|
||||
// Op17-format (three-register) instructions: integer arithmetic/logic,
// shifts, multiply/divide, FP arithmetic, indexed loads/stores, barriers,
// break, and the AM* atomic read-modify-write family.
TEST(TypeOp17) {
  SET_UP();

  COMPARE(sltu(t5, t4, a4), "0012a211 sltu t5, t4, a4");
  COMPARE(sltu(t4, zero_reg, t4), "0012c010 sltu t4, zero_reg, t4");

  COMPARE(add_w(a4, a4, a6), "00102908 add.w a4, a4, a6");
  COMPARE(add_w(a5, a6, t3), "00103d49 add.w a5, a6, t3");

  COMPARE(add_d(a4, t0, t1), "0010b588 add.d a4, t0, t1");
  COMPARE(add_d(a6, a3, t1), "0010b4ea add.d a6, a3, t1");

  COMPARE(sub_w(a7, a7, a2), "0011196b sub.w a7, a7, a2");
  COMPARE(sub_w(a2, a2, s3), "001168c6 sub.w a2, a2, s3");

  COMPARE(sub_d(s3, ra, s3), "0011e83a sub.d s3, ra, s3");
  COMPARE(sub_d(a0, a1, a2), "001198a4 sub.d a0, a1, a2");

  COMPARE(slt(a5, a5, a6), "00122929 slt a5, a5, a6");
  COMPARE(slt(a6, t3, t4), "001241ea slt a6, t3, t4");

  COMPARE(masknez(a5, a5, a3), "00131d29 masknez a5, a5, a3");
  COMPARE(masknez(a3, a4, a5), "00132507 masknez a3, a4, a5");

  COMPARE(maskeqz(a6, a7, t0), "0013b16a maskeqz a6, a7, t0");
  COMPARE(maskeqz(t1, t2, t3), "0013bdcd maskeqz t1, t2, t3");

  COMPARE(or_(s3, sp, zero_reg), "0015007a or s3, sp, zero_reg");
  COMPARE(or_(a4, a0, zero_reg), "00150088 or a4, a0, zero_reg");

  COMPARE(and_(sp, sp, t6), "0014c863 and sp, sp, t6");
  COMPARE(and_(a3, a3, a7), "0014ace7 and a3, a3, a7");

  COMPARE(nor(a7, a7, a7), "00142d6b nor a7, a7, a7");
  COMPARE(nor(t4, t5, t6), "00144a30 nor t4, t5, t6");

  COMPARE(xor_(a0, a1, a2), "001598a4 xor a0, a1, a2");
  COMPARE(xor_(a3, a4, a5), "0015a507 xor a3, a4, a5");

  COMPARE(orn(a6, a7, t0), "0016316a orn a6, a7, t0");
  COMPARE(orn(t1, t2, t3), "00163dcd orn t1, t2, t3");

  COMPARE(andn(t4, t5, t6), "0016ca30 andn t4, t5, t6");
  COMPARE(andn(a0, a1, a2), "001698a4 andn a0, a1, a2");

  COMPARE(sll_w(a3, t0, a7), "00172d87 sll.w a3, t0, a7");
  COMPARE(sll_w(a3, a4, a3), "00171d07 sll.w a3, a4, a3");

  COMPARE(srl_w(a3, a4, a3), "00179d07 srl.w a3, a4, a3");
  COMPARE(srl_w(a3, t1, t4), "0017c1a7 srl.w a3, t1, t4");

  COMPARE(sra_w(a4, t4, a4), "00182208 sra.w a4, t4, a4");
  COMPARE(sra_w(a3, t1, a6), "001829a7 sra.w a3, t1, a6");

  COMPARE(sll_d(a3, a1, a3), "00189ca7 sll.d a3, a1, a3");
  COMPARE(sll_d(a7, a4, t0), "0018b10b sll.d a7, a4, t0");

  COMPARE(srl_d(a7, a7, t0), "0019316b srl.d a7, a7, t0");
  COMPARE(srl_d(t0, a6, t0), "0019314c srl.d t0, a6, t0");

  COMPARE(sra_d(a3, a4, a5), "0019a507 sra.d a3, a4, a5");
  COMPARE(sra_d(a6, a7, t0), "0019b16a sra.d a6, a7, t0");

  COMPARE(rotr_d(t1, t2, t3), "001bbdcd rotr.d t1, t2, t3");
  COMPARE(rotr_d(t4, t5, t6), "001bca30 rotr.d t4, t5, t6");

  COMPARE(rotr_w(a0, a1, a2), "001b18a4 rotr.w a0, a1, a2");
  COMPARE(rotr_w(a3, a4, a5), "001b2507 rotr.w a3, a4, a5");

  COMPARE(mul_w(t8, a5, t7), "001c4d34 mul.w t8, a5, t7");
  COMPARE(mul_w(t4, t5, t6), "001c4a30 mul.w t4, t5, t6");

  COMPARE(mulh_w(s3, a3, t7), "001cccfa mulh.w s3, a3, t7");
  COMPARE(mulh_w(a0, a1, a2), "001c98a4 mulh.w a0, a1, a2");

  COMPARE(mulh_wu(a6, a7, t0), "001d316a mulh.wu a6, a7, t0");
  COMPARE(mulh_wu(t1, t2, t3), "001d3dcd mulh.wu t1, t2, t3");

  COMPARE(mul_d(t2, a5, t1), "001db52e mul.d t2, a5, t1");
  COMPARE(mul_d(a4, a4, a5), "001da508 mul.d a4, a4, a5");

  COMPARE(mulh_d(a3, a4, a5), "001e2507 mulh.d a3, a4, a5");
  COMPARE(mulh_d(a6, a7, t0), "001e316a mulh.d a6, a7, t0");

  COMPARE(mulh_du(t1, t2, t3), "001ebdcd mulh.du t1, t2, t3");
  COMPARE(mulh_du(t4, t5, t6), "001eca30 mulh.du t4, t5, t6");

  COMPARE(mulw_d_w(a0, a1, a2), "001f18a4 mulw.d.w a0, a1, a2");
  COMPARE(mulw_d_w(a3, a4, a5), "001f2507 mulw.d.w a3, a4, a5");

  COMPARE(mulw_d_wu(a6, a7, t0), "001fb16a mulw.d.wu a6, a7, t0");
  COMPARE(mulw_d_wu(t1, t2, t3), "001fbdcd mulw.d.wu t1, t2, t3");

  COMPARE(div_w(a5, a5, a3), "00201d29 div.w a5, a5, a3");
  COMPARE(div_w(t4, t5, t6), "00204a30 div.w t4, t5, t6");

  COMPARE(mod_w(a6, t3, a6), "0020a9ea mod.w a6, t3, a6");
  COMPARE(mod_w(a3, a4, a3), "00209d07 mod.w a3, a4, a3");

  COMPARE(div_wu(t1, t2, t3), "00213dcd div.wu t1, t2, t3");
  COMPARE(div_wu(t4, t5, t6), "00214a30 div.wu t4, t5, t6");

  COMPARE(mod_wu(a0, a1, a2), "002198a4 mod.wu a0, a1, a2");
  COMPARE(mod_wu(a3, a4, a5), "0021a507 mod.wu a3, a4, a5");

  COMPARE(div_d(t0, t0, a6), "0022298c div.d t0, t0, a6");
  COMPARE(div_d(a7, a7, a5), "0022256b div.d a7, a7, a5");

  COMPARE(mod_d(a6, a7, t0), "0022b16a mod.d a6, a7, t0");
  COMPARE(mod_d(t1, t2, t3), "0022bdcd mod.d t1, t2, t3");

  COMPARE(div_du(t4, t5, t6), "00234a30 div.du t4, t5, t6");
  COMPARE(div_du(a0, a1, a2), "002318a4 div.du a0, a1, a2");

  COMPARE(mod_du(a3, a4, a5), "0023a507 mod.du a3, a4, a5");
  COMPARE(mod_du(a6, a7, t0), "0023b16a mod.du a6, a7, t0");

  COMPARE(fadd_s(f3, f4, f5), "01009483 fadd.s f3, f4, f5");
  COMPARE(fadd_s(f6, f7, f8), "0100a0e6 fadd.s f6, f7, f8");

  COMPARE(fadd_d(f0, f1, f0), "01010020 fadd.d f0, f1, f0");
  COMPARE(fadd_d(f0, f1, f2), "01010820 fadd.d f0, f1, f2");

  COMPARE(fsub_s(f9, f10, f11), "0102ad49 fsub.s f9, f10, f11");
  COMPARE(fsub_s(f12, f13, f14), "0102b9ac fsub.s f12, f13, f14");

  COMPARE(fsub_d(f30, f0, f30), "0103781e fsub.d f30, f0, f30");
  COMPARE(fsub_d(f0, f0, f1), "01030400 fsub.d f0, f0, f1");

  COMPARE(fmul_s(f15, f16, f17), "0104c60f fmul.s f15, f16, f17");
  COMPARE(fmul_s(f18, f19, f20), "0104d272 fmul.s f18, f19, f20");

  COMPARE(fmul_d(f0, f0, f1), "01050400 fmul.d f0, f0, f1");
  COMPARE(fmul_d(f0, f0, f0), "01050000 fmul.d f0, f0, f0");

  COMPARE(fdiv_s(f0, f1, f2), "01068820 fdiv.s f0, f1, f2");
  COMPARE(fdiv_s(f3, f4, f5), "01069483 fdiv.s f3, f4, f5");

  COMPARE(fdiv_d(f0, f0, f1), "01070400 fdiv.d f0, f0, f1");
  COMPARE(fdiv_d(f0, f1, f0), "01070020 fdiv.d f0, f1, f0");

  COMPARE(fmax_s(f9, f10, f11), "0108ad49 fmax.s f9, f10, f11");
  COMPARE(fmin_s(f6, f7, f8), "010aa0e6 fmin.s f6, f7, f8");

  COMPARE(fmax_d(f0, f1, f0), "01090020 fmax.d f0, f1, f0");
  COMPARE(fmin_d(f0, f1, f0), "010b0020 fmin.d f0, f1, f0");

  COMPARE(fmaxa_s(f12, f13, f14), "010cb9ac fmaxa.s f12, f13, f14");
  COMPARE(fmina_s(f15, f16, f17), "010ec60f fmina.s f15, f16, f17");

  COMPARE(fmaxa_d(f18, f19, f20), "010d5272 fmaxa.d f18, f19, f20");
  COMPARE(fmina_d(f0, f1, f2), "010f0820 fmina.d f0, f1, f2");

  // Register-indexed loads/stores.
  COMPARE(ldx_b(a0, a1, a2), "380018a4 ldx.b a0, a1, a2");
  COMPARE(ldx_h(a3, a4, a5), "38042507 ldx.h a3, a4, a5");
  COMPARE(ldx_w(a6, a7, t0), "3808316a ldx.w a6, a7, t0");

  COMPARE(stx_b(t1, t2, t3), "38103dcd stx.b t1, t2, t3");
  COMPARE(stx_h(t4, t5, t6), "38144a30 stx.h t4, t5, t6");
  COMPARE(stx_w(a0, a1, a2), "381818a4 stx.w a0, a1, a2");

  COMPARE(ldx_bu(a3, a4, a5), "38202507 ldx.bu a3, a4, a5");
  COMPARE(ldx_hu(a6, a7, t0), "3824316a ldx.hu a6, a7, t0");
  COMPARE(ldx_wu(t1, t2, t3), "38283dcd ldx.wu t1, t2, t3");

  COMPARE(ldx_d(a2, s6, t6), "380c4ba6 ldx.d a2, s6, t6");
  COMPARE(ldx_d(t7, s6, t6), "380c4bb3 ldx.d t7, s6, t6");

  COMPARE(stx_d(a4, a3, t6), "381c48e8 stx.d a4, a3, t6");
  COMPARE(stx_d(a0, a3, t6), "381c48e4 stx.d a0, a3, t6");

  // Barriers and break: the hint/code is printed in hex with the decimal
  // value in parentheses.
  COMPARE(dbar(0), "38720000 dbar 0x0(0)");
  COMPARE(ibar(5555), "387295b3 ibar 0x15b3(5555)");

  COMPARE(break_(0), "002a0000 break code: 0x0(0)");
  COMPARE(break_(0x3fc0), "002a3fc0 break code: 0x3fc0(16320)");

  COMPARE(fldx_s(f3, a4, a5), "38302503 fldx.s f3, a4, a5");
  COMPARE(fldx_d(f6, a7, t0), "38343166 fldx.d f6, a7, t0");

  COMPARE(fstx_s(f1, t2, t3), "38383dc1 fstx.s f1, t2, t3");
  COMPARE(fstx_d(f4, t5, t6), "383c4a24 fstx.d f4, t5, t6");

  // Atomic read-modify-write family.
  COMPARE(amswap_w(a4, a5, a6), "38602548 amswap.w a4, a5, a6");
  COMPARE(amswap_d(a7, t0, t1), "3860b1ab amswap.d a7, t0, t1");

  COMPARE(amadd_w(t2, t3, t4), "38613e0e amadd.w t2, t3, t4");
  COMPARE(amadd_d(t5, t6, a0), "3861c891 amadd.d t5, t6, a0");

  COMPARE(amand_w(a1, a2, a3), "386218e5 amand.w a1, a2, a3");
  COMPARE(amand_d(a4, a5, a6), "3862a548 amand.d a4, a5, a6");

  COMPARE(amor_w(a7, t0, t1), "386331ab amor.w a7, t0, t1");
  COMPARE(amor_d(t2, t3, t4), "3863be0e amor.d t2, t3, t4");

  COMPARE(amxor_w(t5, t6, a0), "38644891 amxor.w t5, t6, a0");
  COMPARE(amxor_d(a1, a2, a3), "386498e5 amxor.d a1, a2, a3");

  COMPARE(ammax_w(a4, a5, a6), "38652548 ammax.w a4, a5, a6");
  COMPARE(ammax_d(a7, t0, t1), "3865b1ab ammax.d a7, t0, t1");

  COMPARE(ammin_w(t2, t3, t4), "38663e0e ammin.w t2, t3, t4");
  COMPARE(ammin_d(t5, t6, a0), "3866c891 ammin.d t5, t6, a0");

  COMPARE(ammax_wu(a1, a2, a3), "386718e5 ammax.wu a1, a2, a3");
  COMPARE(ammax_du(a4, a5, a6), "3867a548 ammax.du a4, a5, a6");

  COMPARE(ammin_wu(a7, t0, t1), "386831ab ammin.wu a7, t0, t1");
  COMPARE(ammin_du(t2, t3, t4), "3868be0e ammin.du t2, t3, t4");

  // "_db" variants: atomics with a data barrier.
  COMPARE(ammax_db_d(a0, a1, a2), "386e94c4 ammax_db.d a0, a1, a2");
  COMPARE(ammax_db_du(a3, a4, a5), "3870a127 ammax_db.du a3, a4, a5");

  COMPARE(ammax_db_w(a6, a7, t0), "386e2d8a ammax_db.w a6, a7, t0");
  COMPARE(ammax_db_wu(t1, t2, t3), "387039ed ammax_db.wu t1, t2, t3");

  COMPARE(ammin_db_d(t4, t5, t6), "386fc650 ammin_db.d t4, t5, t6");
  COMPARE(ammin_db_du(a0, a1, a2), "387194c4 ammin_db.du a0, a1, a2");

  COMPARE(ammin_db_wu(a3, a4, a5), "38712127 ammin_db.wu a3, a4, a5");
  COMPARE(ammin_db_w(a6, a7, t0), "386f2d8a ammin_db.w a6, a7, t0");

  COMPARE(fscaleb_s(f0, f1, f2), "01108820 fscaleb.s f0, f1, f2");
  COMPARE(fscaleb_d(f3, f4, f5), "01111483 fscaleb.d f3, f4, f5");

  COMPARE(fcopysign_s(f6, f7, f8), "0112a0e6 fcopysign.s f6, f7, f8");
  COMPARE(fcopysign_d(f9, f10, f12), "01133149 fcopysign.d f9, f10, f12");

  VERIFY_RUN();
}
|
||||
|
||||
TEST(TypeOp22) {
|
||||
SET_UP();
|
||||
|
||||
COMPARE(clz_w(a3, a0), "00001487 clz.w a3, a0");
|
||||
COMPARE(ctz_w(a0, a1), "00001ca4 ctz.w a0, a1");
|
||||
COMPARE(clz_d(a2, a3), "000024e6 clz.d a2, a3");
|
||||
COMPARE(ctz_d(a4, a5), "00002d28 ctz.d a4, a5");
|
||||
|
||||
COMPARE(clo_w(a0, a1), "000010a4 clo.w a0, a1");
|
||||
COMPARE(cto_w(a2, a3), "000018e6 cto.w a2, a3");
|
||||
COMPARE(clo_d(a4, a5), "00002128 clo.d a4, a5");
|
||||
COMPARE(cto_d(a6, a7), "0000296a cto.d a6, a7");
|
||||
|
||||
COMPARE(revb_2h(a6, a7), "0000316a revb.2h a6, a7");
|
||||
COMPARE(revb_4h(t0, t1), "000035ac revb.4h t0, t1");
|
||||
COMPARE(revb_2w(t2, t3), "000039ee revb.2w t2, t3");
|
||||
COMPARE(revb_d(t4, t5), "00003e30 revb.d t4, t5");
|
||||
|
||||
COMPARE(revh_2w(a0, a1), "000040a4 revh.2w a0, a1");
|
||||
COMPARE(revh_d(a2, a3), "000044e6 revh.d a2, a3");
|
||||
|
||||
COMPARE(bitrev_4b(a4, a5), "00004928 bitrev.4b a4, a5");
|
||||
COMPARE(bitrev_8b(a6, a7), "00004d6a bitrev.8b a6, a7");
|
||||
COMPARE(bitrev_w(t0, t1), "000051ac bitrev.w t0, t1");
|
||||
COMPARE(bitrev_d(t2, t3), "000055ee bitrev.d t2, t3");
|
||||
|
||||
COMPARE(ext_w_b(t4, t5), "00005e30 ext.w.b t4, t5");
|
||||
COMPARE(ext_w_h(a0, a1), "000058a4 ext.w.h a0, a1");
|
||||
|
||||
COMPARE(fabs_s(f2, f3), "01140462 fabs.s f2, f3");
|
||||
COMPARE(fabs_d(f0, f0), "01140800 fabs.d f0, f0");
|
||||
|
||||
COMPARE(fneg_s(f0, f1), "01141420 fneg.s f0, f1");
|
||||
COMPARE(fneg_d(f0, f0), "01141800 fneg.d f0, f0");
|
||||
|
||||
COMPARE(fsqrt_s(f4, f5), "011444a4 fsqrt.s f4, f5");
|
||||
COMPARE(fsqrt_d(f0, f0), "01144800 fsqrt.d f0, f0");
|
||||
|
||||
COMPARE(fmov_s(f6, f7), "011494e6 fmov.s f6, f7");
|
||||
COMPARE(fmov_d(f0, f1), "01149820 fmov.d f0, f1");
|
||||
COMPARE(fmov_d(f1, f0), "01149801 fmov.d f1, f0");
|
||||
|
||||
COMPARE(movgr2fr_d(f0, t6), "0114aa40 movgr2fr.d f0, t6");
|
||||
COMPARE(movgr2fr_d(f1, t6), "0114aa41 movgr2fr.d f1, t6");
|
||||
|
||||
COMPARE(movgr2fr_w(f30, a3), "0114a4fe movgr2fr.w f30, a3");
|
||||
COMPARE(movgr2fr_w(f30, a0), "0114a49e movgr2fr.w f30, a0");
|
||||
|
||||
COMPARE(movgr2frh_w(f30, t6), "0114ae5e movgr2frh.w f30, t6");
|
||||
COMPARE(movgr2frh_w(f0, a3), "0114ace0 movgr2frh.w f0, a3");
|
||||
|
||||
COMPARE(movfr2gr_s(a3, f30), "0114b7c7 movfr2gr.s a3, f30");
|
||||
|
||||
COMPARE(movfr2gr_d(a6, f30), "0114bbca movfr2gr.d a6, f30");
|
||||
COMPARE(movfr2gr_d(t7, f30), "0114bbd3 movfr2gr.d t7, f30");
|
||||
|
||||
COMPARE(movfrh2gr_s(a5, f0), "0114bc09 movfrh2gr.s a5, f0");
|
||||
COMPARE(movfrh2gr_s(a4, f0), "0114bc08 movfrh2gr.s a4, f0");
|
||||
|
||||
COMPARE(movgr2fcsr(a2), "0114c0c0 movgr2fcsr fcsr, a2");
|
||||
COMPARE(movfcsr2gr(a4), "0114c808 movfcsr2gr a4, fcsr");
|
||||
|
||||
COMPARE(movfr2cf(FCC0, f0), "0114d000 movfr2cf fcc0, f0");
|
||||
COMPARE(movcf2fr(f1, FCC1), "0114d421 movcf2fr f1, fcc1");
|
||||
|
||||
COMPARE(movgr2cf(FCC2, a0), "0114d882 movgr2cf fcc2, a0");
|
||||
COMPARE(movcf2gr(a1, FCC3), "0114dc65 movcf2gr a1, fcc3");
|
||||
|
||||
COMPARE(fcvt_s_d(f0, f0), "01191800 fcvt.s.d f0, f0");
|
||||
COMPARE(fcvt_d_s(f0, f0), "01192400 fcvt.d.s f0, f0");
|
||||
|
||||
COMPARE(ftintrm_w_s(f8, f9), "011a0528 ftintrm.w.s f8, f9");
|
||||
COMPARE(ftintrm_w_d(f10, f11), "011a096a ftintrm.w.d f10, f11");
|
||||
COMPARE(ftintrm_l_s(f12, f13), "011a25ac ftintrm.l.s f12, f13");
|
||||
COMPARE(ftintrm_l_d(f14, f15), "011a29ee ftintrm.l.d f14, f15");
|
||||
|
||||
COMPARE(ftintrp_w_s(f16, f17), "011a4630 ftintrp.w.s f16, f17");
|
||||
COMPARE(ftintrp_w_d(f18, f19), "011a4a72 ftintrp.w.d f18, f19");
|
||||
COMPARE(ftintrp_l_s(f20, f21), "011a66b4 ftintrp.l.s f20, f21");
|
||||
COMPARE(ftintrp_l_d(f0, f1), "011a6820 ftintrp.l.d f0, f1");
|
||||
|
||||
COMPARE(ftintrz_w_s(f30, f4), "011a849e ftintrz.w.s f30, f4");
|
||||
COMPARE(ftintrz_w_d(f30, f4), "011a889e ftintrz.w.d f30, f4");
|
||||
COMPARE(ftintrz_l_s(f30, f0), "011aa41e ftintrz.l.s f30, f0");
|
||||
COMPARE(ftintrz_l_d(f30, f30), "011aabde ftintrz.l.d f30, f30");
|
||||
|
||||
COMPARE(ftintrne_w_s(f2, f3), "011ac462 ftintrne.w.s f2, f3");
|
||||
COMPARE(ftintrne_w_d(f4, f5), "011ac8a4 ftintrne.w.d f4, f5");
|
||||
COMPARE(ftintrne_l_s(f6, f7), "011ae4e6 ftintrne.l.s f6, f7");
|
||||
COMPARE(ftintrne_l_d(f8, f9), "011ae928 ftintrne.l.d f8, f9");
|
||||
|
||||
COMPARE(ftint_w_s(f10, f11), "011b056a ftint.w.s f10, f11");
|
||||
COMPARE(ftint_w_d(f12, f13), "011b09ac ftint.w.d f12, f13");
|
||||
COMPARE(ftint_l_s(f14, f15), "011b25ee ftint.l.s f14, f15");
|
||||
COMPARE(ftint_l_d(f16, f17), "011b2a30 ftint.l.d f16, f17");
|
||||
|
||||
COMPARE(ffint_s_w(f18, f19), "011d1272 ffint.s.w f18, f19");
|
||||
COMPARE(ffint_s_l(f20, f21), "011d1ab4 ffint.s.l f20, f21");
|
||||
COMPARE(ffint_d_w(f0, f1), "011d2020 ffint.d.w f0, f1");
|
||||
COMPARE(ffint_d_l(f2, f3), "011d2862 ffint.d.l f2, f3");
|
||||
|
||||
COMPARE(frint_s(f4, f5), "011e44a4 frint.s f4, f5");
|
||||
COMPARE(frint_d(f6, f7), "011e48e6 frint.d f6, f7");
|
||||
|
||||
COMPARE(frecip_s(f8, f9), "01145528 frecip.s f8, f9");
|
||||
COMPARE(frecip_d(f10, f11), "0114596a frecip.d f10, f11");
|
||||
|
||||
COMPARE(frsqrt_s(f12, f13), "011465ac frsqrt.s f12, f13");
|
||||
COMPARE(frsqrt_d(f14, f15), "011469ee frsqrt.d f14, f15");
|
||||
|
||||
COMPARE(fclass_s(f16, f17), "01143630 fclass.s f16, f17");
|
||||
COMPARE(fclass_d(f18, f19), "01143a72 fclass.d f18, f19");
|
||||
|
||||
COMPARE(flogb_s(f20, f21), "011426b4 flogb.s f20, f21");
|
||||
COMPARE(flogb_d(f0, f1), "01142820 flogb.d f0, f1");
|
||||
|
||||
VERIFY_RUN();
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
@ -56,6 +56,10 @@ static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
|
||||
for (int i = 0; i < kNumInstr; ++i) {
|
||||
__ Addu(v0, v0, Operand(1));
|
||||
}
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
for (int i = 0; i < kNumInstr; ++i) {
|
||||
__ Add_w(a0, a0, Operand(1));
|
||||
}
|
||||
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
|
||||
for (int i = 0; i < kNumInstr; ++i) {
|
||||
__ addi(r3, r3, Operand(1));
|
||||
|
2917
test/cctest/test-macro-assembler-loong64.cc
Normal file
2917
test/cctest/test-macro-assembler-loong64.cc
Normal file
File diff suppressed because it is too large
Load Diff
@ -614,6 +614,8 @@ using ArchRegExpMacroAssembler = RegExpMacroAssemblerPPC;
|
||||
using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
using ArchRegExpMacroAssembler = RegExpMacroAssemblerLOONG64;
|
||||
#elif V8_TARGET_ARCH_X87
|
||||
using ArchRegExpMacroAssembler = RegExpMacroAssemblerX87;
|
||||
#elif V8_TARGET_ARCH_RISCV64
|
||||
|
@ -140,6 +140,11 @@ void CompileJumpTableThunk(Address thunk, Address jump_target) {
|
||||
__ Lw(scratch, MemOperand(scratch, 0));
|
||||
__ Branch(&exit, ne, scratch, Operand(zero_reg));
|
||||
__ Jump(jump_target, RelocInfo::NONE);
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
__ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
|
||||
__ Ld_w(scratch, MemOperand(scratch, 0));
|
||||
__ Branch(&exit, ne, scratch, Operand(zero_reg));
|
||||
__ Jump(jump_target, RelocInfo::NONE);
|
||||
#elif V8_TARGET_ARCH_MIPS
|
||||
__ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
|
||||
__ lw(scratch, MemOperand(scratch, 0));
|
||||
|
@ -115,12 +115,12 @@
|
||||
}], # no_simd_hardware
|
||||
|
||||
##############################################################################
|
||||
['arch == riscv64', {
|
||||
['arch == riscv64 or arch == loong64', {
|
||||
# SIMD support is still in progress.
|
||||
'debugger/wasm-scope-info*': [SKIP],
|
||||
'debugger/wasm-step-after-trap': [SKIP],
|
||||
|
||||
}], # 'arch == riscv64'
|
||||
}], # 'arch == riscv64 or arch == loong64'
|
||||
|
||||
['arch == riscv64 and variant == stress_incremental_marking', {
|
||||
'debugger/wasm-gc-breakpoints': [SKIP]
|
||||
|
@ -70,15 +70,10 @@
|
||||
}],
|
||||
|
||||
################################################################################
|
||||
['arch == mips64el or arch == mipsel', {
|
||||
['arch == mips64el or arch == mipsel or arch == riscv64 or arch == loong64', {
|
||||
# Tests that require Simd enabled.
|
||||
'wasm-trace-memory': [SKIP],
|
||||
}], # arch == mips64el or arch == mipsel
|
||||
|
||||
['arch == riscv64', {
|
||||
# Tests that require Simd enabled.
|
||||
'wasm-trace-memory': [SKIP],
|
||||
}],
|
||||
}], # arch == mips64el or arch == mipsel or arch == riscv64 or arch == loong64
|
||||
|
||||
##############################################################################
|
||||
['no_simd_hardware == True', {
|
||||
|
@ -160,12 +160,12 @@
|
||||
'wasm/compare-exchange-stress': [PASS, SLOW, NO_VARIANTS],
|
||||
'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS],
|
||||
|
||||
# Very slow on ARM and MIPS, contains no architecture dependent code.
|
||||
'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
|
||||
'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
|
||||
'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
|
||||
'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64)', SKIP]],
|
||||
'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64)', SKIP]],
|
||||
# Very slow on ARM, MIPS, RISCV and LOONG, contains no architecture dependent code.
|
||||
'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, loong64)', SKIP]],
|
||||
'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, loong64)', SKIP]],
|
||||
'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, loong64)', SKIP]],
|
||||
'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64, loong64)', SKIP]],
|
||||
'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64, loong64)', SKIP]],
|
||||
|
||||
# TODO(bmeurer): Flaky timeouts (sometimes <1s, sometimes >3m).
|
||||
'unicodelctest': [PASS, NO_VARIANTS],
|
||||
@ -810,6 +810,15 @@
|
||||
'regress/regress-779407': [SKIP],
|
||||
}], # 'arch == mips64el or arch == mips64'
|
||||
|
||||
##############################################################################
|
||||
['arch == loong64', {
|
||||
|
||||
# This test fail because when convert sNaN to qNaN, loong64 use a different
|
||||
# qNaN encoding with x86 architectures
|
||||
'wasm/float-constant-folding': [SKIP],
|
||||
|
||||
}], # 'arch == loong64'
|
||||
|
||||
##############################################################################
|
||||
['arch == riscv64', {
|
||||
|
||||
@ -1427,7 +1436,7 @@
|
||||
|
||||
##############################################################################
|
||||
# TODO(v8:11421): Port baseline compiler to other architectures.
|
||||
['arch not in (x64, arm64, ia32, arm, mips64el, mipsel)', {
|
||||
['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, loong64)', {
|
||||
'baseline/*': [SKIP],
|
||||
}],
|
||||
|
||||
|
@ -482,6 +482,11 @@ v8_source_set("unittests_sources") {
|
||||
"assembler/turbo-assembler-s390-unittest.cc",
|
||||
"compiler/s390/instruction-selector-s390-unittest.cc",
|
||||
]
|
||||
} else if (v8_current_cpu == "loong64") {
|
||||
sources += [
|
||||
"assembler/turbo-assembler-loong64-unittest.cc",
|
||||
"compiler/loong64/instruction-selector-loong64-unittest.cc",
|
||||
]
|
||||
}
|
||||
|
||||
if (is_posix && v8_enable_webassembly) {
|
||||
|
64
test/unittests/assembler/turbo-assembler-loong64-unittest.cc
Normal file
64
test/unittests/assembler/turbo-assembler-loong64-unittest.cc
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/codegen/loong64/assembler-loong64-inl.h"
|
||||
#include "src/codegen/macro-assembler.h"
|
||||
#include "src/execution/simulator.h"
|
||||
#include "test/common/assembler-tester.h"
|
||||
#include "test/unittests/test-utils.h"
|
||||
#include "testing/gtest-support.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
#define __ tasm.
|
||||
|
||||
// Test the loong64 assembler by compiling some simple functions into
|
||||
// a buffer and executing them. These tests do not initialize the
|
||||
// V8 library, create a context, or use any V8 objects.
|
||||
|
||||
class TurboAssemblerTest : public TestWithIsolate {};
|
||||
|
||||
TEST_F(TurboAssemblerTest, TestHardAbort) {
|
||||
auto buffer = AllocateAssemblerBuffer();
|
||||
TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
|
||||
buffer->CreateView());
|
||||
__ set_root_array_available(false);
|
||||
__ set_abort_hard(true);
|
||||
__ Abort(AbortReason::kNoReason);
|
||||
|
||||
CodeDesc desc;
|
||||
tasm.GetCode(isolate(), &desc);
|
||||
buffer->MakeExecutable();
|
||||
// We need an isolate here to execute in the simulator.
|
||||
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
|
||||
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
|
||||
}
|
||||
|
||||
TEST_F(TurboAssemblerTest, TestCheck) {
|
||||
auto buffer = AllocateAssemblerBuffer();
|
||||
TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
|
||||
buffer->CreateView());
|
||||
__ set_root_array_available(false);
|
||||
__ set_abort_hard(true);
|
||||
|
||||
// Fail if the first parameter (in {a0}) is 17.
|
||||
__ Check(Condition::ne, AbortReason::kNoReason, a0, Operand(17));
|
||||
__ Ret();
|
||||
|
||||
CodeDesc desc;
|
||||
tasm.GetCode(isolate(), &desc);
|
||||
buffer->MakeExecutable();
|
||||
// We need an isolate here to execute in the simulator.
|
||||
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
|
||||
|
||||
f.Call(0);
|
||||
f.Call(18);
|
||||
ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
File diff suppressed because it is too large
Load Diff
@ -11,6 +11,8 @@
|
||||
#include "src/execution/mips/frame-constants-mips.h"
|
||||
#elif V8_TARGET_ARCH_MIPS64
|
||||
#include "src/execution/mips64/frame-constants-mips64.h"
|
||||
#elif V8_TARGET_ARCH_LOONG64
|
||||
#include "src/execution/loong64/frame-constants-loong64.h"
|
||||
#elif V8_TARGET_ARCH_ARM
|
||||
#include "src/execution/arm/frame-constants-arm.h"
|
||||
#elif V8_TARGET_ARCH_ARM64
|
||||
|
@ -89,7 +89,7 @@
|
||||
'conversions': [SKIP],
|
||||
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simulator_run'
|
||||
|
||||
['(arch == mipsel or arch == mips64el) and simulator_run', {
|
||||
['(arch == mipsel or arch == mips64el or arch == loong64) and simulator_run', {
|
||||
# These tests need larger stack size on simulator.
|
||||
'skip-stack-guard-page': '--sim-stack-size=8192',
|
||||
'proposals/tail-call/skip-stack-guard-page': '--sim-stack-size=8192',
|
||||
|
@ -42,7 +42,7 @@ BUILD_TARGETS_ALL = ["all"]
|
||||
|
||||
# All arches that this script understands.
|
||||
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
|
||||
"riscv64", "s390", "s390x", "android_arm", "android_arm64"]
|
||||
"riscv64", "s390", "s390x", "android_arm", "android_arm64", "loong64"]
|
||||
# Arches that get built/run when you don't specify any.
|
||||
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
|
||||
# Modes that this script understands.
|
||||
@ -310,7 +310,7 @@ class Config(object):
|
||||
elif self.arch == "android_arm64":
|
||||
v8_cpu = "arm64"
|
||||
elif self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
|
||||
"riscv64", "s390", "s390x"):
|
||||
"riscv64", "s390", "s390x", "loong64"):
|
||||
v8_cpu = self.arch
|
||||
else:
|
||||
return []
|
||||
|
@ -44,7 +44,7 @@ AUTO_EXCLUDE_PATTERNS = [
|
||||
# platform-specific headers
|
||||
'\\b{}\\b'.format(p) for p in
|
||||
('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390',
|
||||
'ppc','riscv64')]
|
||||
'ppc', 'riscv64', 'loong64')]
|
||||
|
||||
args = None
|
||||
def parse_args():
|
||||
|
@ -113,7 +113,8 @@ SLOW_ARCHS = [
|
||||
"mips64el",
|
||||
"s390",
|
||||
"s390x",
|
||||
"riscv64"
|
||||
"riscv64",
|
||||
"loong64"
|
||||
]
|
||||
|
||||
|
||||
@ -663,6 +664,9 @@ class BaseTestRunner(object):
|
||||
self.build_config.arch == 'mipsel':
|
||||
no_simd_hardware = not simd_mips
|
||||
|
||||
if self.build_config.arch == 'loong64':
|
||||
no_simd_hardware = True
|
||||
|
||||
# S390 hosts without VEF1 do not support Simd.
|
||||
if self.build_config.arch == 's390x' and \
|
||||
not self.build_config.simulator_run and \
|
||||
|
@ -64,7 +64,7 @@ VARIABLES = {ALWAYS: True}
|
||||
for var in ["debug", "release", "big", "little", "android",
|
||||
"arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
|
||||
"x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
|
||||
"linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv64"]:
|
||||
"linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv64", "loong64"]:
|
||||
VARIABLES[var] = var
|
||||
|
||||
# Allow using variants as keywords.
|
||||
|
Loading…
Reference in New Issue
Block a user