Reland "Add Windows ARM64 ABI support to V8"
This is a reland of fcbb023b0e
Original change's description:
> Add Windows ARM64 ABI support to V8
>
> This change adds Windows ARM64 ABI support to V8. The major changes are:
> 1. Exclude the x18 register from any use, because it is reserved as
> the platform register, and preserve stack alignment after its removal.
> 2. Fix LP64 assumptions in the arm64 backend; Windows ARM64 is LLP64,
> where long is only 32 bits wide.
> 3. Probe the stack guard page for large allocations on the stack.
>
> Reference:
> Windows ARM64 ABI:
> https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=vs-2017
>
> Bug: chromium:893460
> Change-Id: I325884ac8dab719154a0047141e18a9fcb8dff7e
> Reviewed-on: https://chromium-review.googlesource.com/c/1285129
> Commit-Queue: Michael Achenbach <machenbach@chromium.org>
> Reviewed-by: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#56881}
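For context on item 2: on an LP64 system, long and pointers are both 64 bits, while LLP64 Windows keeps long at 32 bits, so every `1UL << n` in the arm64 backend silently becomes a 32-bit shift on Windows. A minimal illustration of the failure mode the `1UL` -> `1ULL` changes below address; this is a sketch, not code from the CL:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // LLP64: unsigned long is 32 bits, so shifting it by 48 is undefined
      // behavior (shift count >= width of the type).
      uint64_t bad = 1UL << 48;    // wrong on Windows ARM64
      uint64_t good = 1ULL << 48;  // 0x0001000000000000 everywhere
      printf("%llx %llx\n", (unsigned long long)bad, (unsigned long long)good);
      return 0;
    }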
CQ_INCLUDE_TRYBOTS=luci.chromium.try:android_arm64_dbg_recipe
TBR=mlippautz@chromium.org
Bug: chromium:893460
Change-Id: Icc45fd091c33f7df805842a70236b79b14756f52
Reviewed-on: https://chromium-review.googlesource.com/c/1297300
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56965}
parent 76968a2ff3
commit a6423cca4a

 AUTHORS | 1 +
@@ -156,6 +156,7 @@ Teddy Katz <teddy.katz@gmail.com>
 Tiancheng "Timothy" Gu <timothygu99@gmail.com>
 Tobias Burnus <burnus@net-b.de>
 Tobias Nießen <tniessen@tnie.de>
+Tom Tan <Tom.Tan@microsoft.com>
 Ujjwal Sharma <usharma1998@gmail.com>
 Victor Costan <costan@gmail.com>
 Vlad Burlik <vladbph@gmail.com>
@@ -83,7 +83,13 @@ if (v8_snapshot_toolchain == "") {
     if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
       _cpus = v8_current_cpu
     } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
-      _cpus = "x64_v8_${v8_current_cpu}"
+      if (is_win && v8_current_cpu == "arm64") {
+        # set _cpus to blank for Windows ARM64 so host_toolchain could be
+        # selected as snapshot toolchain later.
+        _cpus = ""
+      } else {
+        _cpus = "x64_v8_${v8_current_cpu}"
+      }
     } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
       _cpus = "x86_v8_${v8_current_cpu}"
     } else {
@@ -94,6 +100,9 @@ if (v8_snapshot_toolchain == "") {
 
     if (_cpus != "") {
       v8_snapshot_toolchain = "//build/toolchain/${host_os}:${_clang}${_cpus}"
+    } else if (is_win && v8_current_cpu == "arm64") {
+      # cross compile Windows arm64 with Windows x64 toolchain.
+      v8_snapshot_toolchain = host_toolchain
     }
   }
 }
@@ -89,14 +89,14 @@ inline void CPURegList::Remove(const CPURegister& other1,
 inline void CPURegList::Combine(int code) {
   DCHECK(IsValid());
   DCHECK(CPURegister::Create(code, size_, type_).IsValid());
-  list_ |= (1UL << code);
+  list_ |= (1ULL << code);
 }
 
 
 inline void CPURegList::Remove(int code) {
   DCHECK(IsValid());
   DCHECK(CPURegister::Create(code, size_, type_).IsValid());
-  list_ &= ~(1UL << code);
+  list_ &= ~(1ULL << code);
 }
 
 
@@ -679,7 +679,7 @@ Address RelocInfo::target_address_address() {
     return constant_pool_entry_address();
   } else {
     DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
-    return reinterpret_cast<Address>(pc_);
+    return pc_;
   }
 }
 
@@ -68,7 +68,7 @@ CPURegister CPURegList::PopLowestIndex() {
     return NoCPUReg;
   }
   int index = CountTrailingZeros(list_, kRegListSizeInBits);
-  DCHECK((1 << index) & list_);
+  DCHECK((1LL << index) & list_);
   Remove(index);
   return CPURegister::Create(index, size_, type_);
 }
@@ -81,7 +81,7 @@ CPURegister CPURegList::PopHighestIndex() {
   }
   int index = CountLeadingZeros(list_, kRegListSizeInBits);
   index = kRegListSizeInBits - 1 - index;
-  DCHECK((1 << index) & list_);
+  DCHECK((1LL << index) & list_);
   Remove(index);
   return CPURegister::Create(index, size_, type_);
 }
@@ -110,8 +110,14 @@ CPURegList CPURegList::GetCalleeSavedV(int size) {
 
 
 CPURegList CPURegList::GetCallerSaved(int size) {
+#if defined(V8_OS_WIN)
+  // x18 is reserved as platform register on Windows arm64.
+  // Registers x0-x17 and lr (x30) are caller-saved.
+  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 17);
+#else
   // Registers x0-x18 and lr (x30) are caller-saved.
   CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+#endif
   list.Combine(lr);
   return list;
 }
@@ -144,9 +150,13 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
   list.Remove(16);
   list.Remove(17);
 
+  // Don't add x18 to safepoint list on Windows arm64 because it is reserved
+  // as platform register.
+#if !defined(V8_OS_WIN)
   // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
   // is a caller-saved register according to the procedure call standard.
   list.Combine(18);
+#endif
 
   // Add the link register (x30) to the safepoint list.
   list.Combine(30);
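The register lists being edited here are 64-bit masks indexed by register code. A rough model of the bookkeeping, simplified from CPURegList with illustrative names and a GCC/Clang popcount builtin, shows why x18 is simply left out of the caller-saved set on Windows:

    #include <cstdint>

    // Simplified sketch of a register-code bitmask such as CPURegList.
    struct RegList {
      uint64_t bits = 0;
      void Combine(int code) { bits |= (1ULL << code); }  // 1UL would break on LLP64
      void Remove(int code) { bits &= ~(1ULL << code); }
      int Count() const { return __builtin_popcountll(bits); }
    };

    RegList CallerSaved(bool windows_arm64) {
      RegList list;
      // x0-x17 always; x18 only where it is not the platform register.
      for (int code = 0; code <= (windows_arm64 ? 17 : 18); ++code) {
        list.Combine(code);
      }
      list.Combine(30);  // lr
      return list;
    }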
@@ -506,11 +516,11 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
   }
   // Step two: check that the offsets are contiguous and that the range
   // is OK for ldp/stp.
-  if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
+  if ((operandB.offset() == operandA.offset() + (1LL << access_size_log2)) &&
       is_int7(operandA.offset() >> access_size_log2)) {
     return kPairAB;
   }
-  if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
+  if ((operandA.offset() == operandB.offset() + (1LL << access_size_log2)) &&
       is_int7(operandB.offset() >> access_size_log2)) {
     return kPairBA;
   }
@@ -4002,16 +4012,16 @@ void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
     // Calculate a new immediate and shift combination to encode the immediate
     // argument.
     shift = 0;
-    if ((imm & ~0xFFFFUL) == 0) {
+    if ((imm & ~0xFFFFULL) == 0) {
       // Nothing to do.
-    } else if ((imm & ~(0xFFFFUL << 16)) == 0) {
+    } else if ((imm & ~(0xFFFFULL << 16)) == 0) {
      imm >>= 16;
      shift = 1;
-    } else if ((imm & ~(0xFFFFUL << 32)) == 0) {
+    } else if ((imm & ~(0xFFFFULL << 32)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
-    } else if ((imm & ~(0xFFFFUL << 48)) == 0) {
+    } else if ((imm & ~(0xFFFFULL << 48)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
@@ -18,6 +18,12 @@
 #include "src/globals.h"
 #include "src/utils.h"
 
+// Windows arm64 SDK defines mvn to NEON intrinsic neon_not which will not
+// be used here.
+#if defined(V8_OS_WIN) && defined(mvn)
+#undef mvn
+#endif
+
 namespace v8 {
 namespace internal {
 
@@ -36,11 +42,20 @@ namespace internal {
   R(x16) R(x17) R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) \
   R(x24) R(x25) R(x26) R(x27) R(x28) R(x29) R(x30) R(x31)
 
+#if defined(V8_OS_WIN)
+// x18 is reserved as platform register on Windows ARM64.
+#define ALLOCATABLE_GENERAL_REGISTERS(R)                  \
+  R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
+  R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
+  R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25)        \
+  R(x27) R(x28)
+#else
 #define ALLOCATABLE_GENERAL_REGISTERS(R)                  \
   R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
   R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
   R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25) \
   R(x27) R(x28)
+#endif
 
 #define FLOAT_REGISTERS(V)                                \
   V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  \
@@ -207,7 +222,12 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
   int reg_size_;
   RegisterType reg_type_;
 
+#if defined(V8_OS_WIN) && !defined(__clang__)
+  // MSVC has problem to parse template base class as friend class.
+  friend RegisterBase;
+#else
   friend class RegisterBase;
+#endif
 
   constexpr CPURegister(int code, int size, RegisterType type)
       : RegisterBase(code), reg_size_(size), reg_type_(type) {}
@@ -533,8 +553,8 @@ class CPURegList {
            ((type == CPURegister::kVRegister) &&
             (last_reg < kNumberOfVRegisters)));
     DCHECK(last_reg >= first_reg);
-    list_ = (1UL << (last_reg + 1)) - 1;
-    list_ &= ~((1UL << first_reg) - 1);
+    list_ = (1ULL << (last_reg + 1)) - 1;
+    list_ &= ~((1ULL << first_reg) - 1);
     DCHECK(IsValid());
   }
 
@@ -693,7 +713,7 @@ class Immediate {
 // -----------------------------------------------------------------------------
 // Operands.
 constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
-constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+constexpr uint64_t kSmiShiftMask = (1ULL << kSmiShift) - 1;
 
 // Represents an operand in a machine instruction.
 class Operand {
@@ -8,12 +8,16 @@
 #include "src/base/macros.h"
 #include "src/globals.h"
 
-// Assert that this is an LP64 system.
+// Assert that this is an LP64 system, or LLP64 on Windows.
 STATIC_ASSERT(sizeof(int) == sizeof(int32_t));
+#if defined(V8_OS_WIN)
+STATIC_ASSERT(sizeof(1L) == sizeof(int32_t));
+#else
 STATIC_ASSERT(sizeof(long) == sizeof(int64_t));  // NOLINT(runtime/int)
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
+#endif
 STATIC_ASSERT(sizeof(void *) == sizeof(int64_t));
 STATIC_ASSERT(sizeof(1) == sizeof(int32_t));
-STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
 
 
 // Get the standard printf format macros for C99 stdint types.
@@ -77,17 +81,17 @@ const int64_t kDRegMask = 0xffffffffffffffffL;
 // TODO(all) check if the expression below works on all compilers or if it
 // triggers an overflow error.
 const int64_t kDSignBit = 63;
-const int64_t kDSignMask = 0x1L << kDSignBit;
+const int64_t kDSignMask = 0x1LL << kDSignBit;
 const int64_t kSSignBit = 31;
-const int64_t kSSignMask = 0x1L << kSSignBit;
+const int64_t kSSignMask = 0x1LL << kSSignBit;
 const int64_t kXSignBit = 63;
-const int64_t kXSignMask = 0x1L << kXSignBit;
+const int64_t kXSignMask = 0x1LL << kXSignBit;
 const int64_t kWSignBit = 31;
-const int64_t kWSignMask = 0x1L << kWSignBit;
+const int64_t kWSignMask = 0x1LL << kWSignBit;
 const int64_t kDQuietNanBit = 51;
-const int64_t kDQuietNanMask = 0x1L << kDQuietNanBit;
+const int64_t kDQuietNanMask = 0x1LL << kDQuietNanBit;
 const int64_t kSQuietNanBit = 22;
-const int64_t kSQuietNanMask = 0x1L << kSQuietNanBit;
+const int64_t kSQuietNanMask = 0x1LL << kSQuietNanBit;
 const int64_t kByteMask = 0xffL;
 const int64_t kHalfWordMask = 0xffffL;
 const int64_t kWordMask = 0xffffffffL;
@@ -273,7 +277,7 @@ V_(Flags, 31, 28, Bits, uint32_t) \
 V_(N, 31, 31, Bits, bool)         \
 V_(Z, 30, 30, Bits, bool)         \
 V_(C, 29, 29, Bits, bool)         \
-V_(V, 28, 28, Bits, uint32_t)     \
+V_(V, 28, 28, Bits, bool)         \
 M_(NZCV, Flags_mask)              \
                                   \
 /* FPCR */                        \
@@ -445,14 +449,14 @@ enum SystemRegister {
 const uint32_t kUnallocatedInstruction = 0xffffffff;
 
 // Generic fields.
-enum GenericInstrField {
+enum GenericInstrField : uint32_t {
   SixtyFourBits = 0x80000000,
   ThirtyTwoBits = 0x00000000,
   FP32 = 0x00000000,
   FP64 = 0x00400000
 };
 
-enum NEONFormatField {
+enum NEONFormatField : uint32_t {
   NEONFormatFieldMask = 0x40C00000,
   NEON_Q = 0x40000000,
   NEON_8B = 0x00000000,
@@ -465,14 +469,14 @@ enum NEONFormatField {
   NEON_2D = 0x00C00000 | NEON_Q
 };
 
-enum NEONFPFormatField {
+enum NEONFPFormatField : uint32_t {
   NEONFPFormatFieldMask = 0x40400000,
   NEON_FP_2S = FP32,
   NEON_FP_4S = FP32 | NEON_Q,
   NEON_FP_2D = FP64 | NEON_Q
 };
 
-enum NEONLSFormatField {
+enum NEONLSFormatField : uint32_t {
   NEONLSFormatFieldMask = 0x40000C00,
   LS_NEON_8B = 0x00000000,
   LS_NEON_16B = LS_NEON_8B | NEON_Q,
@@ -484,7 +488,7 @@ enum NEONLSFormatField {
   LS_NEON_2D = LS_NEON_1D | NEON_Q
 };
 
-enum NEONScalarFormatField {
+enum NEONScalarFormatField : uint32_t {
   NEONScalarFormatFieldMask = 0x00C00000,
   NEONScalar = 0x10000000,
   NEON_B = 0x00000000,
@@ -494,7 +498,7 @@ enum NEONScalarFormatField {
 };
 
 // PC relative addressing.
-enum PCRelAddressingOp {
+enum PCRelAddressingOp : uint32_t {
   PCRelAddressingFixed = 0x10000000,
   PCRelAddressingFMask = 0x1F000000,
   PCRelAddressingMask = 0x9F000000,
@@ -504,7 +508,7 @@ enum PCRelAddressingOp {
 
 // Add/sub (immediate, shifted and extended.)
 const int kSFOffset = 31;
-enum AddSubOp {
+enum AddSubOp : uint32_t {
   AddSubOpMask = 0x60000000,
   AddSubSetFlagsBit = 0x20000000,
   ADD = 0x00000000,
@@ -519,7 +523,7 @@ enum AddSubOp {
   V(SUB),  \
   V(SUBS)
 
-enum AddSubImmediateOp {
+enum AddSubImmediateOp : uint32_t {
   AddSubImmediateFixed = 0x11000000,
   AddSubImmediateFMask = 0x1F000000,
   AddSubImmediateMask = 0xFF000000,
@@ -530,7 +534,7 @@ enum AddSubImmediateOp {
 #undef ADD_SUB_IMMEDIATE
 };
 
-enum AddSubShiftedOp {
+enum AddSubShiftedOp : uint32_t {
   AddSubShiftedFixed = 0x0B000000,
   AddSubShiftedFMask = 0x1F200000,
   AddSubShiftedMask = 0xFF200000,
@@ -541,7 +545,7 @@ enum AddSubShiftedOp {
 #undef ADD_SUB_SHIFTED
 };
 
-enum AddSubExtendedOp {
+enum AddSubExtendedOp : uint32_t {
   AddSubExtendedFixed = 0x0B200000,
   AddSubExtendedFMask = 0x1F200000,
   AddSubExtendedMask = 0xFFE00000,
@@ -553,7 +557,7 @@ enum AddSubExtendedOp {
 };
 
 // Add/sub with carry.
-enum AddSubWithCarryOp {
+enum AddSubWithCarryOp : uint32_t {
   AddSubWithCarryFixed = 0x1A000000,
   AddSubWithCarryFMask = 0x1FE00000,
   AddSubWithCarryMask = 0xFFE0FC00,
@@ -571,7 +575,7 @@ enum AddSubWithCarryOp {
 
 
 // Logical (immediate and shifted register).
-enum LogicalOp {
+enum LogicalOp : uint32_t {
   LogicalOpMask = 0x60200000,
   NOT = 0x00200000,
   AND = 0x00000000,
@@ -585,7 +589,7 @@ enum LogicalOp {
 };
 
 // Logical immediate.
-enum LogicalImmediateOp {
+enum LogicalImmediateOp : uint32_t {
   LogicalImmediateFixed = 0x12000000,
   LogicalImmediateFMask = 0x1F800000,
   LogicalImmediateMask = 0xFF800000,
@@ -600,7 +604,7 @@ enum LogicalImmediateOp {
 };
 
 // Logical shifted register.
-enum LogicalShiftedOp {
+enum LogicalShiftedOp : uint32_t {
   LogicalShiftedFixed = 0x0A000000,
   LogicalShiftedFMask = 0x1F000000,
   LogicalShiftedMask = 0xFF200000,
@@ -631,7 +635,7 @@ enum LogicalShiftedOp {
 };
 
 // Move wide immediate.
-enum MoveWideImmediateOp {
+enum MoveWideImmediateOp : uint32_t {
   MoveWideImmediateFixed = 0x12800000,
   MoveWideImmediateFMask = 0x1F800000,
   MoveWideImmediateMask = 0xFF800000,
@@ -648,7 +652,7 @@ enum MoveWideImmediateOp {
 
 // Bitfield.
 const int kBitfieldNOffset = 22;
-enum BitfieldOp {
+enum BitfieldOp : uint32_t {
   BitfieldFixed = 0x13000000,
   BitfieldFMask = 0x1F800000,
   BitfieldMask = 0xFF800000,
@@ -665,7 +669,7 @@ enum BitfieldOp {
 };
 
 // Extract.
-enum ExtractOp {
+enum ExtractOp : uint32_t {
   ExtractFixed = 0x13800000,
   ExtractFMask = 0x1F800000,
   ExtractMask = 0xFFA00000,
@@ -675,7 +679,7 @@ enum ExtractOp {
 };
 
 // Unconditional branch.
-enum UnconditionalBranchOp {
+enum UnconditionalBranchOp : uint32_t {
   UnconditionalBranchFixed = 0x14000000,
   UnconditionalBranchFMask = 0x7C000000,
   UnconditionalBranchMask = 0xFC000000,
@@ -684,7 +688,7 @@ enum UnconditionalBranchOp {
 };
 
 // Unconditional branch to register.
-enum UnconditionalBranchToRegisterOp {
+enum UnconditionalBranchToRegisterOp : uint32_t {
   UnconditionalBranchToRegisterFixed = 0xD6000000,
   UnconditionalBranchToRegisterFMask = 0xFE000000,
   UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
@@ -694,7 +698,7 @@ enum UnconditionalBranchToRegisterOp {
 };
 
 // Compare and branch.
-enum CompareBranchOp {
+enum CompareBranchOp : uint32_t {
   CompareBranchFixed = 0x34000000,
   CompareBranchFMask = 0x7E000000,
   CompareBranchMask = 0xFF000000,
@@ -707,7 +711,7 @@ enum CompareBranchOp {
 };
 
 // Test and branch.
-enum TestBranchOp {
+enum TestBranchOp : uint32_t {
   TestBranchFixed = 0x36000000,
   TestBranchFMask = 0x7E000000,
   TestBranchMask = 0x7F000000,
@@ -716,7 +720,7 @@ enum TestBranchOp {
 };
 
 // Conditional branch.
-enum ConditionalBranchOp {
+enum ConditionalBranchOp : uint32_t {
   ConditionalBranchFixed = 0x54000000,
   ConditionalBranchFMask = 0xFE000000,
   ConditionalBranchMask = 0xFF000010,
@@ -728,12 +732,12 @@ enum ConditionalBranchOp {
 // and CR fields to encode parameters. To handle this cleanly, the system
 // instructions are split into more than one enum.
 
-enum SystemOp {
+enum SystemOp : uint32_t {
   SystemFixed = 0xD5000000,
   SystemFMask = 0xFFC00000
 };
 
-enum SystemSysRegOp {
+enum SystemSysRegOp : uint32_t {
   SystemSysRegFixed = 0xD5100000,
   SystemSysRegFMask = 0xFFD00000,
   SystemSysRegMask = 0xFFF00000,
@@ -741,7 +745,7 @@ enum SystemSysRegOp {
   MSR = SystemSysRegFixed | 0x00000000
 };
 
-enum SystemHintOp {
+enum SystemHintOp : uint32_t {
   SystemHintFixed = 0xD503201F,
   SystemHintFMask = 0xFFFFF01F,
   SystemHintMask = 0xFFFFF01F,
@@ -749,7 +753,7 @@ enum SystemHintOp {
 };
 
 // Exception.
-enum ExceptionOp {
+enum ExceptionOp : uint32_t {
   ExceptionFixed = 0xD4000000,
   ExceptionFMask = 0xFF000000,
   ExceptionMask = 0xFFE0001F,
@@ -765,7 +769,7 @@ enum ExceptionOp {
 // Code used to spot hlt instructions that should not be hit.
 const int kHltBadCode = 0xbad;
 
-enum MemBarrierOp {
+enum MemBarrierOp : uint32_t {
   MemBarrierFixed = 0xD503309F,
   MemBarrierFMask = 0xFFFFF09F,
   MemBarrierMask = 0xFFFFF0FF,
@@ -775,13 +779,13 @@ enum MemBarrierOp {
 };
 
 // Any load or store (including pair).
-enum LoadStoreAnyOp {
+enum LoadStoreAnyOp : uint32_t {
   LoadStoreAnyFMask = 0x0a000000,
   LoadStoreAnyFixed = 0x08000000
 };
 
 // Any load pair or store pair.
-enum LoadStorePairAnyOp {
+enum LoadStorePairAnyOp : uint32_t {
   LoadStorePairAnyFMask = 0x3a000000,
   LoadStorePairAnyFixed = 0x28000000
 };
@@ -794,7 +798,7 @@ enum LoadStorePairAnyOp {
   V(LDP, q, 0x84400000)
 
 // Load/store pair (post, pre and offset.)
-enum LoadStorePairOp {
+enum LoadStorePairOp : uint32_t {
   LoadStorePairMask = 0xC4400000,
   LoadStorePairLBit = 1 << 22,
 #define LOAD_STORE_PAIR(A, B, C) \
@@ -803,7 +807,7 @@ enum LoadStorePairOp {
 #undef LOAD_STORE_PAIR
 };
 
-enum LoadStorePairPostIndexOp {
+enum LoadStorePairPostIndexOp : uint32_t {
   LoadStorePairPostIndexFixed = 0x28800000,
   LoadStorePairPostIndexFMask = 0x3B800000,
   LoadStorePairPostIndexMask = 0xFFC00000,
@@ -813,7 +817,7 @@ enum LoadStorePairPostIndexOp {
 #undef LOAD_STORE_PAIR_POST_INDEX
 };
 
-enum LoadStorePairPreIndexOp {
+enum LoadStorePairPreIndexOp : uint32_t {
   LoadStorePairPreIndexFixed = 0x29800000,
   LoadStorePairPreIndexFMask = 0x3B800000,
   LoadStorePairPreIndexMask = 0xFFC00000,
@@ -823,7 +827,7 @@ enum LoadStorePairPreIndexOp {
 #undef LOAD_STORE_PAIR_PRE_INDEX
 };
 
-enum LoadStorePairOffsetOp {
+enum LoadStorePairOffsetOp : uint32_t {
   LoadStorePairOffsetFixed = 0x29000000,
   LoadStorePairOffsetFMask = 0x3B800000,
   LoadStorePairOffsetMask = 0xFFC00000,
@@ -834,7 +838,7 @@ enum LoadStorePairOffsetOp {
 };
 
 // Load literal.
-enum LoadLiteralOp {
+enum LoadLiteralOp : uint32_t {
   LoadLiteralFixed = 0x18000000,
   LoadLiteralFMask = 0x3B000000,
   LoadLiteralMask = 0xFF000000,
@@ -876,7 +880,7 @@ enum LoadLiteralOp {
 // clang-format on
 
 // Load/store unscaled offset.
-enum LoadStoreUnscaledOffsetOp {
+enum LoadStoreUnscaledOffsetOp : uint32_t {
   LoadStoreUnscaledOffsetFixed = 0x38000000,
   LoadStoreUnscaledOffsetFMask = 0x3B200C00,
   LoadStoreUnscaledOffsetMask = 0xFFE00C00,
@@ -887,7 +891,7 @@ enum LoadStoreUnscaledOffsetOp {
 };
 
 // Load/store (post, pre, offset and unsigned.)
-enum LoadStoreOp {
+enum LoadStoreOp : uint32_t {
   LoadStoreMask = 0xC4C00000,
 #define LOAD_STORE(A, B, C, D) A##B##_##C = D
   LOAD_STORE_OP_LIST(LOAD_STORE),
@@ -896,7 +900,7 @@ enum LoadStoreOp {
 };
 
 // Load/store post index.
-enum LoadStorePostIndex {
+enum LoadStorePostIndex : uint32_t {
   LoadStorePostIndexFixed = 0x38000400,
   LoadStorePostIndexFMask = 0x3B200C00,
   LoadStorePostIndexMask = 0xFFE00C00,
@@ -907,7 +911,7 @@ enum LoadStorePostIndex {
 };
 
 // Load/store pre index.
-enum LoadStorePreIndex {
+enum LoadStorePreIndex : uint32_t {
   LoadStorePreIndexFixed = 0x38000C00,
   LoadStorePreIndexFMask = 0x3B200C00,
   LoadStorePreIndexMask = 0xFFE00C00,
@@ -918,7 +922,7 @@ enum LoadStorePreIndex {
 };
 
 // Load/store unsigned offset.
-enum LoadStoreUnsignedOffset {
+enum LoadStoreUnsignedOffset : uint32_t {
   LoadStoreUnsignedOffsetFixed = 0x39000000,
   LoadStoreUnsignedOffsetFMask = 0x3B000000,
   LoadStoreUnsignedOffsetMask = 0xFFC00000,
@@ -930,7 +934,7 @@ enum LoadStoreUnsignedOffset {
 };
 
 // Load/store register offset.
-enum LoadStoreRegisterOffset {
+enum LoadStoreRegisterOffset : uint32_t {
   LoadStoreRegisterOffsetFixed = 0x38200800,
   LoadStoreRegisterOffsetFMask = 0x3B200C00,
   LoadStoreRegisterOffsetMask = 0xFFE00C00,
@@ -942,7 +946,7 @@ enum LoadStoreRegisterOffset {
 };
 
 // Load/store acquire/release.
-enum LoadStoreAcquireReleaseOp {
+enum LoadStoreAcquireReleaseOp : uint32_t {
   LoadStoreAcquireReleaseFixed = 0x08000000,
   LoadStoreAcquireReleaseFMask = 0x3F000000,
   LoadStoreAcquireReleaseMask = 0xCFC08000,
@@ -965,14 +969,14 @@ enum LoadStoreAcquireReleaseOp {
 };
 
 // Conditional compare.
-enum ConditionalCompareOp {
+enum ConditionalCompareOp : uint32_t {
   ConditionalCompareMask = 0x60000000,
   CCMN = 0x20000000,
   CCMP = 0x60000000
 };
 
 // Conditional compare register.
-enum ConditionalCompareRegisterOp {
+enum ConditionalCompareRegisterOp : uint32_t {
   ConditionalCompareRegisterFixed = 0x1A400000,
   ConditionalCompareRegisterFMask = 0x1FE00800,
   ConditionalCompareRegisterMask = 0xFFE00C10,
@@ -983,7 +987,7 @@ enum ConditionalCompareRegisterOp {
 };
 
 // Conditional compare immediate.
-enum ConditionalCompareImmediateOp {
+enum ConditionalCompareImmediateOp : uint32_t {
   ConditionalCompareImmediateFixed = 0x1A400800,
   ConditionalCompareImmediateFMask = 0x1FE00800,
   ConditionalCompareImmediateMask = 0xFFE00C10,
@@ -994,7 +998,7 @@ enum ConditionalCompareImmediateOp {
 };
 
 // Conditional select.
-enum ConditionalSelectOp {
+enum ConditionalSelectOp : uint32_t {
   ConditionalSelectFixed = 0x1A800000,
   ConditionalSelectFMask = 0x1FE00000,
   ConditionalSelectMask = 0xFFE00C00,
@@ -1013,7 +1017,7 @@ enum ConditionalSelectOp {
 };
 
 // Data processing 1 source.
-enum DataProcessing1SourceOp {
+enum DataProcessing1SourceOp : uint32_t {
   DataProcessing1SourceFixed = 0x5AC00000,
   DataProcessing1SourceFMask = 0x5FE00000,
   DataProcessing1SourceMask = 0xFFFFFC00,
@@ -1036,7 +1040,7 @@ enum DataProcessing1SourceOp {
 };
 
 // Data processing 2 source.
-enum DataProcessing2SourceOp {
+enum DataProcessing2SourceOp : uint32_t {
   DataProcessing2SourceFixed = 0x1AC00000,
   DataProcessing2SourceFMask = 0x5FE00000,
   DataProcessing2SourceMask = 0xFFE0FC00,
@@ -1069,7 +1073,7 @@ enum DataProcessing2SourceOp {
 };
 
 // Data processing 3 source.
-enum DataProcessing3SourceOp {
+enum DataProcessing3SourceOp : uint32_t {
   DataProcessing3SourceFixed = 0x1B000000,
   DataProcessing3SourceFMask = 0x1F000000,
   DataProcessing3SourceMask = 0xFFE08000,
@@ -1088,7 +1092,7 @@ enum DataProcessing3SourceOp {
 };
 
 // Floating point compare.
-enum FPCompareOp {
+enum FPCompareOp : uint32_t {
   FPCompareFixed = 0x1E202000,
   FPCompareFMask = 0x5F203C00,
   FPCompareMask = 0xFFE0FC1F,
@@ -1105,7 +1109,7 @@ enum FPCompareOp {
 };
 
 // Floating point conditional compare.
-enum FPConditionalCompareOp {
+enum FPConditionalCompareOp : uint32_t {
   FPConditionalCompareFixed = 0x1E200400,
   FPConditionalCompareFMask = 0x5F200C00,
   FPConditionalCompareMask = 0xFFE00C10,
@@ -1118,7 +1122,7 @@ enum FPConditionalCompareOp {
 };
 
 // Floating point conditional select.
-enum FPConditionalSelectOp {
+enum FPConditionalSelectOp : uint32_t {
   FPConditionalSelectFixed = 0x1E200C00,
   FPConditionalSelectFMask = 0x5F200C00,
   FPConditionalSelectMask = 0xFFE00C00,
@@ -1128,7 +1132,7 @@ enum FPConditionalSelectOp {
 };
 
 // Floating point immediate.
-enum FPImmediateOp {
+enum FPImmediateOp : uint32_t {
   FPImmediateFixed = 0x1E201000,
   FPImmediateFMask = 0x5F201C00,
   FPImmediateMask = 0xFFE01C00,
@@ -1137,7 +1141,7 @@ enum FPImmediateOp {
 };
 
 // Floating point data processing 1 source.
-enum FPDataProcessing1SourceOp {
+enum FPDataProcessing1SourceOp : uint32_t {
   FPDataProcessing1SourceFixed = 0x1E204000,
   FPDataProcessing1SourceFMask = 0x5F207C00,
   FPDataProcessing1SourceMask = 0xFFFFFC00,
@@ -1183,7 +1187,7 @@ enum FPDataProcessing1SourceOp {
 };
 
 // Floating point data processing 2 source.
-enum FPDataProcessing2SourceOp {
+enum FPDataProcessing2SourceOp : uint32_t {
   FPDataProcessing2SourceFixed = 0x1E200800,
   FPDataProcessing2SourceFMask = 0x5F200C00,
   FPDataProcessing2SourceMask = 0xFFE0FC00,
@@ -1217,7 +1221,7 @@ enum FPDataProcessing2SourceOp {
 };
 
 // Floating point data processing 3 source.
-enum FPDataProcessing3SourceOp {
+enum FPDataProcessing3SourceOp : uint32_t {
   FPDataProcessing3SourceFixed = 0x1F000000,
   FPDataProcessing3SourceFMask = 0x5F000000,
   FPDataProcessing3SourceMask = 0xFFE08000,
@@ -1232,7 +1236,7 @@ enum FPDataProcessing3SourceOp {
 };
 
 // Conversion between floating point and integer.
-enum FPIntegerConvertOp {
+enum FPIntegerConvertOp : uint32_t {
   FPIntegerConvertFixed = 0x1E200000,
   FPIntegerConvertFMask = 0x5F20FC00,
   FPIntegerConvertMask = 0xFFFFFC00,
@@ -1305,7 +1309,7 @@ enum FPIntegerConvertOp {
 };
 
 // Conversion between fixed point and floating point.
-enum FPFixedPointConvertOp {
+enum FPFixedPointConvertOp : uint32_t {
   FPFixedPointConvertFixed = 0x1E000000,
   FPFixedPointConvertFMask = 0x5F200000,
   FPFixedPointConvertMask = 0xFFFF0000,
@@ -1332,7 +1336,7 @@ enum FPFixedPointConvertOp {
 };
 
 // NEON instructions with two register operands.
-enum NEON2RegMiscOp {
+enum NEON2RegMiscOp : uint32_t {
   NEON2RegMiscFixed = 0x0E200800,
   NEON2RegMiscFMask = 0x9F3E0C00,
   NEON2RegMiscMask = 0xBF3FFC00,
@@ -1414,7 +1418,7 @@ enum NEON2RegMiscOp {
 };
 
 // NEON instructions with three same-type operands.
-enum NEON3SameOp {
+enum NEON3SameOp : uint32_t {
   NEON3SameFixed = 0x0E200400,
   NEON3SameFMask = 0x9F200400,
   NEON3SameMask = 0xBF20FC00,
@@ -1510,7 +1514,7 @@ enum NEON3SameOp {
 };
 
 // NEON instructions with three different-type operands.
-enum NEON3DifferentOp {
+enum NEON3DifferentOp : uint32_t {
   NEON3DifferentFixed = 0x0E200000,
   NEON3DifferentFMask = 0x9F200C00,
   NEON3DifferentMask = 0xFF20FC00,
@@ -1569,7 +1573,7 @@ enum NEON3DifferentOp {
 };
 
 // NEON instructions operating across vectors.
-enum NEONAcrossLanesOp {
+enum NEONAcrossLanesOp : uint32_t {
   NEONAcrossLanesFixed = 0x0E300800,
   NEONAcrossLanesFMask = 0x9F3E0C00,
   NEONAcrossLanesMask = 0xBF3FFC00,
@@ -1593,7 +1597,7 @@ enum NEONAcrossLanesOp {
 };
 
 // NEON instructions with indexed element operand.
-enum NEONByIndexedElementOp {
+enum NEONByIndexedElementOp : uint32_t {
   NEONByIndexedElementFixed = 0x0F000000,
   NEONByIndexedElementFMask = 0x9F000400,
   NEONByIndexedElementMask = 0xBF00F400,
@@ -1622,7 +1626,7 @@ enum NEONByIndexedElementOp {
 };
 
 // NEON modified immediate.
-enum NEONModifiedImmediateOp {
+enum NEONModifiedImmediateOp : uint32_t {
   NEONModifiedImmediateFixed = 0x0F000400,
   NEONModifiedImmediateFMask = 0x9FF80400,
   NEONModifiedImmediateOpBit = 0x20000000,
@@ -1633,14 +1637,14 @@ enum NEONModifiedImmediateOp {
 };
 
 // NEON extract.
-enum NEONExtractOp {
+enum NEONExtractOp : uint32_t {
   NEONExtractFixed = 0x2E000000,
   NEONExtractFMask = 0xBF208400,
   NEONExtractMask = 0xBFE08400,
   NEON_EXT = NEONExtractFixed | 0x00000000
 };
 
-enum NEONLoadStoreMultiOp {
+enum NEONLoadStoreMultiOp : uint32_t {
   NEONLoadStoreMultiL = 0x00400000,
   NEONLoadStoreMulti1_1v = 0x00007000,
   NEONLoadStoreMulti1_2v = 0x0000A000,
@@ -1652,7 +1656,7 @@ enum NEONLoadStoreMultiOp {
 };
 
 // NEON load/store multiple structures.
-enum NEONLoadStoreMultiStructOp {
+enum NEONLoadStoreMultiStructOp : uint32_t {
   NEONLoadStoreMultiStructFixed = 0x0C000000,
   NEONLoadStoreMultiStructFMask = 0xBFBF0000,
   NEONLoadStoreMultiStructMask = 0xBFFFF000,
@@ -1676,7 +1680,7 @@ enum NEONLoadStoreMultiStructOp {
 };
 
 // NEON load/store multiple structures with post-index addressing.
-enum NEONLoadStoreMultiStructPostIndexOp {
+enum NEONLoadStoreMultiStructPostIndexOp : uint32_t {
   NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000,
   NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000,
   NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000,
@@ -1697,7 +1701,7 @@ enum NEONLoadStoreMultiStructPostIndexOp {
   NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex
 };
 
-enum NEONLoadStoreSingleOp {
+enum NEONLoadStoreSingleOp : uint32_t {
   NEONLoadStoreSingle1 = 0x00000000,
   NEONLoadStoreSingle2 = 0x00200000,
   NEONLoadStoreSingle3 = 0x00002000,
@@ -1712,7 +1716,7 @@ enum NEONLoadStoreSingleOp {
 };
 
 // NEON load/store single structure.
-enum NEONLoadStoreSingleStructOp {
+enum NEONLoadStoreSingleStructOp : uint32_t {
   NEONLoadStoreSingleStructFixed = 0x0D000000,
   NEONLoadStoreSingleStructFMask = 0xBF9F0000,
   NEONLoadStoreSingleStructMask = 0xBFFFE000,
@@ -1777,7 +1781,7 @@ enum NEONLoadStoreSingleStructOp {
 };
 
 // NEON load/store single structure with post-index addressing.
-enum NEONLoadStoreSingleStructPostIndexOp {
+enum NEONLoadStoreSingleStructPostIndexOp : uint32_t {
   NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000,
   NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000,
   NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000,
@@ -1824,7 +1828,7 @@ enum NEONLoadStoreSingleStructPostIndexOp {
 };
 
 // NEON register copy.
-enum NEONCopyOp {
+enum NEONCopyOp : uint32_t {
   NEONCopyFixed = 0x0E000400,
   NEONCopyFMask = 0x9FE08400,
   NEONCopyMask = 0x3FE08400,
@@ -1843,7 +1847,7 @@ enum NEONCopyOp {
 };
 
 // NEON scalar instructions with indexed element operand.
-enum NEONScalarByIndexedElementOp {
+enum NEONScalarByIndexedElementOp : uint32_t {
   NEONScalarByIndexedElementFixed = 0x5F000000,
   NEONScalarByIndexedElementFMask = 0xDF000400,
   NEONScalarByIndexedElementMask = 0xFF00F400,
@@ -1866,7 +1870,7 @@ enum NEONScalarByIndexedElementOp {
 };
 
 // NEON shift immediate.
-enum NEONShiftImmediateOp {
+enum NEONShiftImmediateOp : uint32_t {
   NEONShiftImmediateFixed = 0x0F000400,
   NEONShiftImmediateFMask = 0x9F800400,
   NEONShiftImmediateMask = 0xBF80FC00,
@@ -1902,7 +1906,7 @@ enum NEONShiftImmediateOp {
 };
 
 // NEON scalar register copy.
-enum NEONScalarCopyOp {
+enum NEONScalarCopyOp : uint32_t {
   NEONScalarCopyFixed = 0x5E000400,
   NEONScalarCopyFMask = 0xDFE08400,
   NEONScalarCopyMask = 0xFFE0FC00,
@@ -1910,7 +1914,7 @@ enum NEONScalarCopyOp {
 };
 
 // NEON scalar pairwise instructions.
-enum NEONScalarPairwiseOp {
+enum NEONScalarPairwiseOp : uint32_t {
   NEONScalarPairwiseFixed = 0x5E300800,
   NEONScalarPairwiseFMask = 0xDF3E0C00,
   NEONScalarPairwiseMask = 0xFFB1F800,
@@ -1923,7 +1927,7 @@ enum NEONScalarPairwiseOp {
 };
 
 // NEON scalar shift immediate.
-enum NEONScalarShiftImmediateOp {
+enum NEONScalarShiftImmediateOp : uint32_t {
   NEONScalarShiftImmediateFixed = 0x5F000400,
   NEONScalarShiftImmediateFMask = 0xDF800400,
   NEONScalarShiftImmediateMask = 0xFF80FC00,
@@ -1954,7 +1958,7 @@ enum NEONScalarShiftImmediateOp {
 };
 
 // NEON table.
-enum NEONTableOp {
+enum NEONTableOp : uint32_t {
   NEONTableFixed = 0x0E000000,
   NEONTableFMask = 0xBF208C00,
   NEONTableExt = 0x00001000,
@@ -1970,7 +1974,7 @@ enum NEONTableOp {
 };
 
 // NEON perm.
-enum NEONPermOp {
+enum NEONPermOp : uint32_t {
   NEONPermFixed = 0x0E000800,
   NEONPermFMask = 0xBF208C00,
   NEONPermMask = 0x3F20FC00,
@@ -1983,7 +1987,7 @@ enum NEONPermOp {
 };
 
 // NEON scalar instructions with two register operands.
-enum NEONScalar2RegMiscOp {
+enum NEONScalar2RegMiscOp : uint32_t {
   NEONScalar2RegMiscFixed = 0x5E200800,
   NEONScalar2RegMiscFMask = 0xDF3E0C00,
   NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask,
@@ -2030,7 +2034,7 @@ enum NEONScalar2RegMiscOp {
 };
 
 // NEON scalar instructions with three same-type operands.
-enum NEONScalar3SameOp {
+enum NEONScalar3SameOp : uint32_t {
   NEONScalar3SameFixed = 0x5E200400,
   NEONScalar3SameFMask = 0xDF200400,
   NEONScalar3SameMask = 0xFF20FC00,
@@ -2073,7 +2077,7 @@ enum NEONScalar3SameOp {
 };
 
 // NEON scalar instructions with three different-type operands.
-enum NEONScalar3DiffOp {
+enum NEONScalar3DiffOp : uint32_t {
   NEONScalar3DiffFixed = 0x5E200000,
   NEONScalar3DiffFMask = 0xDF200C00,
   NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask,
@@ -2084,12 +2088,12 @@ enum NEONScalar3DiffOp {
 
 // Unimplemented and unallocated instructions. These are defined to make fixed
 // bit assertion easier.
-enum UnimplementedOp {
+enum UnimplementedOp : uint32_t {
   UnimplementedFixed = 0x00000000,
   UnimplementedFMask = 0x00000000
 };
 
-enum UnallocatedOp {
+enum UnallocatedOp : uint32_t {
   UnallocatedFixed = 0x00000000,
   UnallocatedFMask = 0x00000000
 };
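The long run of `enum ... : uint32_t` changes above is needed because constants such as SixtyFourBits = 0x80000000 do not fit in a signed int, so without an explicit underlying type each compiler picks its own implementation-defined type per enum, and MSVC's choice differs from Clang's in comparisons and overload resolution. A small self-contained illustration, not taken from the CL:

    #include <cstdint>
    #include <type_traits>

    enum PlainOp { PlainFixed = 0x40000000 };            // fits in int
    enum SizedOp : uint32_t { SixtyFour = 0x80000000 };  // would not fit in int

    // With an explicit underlying type, the value and its type are pinned
    // identically on every compiler:
    static_assert(std::is_same<std::underlying_type<SizedOp>::type,
                               uint32_t>::value,
                  "stable 32-bit opcode encoding");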
@@ -15,7 +15,7 @@ namespace internal {
 class CacheLineSizes {
  public:
   CacheLineSizes() {
-#ifdef USE_SIMULATOR
+#if defined(USE_SIMULATOR) || defined(V8_OS_WIN)
     cache_type_register_ = 0;
 #else
     // Copy the content of the cache type register to a core register.
@@ -38,7 +38,9 @@ class CacheLineSizes {
 };
 
 void CpuFeatures::FlushICache(void* address, size_t length) {
-#ifdef V8_HOST_ARCH_ARM64
+#if defined(V8_OS_WIN)
+  FlushInstructionCache(GetCurrentProcess(), address, length);
+#elif defined(V8_HOST_ARCH_ARM64)
   // The code below assumes user space cache operations are allowed. The goal
   // of this routine is to make sure the code generated is visible to the I
   // side of the CPU.
@@ -69,11 +69,24 @@ void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
   Register src = temps.AcquireX();
   masm->Add(src, src_base, src_offset);
 
+#if defined(V8_OS_WIN)
+  // x18 is reserved as platform register on Windows.
+  restore_list.Remove(x18);
+#endif
+
   // Restore every register in restore_list from src.
   while (!restore_list.IsEmpty()) {
     CPURegister reg0 = restore_list.PopLowestIndex();
     CPURegister reg1 = restore_list.PopLowestIndex();
     int offset0 = reg0.code() * reg_size;
+
+#if defined(V8_OS_WIN)
+    if (reg1 == NoCPUReg) {
+      masm->Ldr(reg0, MemOperand(src, offset0));
+      break;
+    }
+#endif
+
     int offset1 = reg1.code() * reg_size;
 
     // Pair up adjacent loads, otherwise read them separately.
@@ -3744,7 +3744,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
       uint64_t imm8 = instr->ImmNEONabcdefgh();
       uint64_t imm = 0;
       for (int i = 0; i < 8; ++i) {
-        if (imm8 & (1 << i)) {
+        if (imm8 & (1ULL << i)) {
           imm |= (UINT64_C(0xFF) << (8 * i));
         }
       }
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
+#include <cstdint>
 #include "include/v8config.h"
 
 namespace v8 {
 namespace internal {
@@ -21,6 +22,10 @@ namespace internal {
 // then move this code back into instructions-arm64.cc with the same types
 // that client code uses.
 
+#if defined(V8_OS_WIN)
+extern "C" {
+#endif
+
 extern const uint16_t kFP16PositiveInfinity = 0x7C00;
 extern const uint16_t kFP16NegativeInfinity = 0xFC00;
 extern const uint32_t kFP32PositiveInfinity = 0x7F800000;
@@ -42,5 +47,9 @@ extern const uint64_t kFP64DefaultNaN = 0x7FF8000000000000UL;
 extern const uint32_t kFP32DefaultNaN = 0x7FC00000;
 extern const uint16_t kFP16DefaultNaN = 0x7E00;
 
+#if defined(V8_OS_WIN)
+}  // end of extern "C"
+#endif
+
 }  // namespace internal
 }  // namespace v8
@@ -72,7 +72,7 @@ static uint64_t RotateRight(uint64_t value,
                             unsigned int width) {
   DCHECK_LE(width, 64);
   rotate &= 63;
-  return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
+  return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
          (value >> rotate);
 }
 
@@ -83,7 +83,7 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
   DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
          (width == 32));
   DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
-  uint64_t result = value & ((1UL << width) - 1UL);
+  uint64_t result = value & ((1ULL << width) - 1ULL);
   for (unsigned i = width; i < reg_size; i *= 2) {
     result |= (result << i);
   }
@@ -121,7 +121,7 @@ uint64_t Instruction::ImmLogical() {
       if (imm_s == 0x3F) {
         return 0;
       }
      uint64_t bits = (1UL << (imm_s + 1)) - 1;
      uint64_t bits = (1ULL << (imm_s + 1)) - 1;
       return RotateRight(bits, imm_r, 64);
     } else {
       if ((imm_s >> 1) == 0x1F) {
@@ -133,7 +133,7 @@ uint64_t Instruction::ImmLogical() {
         if ((imm_s & mask) == mask) {
           return 0;
         }
-        uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
+        uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
         return RepeatBitsAcrossReg(reg_size,
                                    RotateRight(bits, imm_r & mask, width),
                                    width);
|
@ -19,6 +19,10 @@ struct AssemblerOptions;
|
||||
|
||||
typedef uint32_t Instr;
|
||||
|
||||
#if defined(V8_OS_WIN)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
extern const float16 kFP16PositiveInfinity;
|
||||
extern const float16 kFP16NegativeInfinity;
|
||||
extern const float kFP32PositiveInfinity;
|
||||
@ -40,6 +44,10 @@ extern const double kFP64DefaultNaN;
|
||||
extern const float kFP32DefaultNaN;
|
||||
extern const float16 kFP16DefaultNaN;
|
||||
|
||||
#if defined(V8_OS_WIN)
|
||||
} // end of extern "C"
|
||||
#endif
|
||||
|
||||
unsigned CalcLSDataSize(LoadStoreOp op);
|
||||
unsigned CalcLSPairDataSize(LoadStorePairOp op);
|
||||
|
||||
|
@ -53,13 +53,27 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
|
||||
Register exclusion) const {
|
||||
int bytes = 0;
|
||||
auto list = kCallerSaved;
|
||||
DCHECK_EQ(list.Count() % 2, 0);
|
||||
// We only allow one exclusion register, so if the list is of even length
|
||||
// before exclusions, it must still be afterwards, to maintain alignment.
|
||||
// Therefore, we can ignore the exclusion register in the computation.
|
||||
// However, we leave it in the argument list to mirror the prototype for
|
||||
// Push/PopCallerSaved().
|
||||
|
||||
#if defined(V8_OS_WIN)
|
||||
// X18 is excluded from caller-saved register list on Windows ARM64 which
|
||||
// makes caller-saved registers in odd number. padreg is used accordingly
|
||||
// to maintain the alignment.
|
||||
DCHECK_EQ(list.Count() % 2, 1);
|
||||
if (exclusion.Is(no_reg)) {
|
||||
bytes += kXRegSizeInBits / 8;
|
||||
} else {
|
||||
bytes -= kXRegSizeInBits / 8;
|
||||
}
|
||||
#else
|
||||
DCHECK_EQ(list.Count() % 2, 0);
|
||||
USE(exclusion);
|
||||
#endif
|
||||
|
||||
bytes += list.Count() * kXRegSizeInBits / 8;
|
||||
|
||||
if (fp_mode == kSaveFPRegs) {
|
||||
@ -73,12 +87,24 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
|
||||
Register exclusion) {
|
||||
int bytes = 0;
|
||||
auto list = kCallerSaved;
|
||||
DCHECK_EQ(list.Count() % 2, 0);
|
||||
|
||||
#if defined(V8_OS_WIN)
|
||||
// X18 is excluded from caller-saved register list on Windows ARM64, use
|
||||
// padreg accordingly to maintain alignment.
|
||||
if (!exclusion.Is(no_reg)) {
|
||||
list.Remove(exclusion);
|
||||
} else {
|
||||
list.Combine(padreg);
|
||||
}
|
||||
#else
|
||||
if (!exclusion.Is(no_reg)) {
|
||||
// Replace the excluded register with padding to maintain alignment.
|
||||
list.Remove(exclusion);
|
||||
list.Combine(padreg);
|
||||
}
|
||||
#endif
|
||||
|
||||
DCHECK_EQ(list.Count() % 2, 0);
|
||||
PushCPURegList(list);
|
||||
bytes += list.Count() * kXRegSizeInBits / 8;
|
||||
|
||||
@ -99,12 +125,24 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
|
||||
}
|
||||
|
||||
auto list = kCallerSaved;
|
||||
DCHECK_EQ(list.Count() % 2, 0);
|
||||
|
||||
#if defined(V8_OS_WIN)
|
||||
// X18 is excluded from caller-saved register list on Windows ARM64, use
|
||||
// padreg accordingly to maintain alignment.
|
||||
if (!exclusion.Is(no_reg)) {
|
||||
list.Remove(exclusion);
|
||||
} else {
|
||||
list.Combine(padreg);
|
||||
}
|
||||
#else
|
||||
if (!exclusion.Is(no_reg)) {
|
||||
// Replace the excluded register with padding to maintain alignment.
|
||||
list.Remove(exclusion);
|
||||
list.Combine(padreg);
|
||||
}
|
||||
#endif
|
||||
|
||||
DCHECK_EQ(list.Count() % 2, 0);
|
||||
PopCPURegList(list);
|
||||
bytes += list.Count() * kXRegSizeInBits / 8;
|
||||
|
||||
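The padreg handling above is pure alignment arithmetic: AArch64 requires sp to stay 16-byte aligned, and each X register occupies 8 bytes, so the pushed list must contain an even number of registers. Spelled out, under the assumption that kCallerSaved matches the GetCallerSaved() definition earlier in this CL:

    // Windows ARM64: kCallerSaved = {x0..x17, lr} = 19 registers (odd count).
    //   no exclusion  -> combine padreg -> 20 * 8 = 160 bytes, 16-byte aligned
    //   one exclusion -> drop it        -> 18 * 8 = 144 bytes, 16-byte aligned
    // Elsewhere: kCallerSaved = {x0..x18, lr} = 20 registers (even count);
    //   an exclusion is swapped for padreg so the count stays 20.
    static_assert(((19 + 1) * 8) % 16 == 0, "padded push keeps sp aligned");
    static_assert(((19 - 1) * 8) % 16 == 0, "excluded push keeps sp aligned");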
|
@ -48,7 +48,14 @@ constexpr Register kReturnRegister2 = x2;
|
||||
constexpr Register kJSFunctionRegister = x1;
|
||||
constexpr Register kContextRegister = cp;
|
||||
constexpr Register kAllocateSizeRegister = x1;
|
||||
|
||||
#if defined(V8_OS_WIN)
|
||||
// x18 is reserved as platform register on Windows ARM64.
|
||||
constexpr Register kSpeculationPoisonRegister = x23;
|
||||
#else
|
||||
constexpr Register kSpeculationPoisonRegister = x18;
|
||||
#endif
|
||||
|
||||
constexpr Register kInterpreterAccumulatorRegister = x0;
|
||||
constexpr Register kInterpreterBytecodeOffsetRegister = x19;
|
||||
constexpr Register kInterpreterBytecodeArrayRegister = x20;
|
||||
|
@ -277,7 +277,7 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
|
||||
// The simulator uses a separate JS stack. If we have exhausted the C stack,
|
||||
// we also drop down the JS limit to reflect the exhaustion on the JS stack.
|
||||
if (GetCurrentStackPosition() < c_limit) {
|
||||
return reinterpret_cast<uintptr_t>(get_sp());
|
||||
return get_sp();
|
||||
}
|
||||
|
||||
// Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
|
||||
@ -332,7 +332,7 @@ void Simulator::Init(FILE* stream) {
|
||||
stack_limit_ = stack_ + stack_protection_size_;
|
||||
uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
|
||||
// The stack pointer must be 16-byte aligned.
|
||||
set_sp(tos & ~0xFUL);
|
||||
set_sp(tos & ~0xFULL);
|
||||
|
||||
stream_ = stream;
|
||||
print_disasm_ = new PrintDisassembler(stream_);
|
||||
@@ -403,6 +403,14 @@ void Simulator::RunFrom(Instruction* start) {
 // uses the ObjectPair structure.
 // The simulator assumes all runtime calls return two 64-bits values. If they
 // don't, register x1 is clobbered. This is fine because x1 is caller-saved.
+#if defined(V8_OS_WIN)
+typedef int64_t (*SimulatorRuntimeCall_ReturnPtr)(int64_t arg0, int64_t arg1,
+                                                  int64_t arg2, int64_t arg3,
+                                                  int64_t arg4, int64_t arg5,
+                                                  int64_t arg6, int64_t arg7,
+                                                  int64_t arg8);
+#endif
+
 typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
                                            int64_t arg2, int64_t arg3,
                                            int64_t arg4, int64_t arg5,
@@ -464,12 +472,16 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
       break;
 
     case ExternalReference::BUILTIN_CALL:
-    case ExternalReference::BUILTIN_CALL_PAIR: {
-      // Object* f(v8::internal::Arguments) or
-      // ObjectPair f(v8::internal::Arguments).
+#if defined(V8_OS_WIN)
+    {
+      // Object* f(v8::internal::Arguments).
       TraceSim("Type: BUILTIN_CALL\n");
-      SimulatorRuntimeCall target =
-          reinterpret_cast<SimulatorRuntimeCall>(external);
 
+      // When this simulator runs on Windows x64 host, function with ObjectPair
+      // return type accepts an implicit pointer to caller allocated memory for
+      // ObjectPair as return value. This diverges the calling convention from
+      // function which returns primitive type, so function returns ObjectPair
+      // and primitive type cannot share implementation.
+
       // We don't know how many arguments are being passed, but we can
       // pass 8 without touching the stack. They will be ignored by the
@@ -486,6 +498,43 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
           ", "
           "0x%016" PRIx64,
          arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+
+      SimulatorRuntimeCall_ReturnPtr target =
+          reinterpret_cast<SimulatorRuntimeCall_ReturnPtr>(external);
+
+      int64_t result =
+          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+      TraceSim("Returned: 0x%16\n", result);
+#ifdef DEBUG
+      CorruptAllCallerSavedCPURegisters();
+#endif
+      set_xreg(0, result);
+
+      break;
+    }
+#endif
+    case ExternalReference::BUILTIN_CALL_PAIR: {
+      // Object* f(v8::internal::Arguments) or
+      // ObjectPair f(v8::internal::Arguments).
+      TraceSim("Type: BUILTIN_CALL\n");
+
+      // We don't know how many arguments are being passed, but we can
+      // pass 8 without touching the stack. They will be ignored by the
+      // host function if they aren't used.
+      TraceSim(
+          "Arguments: "
+          "0x%016" PRIx64 ", 0x%016" PRIx64
+          ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64
+          ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64
+          ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64
+          ", "
+          "0x%016" PRIx64,
+          arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+      SimulatorRuntimeCall target =
+          reinterpret_cast<SimulatorRuntimeCall>(external);
       ObjectPair result =
           target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
       TraceSim("Returned: {%p, %p}\n", static_cast<void*>(result.x),
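The simulator split above exists because the Windows x64 calling convention returns structs larger than 8 bytes through a hidden pointer supplied by the caller, whereas the System V x86-64 ABI returns a 16-byte ObjectPair directly in RAX:RDX. When the arm64 simulator runs on a Windows x64 host, calling an int64_t-returning builtin through an ObjectPair-returning function type would therefore corrupt state. A hedged sketch of the distinction:

    #include <cstdint>
    #include <cstdio>

    struct ObjectPair { void* x; void* y; };  // 16 bytes

    // Windows x64: returned via a hidden pointer argument.
    ObjectPair ReturnsPair() { return {nullptr, nullptr}; }
    // Returned directly in RAX on both ABIs; no hidden argument.
    int64_t ReturnsScalar() { return 42; }

    int main() {
      // Casting one function type to the other and calling it is undefined
      // behavior and miscompiles on Windows, hence the separate
      // SimulatorRuntimeCall_ReturnPtr path in the diff above.
      ObjectPair p = ReturnsPair();
      printf("%p %lld\n", p.x, (long long)ReturnsScalar());
      return 0;
    }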
@@ -1489,7 +1538,7 @@ void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
 void Simulator::VisitTestBranch(Instruction* instr) {
   unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
                      instr->ImmTestBranchBit40();
-  bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
+  bool take_branch = ((xreg(instr->Rt()) & (1ULL << bit_pos)) == 0);
   switch (instr->Mask(TestBranchMask)) {
     case TBZ: break;
     case TBNZ: take_branch = !take_branch; break;
@@ -1858,7 +1907,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
   unsigned rt = instr->Rt();
   unsigned rt2 = instr->Rt2();
   unsigned addr_reg = instr->Rn();
-  size_t access_size = 1 << instr->SizeLSPair();
+  size_t access_size = 1ULL << instr->SizeLSPair();
   int64_t offset = instr->ImmLSPair() * access_size;
   uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
   uintptr_t address2 = address + access_size;
@@ -2266,7 +2315,7 @@ void Simulator::VisitConditionalSelect(Instruction* instr) {
       break;
     case CSNEG_w:
     case CSNEG_x:
-      new_val = -new_val;
+      new_val = (uint64_t)(-(int64_t)new_val);
       break;
     default: UNIMPLEMENTED();
   }
@@ -2396,14 +2445,14 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
   uint64_t u0, v0, w0;
   int64_t u1, v1, w1, w2, t;
 
-  u0 = u & 0xFFFFFFFFL;
+  u0 = u & 0xFFFFFFFFLL;
   u1 = u >> 32;
-  v0 = v & 0xFFFFFFFFL;
+  v0 = v & 0xFFFFFFFFLL;
   v1 = v >> 32;
 
   w0 = u0 * v0;
   t = u1 * v0 + (w0 >> 32);
-  w1 = t & 0xFFFFFFFFL;
+  w1 = t & 0xFFFFFFFFLL;
   w2 = t >> 32;
   w1 = u0 * v1 + w1;
 
@@ -2458,7 +2507,7 @@ void Simulator::BitfieldHelper(Instruction* instr) {
     mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
                                : static_cast<T>(-1);
   } else {
-    uint64_t umask = ((1L << (S + 1)) - 1);
+    uint64_t umask = ((1LL << (S + 1)) - 1);
     umask = (umask >> R) | (umask << (reg_size - R));
     mask = static_cast<T>(umask);
     diff += reg_size;
@@ -2973,7 +3022,11 @@ void Simulator::VisitSystem(Instruction* instr) {
       default: UNIMPLEMENTED();
     }
   } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+#if defined(V8_OS_WIN)
+    MemoryBarrier();
+#else
     __sync_synchronize();
+#endif
   } else {
     UNIMPLEMENTED();
   }
@@ -4797,7 +4850,7 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
       vform = q ? kFormat2D : kFormat1D;
       imm = 0;
       for (int i = 0; i < 8; ++i) {
-        if (imm8 & (1 << i)) {
+        if (imm8 & (1ULL << i)) {
           imm |= (UINT64_C(0xFF) << (8 * i));
         }
       }
@@ -123,7 +123,7 @@ int HighestSetBitPosition(uint64_t value) {
 
 
 uint64_t LargestPowerOf2Divisor(uint64_t value) {
-  return value & -value;
+  return value & (-(int64_t)value);
 }
 
 
@@ -44,7 +44,7 @@ int MaskToBit(uint64_t mask);
 template <typename T>
 T ReverseBytes(T value, int block_bytes_log2) {
   DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
-  DCHECK((1U << block_bytes_log2) <= sizeof(value));
+  DCHECK((1ULL << block_bytes_log2) <= sizeof(value));
   // Split the 64-bit value into an 8-bit array, where b[0] is the least
   // significant byte, and b[7] is the most significant.
   uint8_t bytes[8];
@@ -21,7 +21,7 @@
 #elif defined(_M_IX86) || defined(__i386__)
 #define V8_HOST_ARCH_IA32 1
 #define V8_HOST_ARCH_32_BIT 1
-#elif defined(__AARCH64EL__)
+#elif defined(__AARCH64EL__) || defined(_M_ARM64)
 #define V8_HOST_ARCH_ARM64 1
 #define V8_HOST_ARCH_64_BIT 1
 #elif defined(__ARMEL__)
@@ -83,7 +83,7 @@
 #define V8_TARGET_ARCH_X64 1
 #elif defined(_M_IX86) || defined(__i386__)
 #define V8_TARGET_ARCH_IA32 1
-#elif defined(__AARCH64EL__)
+#elif defined(__AARCH64EL__) || defined(_M_ARM64)
 #define V8_TARGET_ARCH_ARM64 1
 #elif defined(__ARMEL__)
 #define V8_TARGET_ARCH_ARM 1
@@ -189,10 +189,19 @@ void StackTrace::InitTrace(const CONTEXT* context_record) {
   STACKFRAME64 stack_frame;
   memset(&stack_frame, 0, sizeof(stack_frame));
 #if defined(_WIN64)
+#if defined(_M_X64)
   int machine_type = IMAGE_FILE_MACHINE_AMD64;
   stack_frame.AddrPC.Offset = context_record->Rip;
   stack_frame.AddrFrame.Offset = context_record->Rbp;
   stack_frame.AddrStack.Offset = context_record->Rsp;
+#elif defined(_M_ARM64)
+  int machine_type = IMAGE_FILE_MACHINE_ARM64;
+  stack_frame.AddrPC.Offset = context_record->Pc;
+  stack_frame.AddrFrame.Offset = context_record->Fp;
+  stack_frame.AddrStack.Offset = context_record->Sp;
+#else
+#error Unsupported Arch
+#endif
 #else
   int machine_type = IMAGE_FILE_MACHINE_I386;
   stack_frame.AddrPC.Offset = context_record->Eip;
@ -829,6 +829,12 @@ void ThreadTicks::WaitUntilInitializedWin() {
::Sleep(10);
}

#ifdef V8_HOST_ARCH_ARM64
#define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
#else
#define ReadCycleCounter() __rdtsc()
#endif

double ThreadTicks::TSCTicksPerSecond() {
DCHECK(IsSupported());

@ -849,12 +855,12 @@ double ThreadTicks::TSCTicksPerSecond() {

// The first time that this function is called, make an initial reading of the
// TSC and the performance counter.
static const uint64_t tsc_initial = __rdtsc();
static const uint64_t tsc_initial = ReadCycleCounter();
static const uint64_t perf_counter_initial = QPCNowRaw();

// Make another reading of the TSC and the performance counter every time
// that this function is called.
uint64_t tsc_now = __rdtsc();
uint64_t tsc_now = ReadCycleCounter();
uint64_t perf_counter_now = QPCNowRaw();

// Reset the thread priority.

@ -887,6 +893,7 @@ double ThreadTicks::TSCTicksPerSecond() {

return tsc_ticks_per_second;
}
#undef ReadCycleCounter
#endif // V8_OS_WIN

} // namespace base
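ARM64 has no x86 TSC, so the calibration reads PMCCNTR_EL0 (the cycle counter) through the _ReadStatusReg intrinsic; the surrounding math is unchanged. A sketch of that arithmetic with illustrative names (qpc_frequency would come from QueryPerformanceFrequency):

double cycle_delta = static_cast<double>(tsc_now - tsc_initial);
double qpc_delta =
    static_cast<double>(perf_counter_now - perf_counter_initial);
// QPC ticks divided by the QPC frequency gives elapsed wall time; cycles
// per second then falls out as a ratio.
double elapsed_seconds = qpc_delta / static_cast<double>(qpc_frequency);
double ticks_per_second = cycle_delta / elapsed_seconds;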
@ -549,6 +549,26 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
__ B(le, stack_overflow);

#if defined(V8_OS_WIN)
// Simulate _chkstk to extend stack guard page on Windows ARM64.
const int kPageSize = 4096;
Label chkstk, chkstk_done;
Register probe = temps.AcquireX();

__ Sub(scratch, sp, Operand(num_args, LSL, kPointerSizeLog2));
__ Mov(probe, sp);

// Loop start of stack probe.
__ Bind(&chkstk);
__ Sub(probe, probe, kPageSize);
__ Cmp(probe, scratch);
__ B(lo, &chkstk_done);
__ Ldrb(xzr, MemOperand(probe));
__ B(&chkstk);

__ Bind(&chkstk_done);
#endif
}
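Windows commits stack pages lazily behind a guard page, so an access more than a page below the committed region skips the guard and faults; MSVC normally inserts _chkstk calls for this, but JIT-generated code has to probe by hand. The loop above therefore touches one byte per 4 KB page from sp down to the new limit. A C-level equivalent, illustrative only:

#include <cstddef>

// Touch one byte per page between the old and new stack pointer so each
// access lands on the guard page and commits it (what _chkstk does).
void ProbeStack(const char* sp, const char* new_sp) {
  const std::ptrdiff_t kPageSize = 4096;
  for (const char* probe = sp - kPageSize; probe >= new_sp;
       probe -= kPageSize) {
    *(volatile const char*)probe;  // load only, value discarded (cf. Ldrb xzr)
  }
}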

// Input:
@ -996,9 +1016,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Mov(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
#if defined(V8_OS_WIN)
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
#else
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
#endif
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
__ Call(kJavaScriptCallCodeStartRegister);
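The x18→x23 switch is the ABI point from the commit message: on Windows ARM64, x18 is the reserved platform register (the OS assumes it holds the TEB pointer and may restore it at any time), so generated code can neither use nor rely on it. An illustrative way to express the constraint, with hypothetical names:

#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64)
constexpr int kDispatchScratch = 23;  // x18 is off-limits on Windows ARM64
#else
constexpr int kDispatchScratch = 18;  // fine under plain AAPCS64
#endif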
@ -1232,9 +1258,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);

// Dispatch to the target bytecode.
#if defined(V8_OS_WIN)
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
#else
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
#endif
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
__ Jump(kJavaScriptCallCodeStartRegister);
@ -2957,7 +2989,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// Isolate the mantissa bits, and set the implicit '1'.
Register mantissa = scratch2;
__ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
__ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
__ Orr(mantissa, mantissa, 1ULL << HeapNumber::kMantissaBits);

// Negate the mantissa if necessary.
__ Tst(result, kXSignMask);
@ -1879,7 +1879,15 @@ FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
// TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
// isn't used before the next safepoint, the GC will try to scan it as a
// tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64)
// x18 is the reserved platform register on Windows ARM64.
const int kPlatformRegister = 18;
if (r != kPlatformRegister) {
SetRegister(r, kZapUint32);
}
#else
SetRegister(r, kZapUint32);
#endif
}

// Zap all the slots.
@ -690,6 +690,10 @@ void Sampler::DoSample() {
state.pc = reinterpret_cast<void*>(context.Rip);
state.sp = reinterpret_cast<void*>(context.Rsp);
state.fp = reinterpret_cast<void*>(context.Rbp);
#elif V8_HOST_ARCH_ARM64
state.pc = reinterpret_cast<void*>(context.Pc);
state.sp = reinterpret_cast<void*>(context.Sp);
state.fp = reinterpret_cast<void*>(context.Fp);
#else
state.pc = reinterpret_cast<void*>(context.Eip);
state.sp = reinterpret_cast<void*>(context.Esp);
@ -97,6 +97,10 @@ inline T XorSeqCst(T* p, T value) {
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor

#if defined(V8_HOST_ARCH_ARM64)
#define InterlockedExchange8 _InterlockedExchange8
#endif

#define ATOMIC_OPS(type, suffix, vctype) \
inline type ExchangeSeqCst(type* p, type value) { \
return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \

@ -160,6 +164,10 @@ inline void StoreSeqCst(T* p, T value) {
#undef InterlockedOr32
#undef InterlockedXor32

#if defined(V8_HOST_ARCH_ARM64)
#undef InterlockedExchange8
#endif

#else

#error Unsupported platform!
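The ATOMIC_OPS macro pastes names like InterlockedExchange8 together, so on ARM64 that name has to be routed explicitly to the _InterlockedExchange8 compiler intrinsic. A sketch of the resulting 8-bit exchange, assuming MSVC on Windows:

#include <intrin.h>
#include <cstdint>

// Full-barrier 8-bit exchange via the MSVC intrinsic.
inline int8_t ExchangeByte(int8_t* p, int8_t value) {
  return _InterlockedExchange8(reinterpret_cast<volatile char*>(p),
                               static_cast<char>(value));
}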
@ -250,7 +250,7 @@ class LiftoffCompiler {
int reg_code = param_loc.AsRegister();
RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
: kLiftoffAssemblerFpCacheRegs;
if (cache_regs & (1 << reg_code)) {
if (cache_regs & (1ULL << reg_code)) {
LiftoffRegister in_reg = LiftoffRegister::from_code(rc, reg_code);
param_regs.set(in_reg);
}

@ -278,7 +278,7 @@ class LiftoffCompiler {
int reg_code = param_loc.AsRegister();
RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
: kLiftoffAssemblerFpCacheRegs;
if (cache_regs & (1 << reg_code)) {
if (cache_regs & (1ULL << reg_code)) {
// This is a cache register, just use it.
in_reg = LiftoffRegister::from_code(rc, reg_code);
} else {
@ -418,7 +418,9 @@ v8_source_set("cctest_sources") {
v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64") {
# Disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
cflags += [ "-ffp-contract=off" ]
if (!is_win) {
cflags += [ "-ffp-contract=off" ]
}
}

if (is_win) {
@ -354,9 +354,9 @@ TEST(mov) {

__ Mov(x0, 0x0123456789ABCDEFL);

__ movz(x1, 0xABCDL << 16);
__ movk(x2, 0xABCDL << 32);
__ movn(x3, 0xABCDL << 48);
__ movz(x1, 0xABCDLL << 16);
__ movk(x2, 0xABCDLL << 32);
__ movn(x3, 0xABCDLL << 48);

__ Mov(x4, 0x0123456789ABCDEFL);
__ Mov(x5, x4);
@ -6769,7 +6769,11 @@ static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
// can be handled by this test.
CHECK_LE(code_size, range);

#if defined(_M_ARM64) && !defined(__clang__)
auto PoolSizeAt = [pool_entries, kEntrySize](int pc_offset) {
#else
auto PoolSizeAt = [](int pc_offset) {
#endif
// To determine padding, consider the size of the prologue of the pool,
// and the jump around the pool, which we always need.
size_t prologue_size = 2 * kInstrSize + kInstrSize;
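The capture-list fork works around a front-end difference: a const integral local initialized with a constant expression is not odr-used when read inside a lambda, so standard C++ needs no capture, but the non-clang MSVC front end rejects that on this path. A minimal illustration:

const int kEntry = 8;  // constant expression, not odr-used below
auto a = [](int n) { return n * kEntry; };        // standard C++: no capture
auto b = [kEntry](int n) { return n * kEntry; };  // explicit capture for MSVC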
@ -6947,7 +6951,7 @@ TEST(add_sub_wide_imm) {
CHECK_EQUAL_32(kWMinInt, w18);
CHECK_EQUAL_32(kWMinInt, w19);

CHECK_EQUAL_64(-0x1234567890ABCDEFUL, x20);
CHECK_EQUAL_64(-0x1234567890ABCDEFLL, x20);
CHECK_EQUAL_32(-0x12345678, w21);

TEARDOWN();

@ -7724,7 +7728,7 @@ TEST(adc_sbc_shift) {
RUN();

CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFL, x5);
CHECK_EQUAL_64(1L << 60, x6);
CHECK_EQUAL_64(1LL << 60, x6);
CHECK_EQUAL_64(0xF0123456789ABCDDL, x7);
CHECK_EQUAL_64(0x0111111111111110L, x8);
CHECK_EQUAL_64(0x1222222222222221L, x9);

@ -7735,13 +7739,13 @@ TEST(adc_sbc_shift) {
CHECK_EQUAL_32(0x91111110, w13);
CHECK_EQUAL_32(0x9A222221, w14);

CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFL + 1, x18);
CHECK_EQUAL_64((1L << 60) + 1, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFLL + 1, x18);
CHECK_EQUAL_64((1LL << 60) + 1, x19);
CHECK_EQUAL_64(0xF0123456789ABCDDL + 1, x20);
CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
CHECK_EQUAL_64(0x1222222222222221L + 1, x22);

CHECK_EQUAL_32(0xFFFFFFFF + 1, w23);
CHECK_EQUAL_32(0xFFFFFFFFULL + 1, w23);
CHECK_EQUAL_32((1 << 30) + 1, w24);
CHECK_EQUAL_32(0xF89ABCDD + 1, w25);
CHECK_EQUAL_32(0x91111110 + 1, w26);
@ -12008,19 +12012,19 @@ TEST(register_bit) {
// teardown.

// Simple tests.
CHECK(x0.bit() == (1UL << 0));
CHECK(x1.bit() == (1UL << 1));
CHECK(x10.bit() == (1UL << 10));
CHECK(x0.bit() == (1ULL << 0));
CHECK(x1.bit() == (1ULL << 1));
CHECK(x10.bit() == (1ULL << 10));

// AAPCS64 definitions.
CHECK(fp.bit() == (1UL << kFramePointerRegCode));
CHECK(lr.bit() == (1UL << kLinkRegCode));
CHECK(fp.bit() == (1ULL << kFramePointerRegCode));
CHECK(lr.bit() == (1ULL << kLinkRegCode));

// Fixed (hardware) definitions.
CHECK(xzr.bit() == (1UL << kZeroRegCode));
CHECK(xzr.bit() == (1ULL << kZeroRegCode));

// Internal ABI definitions.
CHECK(sp.bit() == (1UL << kSPRegInternalCode));
CHECK(sp.bit() == (1ULL << kSPRegInternalCode));
CHECK(sp.bit() != xzr.bit());

// xn.bit() == wn.bit() at all times, for the same n.
@ -29,6 +29,12 @@
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"

#if defined(V8_OS_WIN)
#define RANDGEN() rand()
#else
#define RANDGEN() mrand48()
#endif

namespace v8 {
namespace internal {
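mrand48/seed48 do not exist in the Windows CRT, so the fuzz tests fall back to srand/rand there. Both streams are deterministic, but rand() typically yields only 15 bits per call, so the Windows path samples a narrower slice of the instruction space. A portable deterministic alternative (an assumption, not what the patch does) would be <random>:

#include <cstdint>
#include <random>

// std::mt19937 produces a full 32 bits per call, identically on every
// platform, which keeps the fuzz coverage uniform.
std::mt19937 gen(42);
uint32_t instr = static_cast<uint32_t>(gen());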
@ -37,14 +43,18 @@ TEST(FUZZ_decoder) {
// 43 million = ~1% of the instruction space.
static const int instruction_count = 43 * 1024 * 1024;

#if defined(V8_OS_WIN)
srand(1);
#else
uint16_t seed[3] = {1, 2, 3};
seed48(seed);
#endif

Decoder<DispatchingDecoderVisitor> decoder;
Instruction buffer[kInstrSize];

for (int i = 0; i < instruction_count; i++) {
uint32_t instr = static_cast<uint32_t>(mrand48());
uint32_t instr = static_cast<uint32_t>(RANDGEN());
buffer->SetInstructionBits(instr);
decoder.Decode(buffer);
}

@ -56,8 +66,12 @@ TEST(FUZZ_disasm) {
// 9 million = ~0.2% of the instruction space.
static const int instruction_count = 9 * 1024 * 1024;

#if defined(V8_OS_WIN)
srand(42);
#else
uint16_t seed[3] = {42, 43, 44};
seed48(seed);
#endif

Decoder<DispatchingDecoderVisitor> decoder;
DisassemblingDecoder disasm;

@ -65,7 +79,7 @@ TEST(FUZZ_disasm) {

decoder.AppendVisitor(&disasm);
for (int i = 0; i < instruction_count; i++) {
uint32_t instr = static_cast<uint32_t>(mrand48());
uint32_t instr = static_cast<uint32_t>(RANDGEN());
buffer->SetInstructionBits(instr);
decoder.Decode(buffer);
}

@ -73,3 +87,5 @@ TEST(FUZZ_disasm) {

} // namespace internal
} // namespace v8

#undef RANDGEN
@ -233,7 +233,7 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
RegList list = 0;
int i = 0;
for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
if (((1UL << n) & allowed) != 0) {
if (((1ULL << n) & allowed) != 0) {
// Only assign allowed registers.
if (r) {
r[i] = Register::Create(n, reg_size);

@ -244,7 +244,7 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
if (w) {
w[i] = Register::Create(n, kWRegSizeInBits);
}
list |= (1UL << n);
list |= (1ULL << n);
i++;
}
}

@ -259,7 +259,7 @@ RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
RegList list = 0;
int i = 0;
for (unsigned n = 0; (n < kNumberOfVRegisters) && (i < reg_count); n++) {
if (((1UL << n) & allowed) != 0) {
if (((1ULL << n) & allowed) != 0) {
// Only assign allowed registers.
if (v) {
v[i] = VRegister::Create(n, reg_size);

@ -270,7 +270,7 @@ RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
if (s) {
s[i] = VRegister::Create(n, kSRegSizeInBits);
}
list |= (1UL << n);
list |= (1ULL << n);
i++;
}
}

@ -284,7 +284,7 @@ RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
Register first = NoReg;
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
if (reg_list & (1UL << i)) {
if (reg_list & (1ULL << i)) {
Register xn = Register::Create(i, kXRegSizeInBits);
// We should never write into sp here.
CHECK(!xn.Is(sp));

@ -307,7 +307,7 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
VRegister first = NoVReg;
for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
if (reg_list & (1UL << i)) {
if (reg_list & (1ULL << i)) {
VRegister dn = VRegister::Create(i, kDRegSizeInBits);
if (!first.IsValid()) {
// This is the first register we've hit, so construct the literal.
@ -1288,7 +1288,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,

TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
uint64_t mask = 1L << bit;
uint64_t mask = 1LL << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(mask)), &a, &b);

@ -1309,7 +1309,7 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {

TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 63) {
uint64_t mask = 1L << bit;
uint64_t mask = 1LL << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64And(m.Int64Constant(mask), m.Parameter(0)), &a, &b);