[arm64] Rename csp to sp

Rename csp to sp and remove support for the stack pointer abstraction and
switching stack pointers.

Bug: v8:6644
Change-Id: I616633aabc1cee9926249fe95ce6c37ed6544fe3
Reviewed-on: https://chromium-review.googlesource.com/870870
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Martyn Capewell <martyn.capewell@arm.com>
Cr-Commit-Position: refs/heads/master@{#50687}
Martyn Capewell authored 2018-01-17 15:46:46 +00:00, committed by Commit Bot
parent f47c824b8e
commit abe3bcdc3d
20 changed files with 477 additions and 749 deletions
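
Most of the change is mechanical: code that previously went through the TurboAssembler::StackPointer() accessor, or that wrote to the dedicated csp/wcsp registers, now uses the architectural stack pointer sp (and wsp) directly, and the helpers that kept csp in sync (BumpSystemStackPointer, SyncSystemStackPointer, AssertStackConsistency) are deleted. A rough before/after sketch, built from two macro-assembler lines that appear in the diff below:

    // Before: read the abstract stack pointer; write csp explicitly.
    __ Mov(args, masm->StackPointer());
    __ Mov(csp, scratch);

    // After: sp is the one and only stack pointer.
    __ Mov(args, sp);
    __ Mov(sp, scratch);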

View File

@ -95,7 +95,7 @@ inline void CPURegList::Remove(int code) {
inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return csp;
return sp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kXRegSizeInBits);
@ -105,7 +105,7 @@ inline Register Register::XRegFromCode(unsigned code) {
inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wcsp;
return wsp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kWRegSizeInBits);

View File

@ -455,8 +455,8 @@ constexpr Register no_reg = NoReg;
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits);
DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits);
DEFINE_REGISTER(Register, wsp, kSPRegInternalCode, kWRegSizeInBits);
DEFINE_REGISTER(Register, sp, kSPRegInternalCode, kXRegSizeInBits);
#define DEFINE_VREGISTERS(N) \
DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \

View File

@ -30,7 +30,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
__ Str(x1, MemOperand(__ StackPointer(), x5));
__ Poke(x1, Operand(x5));
__ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
@ -314,7 +314,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ EnterExitFrame(
save_doubles(), x10, extra_stack_space,
is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
__ Poke(argv, 1 * kPointerSize);
@ -349,12 +348,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
// csp[...]: Saved doubles, if saved_doubles is true.
// csp[32]: Alignment padding, if necessary.
// csp[24]: Preserved x23 (used for target).
// csp[16]: Preserved x22 (used for argc).
// csp[8]: Preserved x21 (used for argv).
// csp -> csp[0]: Space reserved for the return address.
// sp[...]: Saved doubles, if saved_doubles is true.
// sp[32]: Alignment padding, if necessary.
// sp[24]: Preserved x23 (used for target).
// sp[16]: Preserved x22 (used for argc).
// sp[8]: Preserved x21 (used for argv).
// sp -> sp[0]: Space reserved for the return address.
//
// After a successful call, the exit frame, preserved registers (x21-x23) and
// the arguments (including the receiver) are dropped or popped as
@ -364,8 +363,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// untouched, and the stub either throws an exception by jumping to one of
// the exception_returned label.
DCHECK(csp.Is(__ StackPointer()));
// Prepare AAPCS64 arguments to pass to the builtin.
__ Mov(x0, argc);
__ Mov(x1, argv);
@ -437,7 +434,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
isolate());
DCHECK(csp.Is(masm->StackPointer()));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Mov(x0, 0); // argc.
@ -454,7 +450,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Register scratch = temps.AcquireX();
__ Mov(scratch, Operand(pending_handler_sp_address));
__ Ldr(scratch, MemOperand(scratch));
__ Mov(csp, scratch);
__ Mov(sp, scratch);
}
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
@ -511,7 +507,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Push(x13, x12, xzr, x10);
// Set up fp.
__ Sub(fp, __ StackPointer(), EntryFrameConstants::kCallerFPOffset);
__ Sub(fp, sp, EntryFrameConstants::kCallerFPOffset);
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
@ -582,7 +578,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Mov(scratch, __ StackPointer());
__ Mov(scratch, sp);
__ Str(scratch, MemOperand(x11));
}
@ -740,10 +736,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
// Make sure the caller configured the stack pointer (see comment in
// DirectCEntryStub::Generate).
DCHECK(csp.Is(__ StackPointer()));
intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
__ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
@ -1260,7 +1252,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Prepare arguments.
Register args = x6;
__ Mov(args, masm->StackPointer());
__ Mov(args, sp);
// Allocate the v8::Arguments structure in the arguments' space, since it's
// not controlled by GC.
@ -1344,7 +1336,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
"slots must be a multiple of 2 for stack pointer alignment");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
__ Mov(x0, sp); // x0 = Handle<Name>
__ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
const int kApiStackSpace = 1;

View File

@ -33,7 +33,7 @@ void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
masm->Add(src, masm->StackPointer(), src_offset);
masm->Add(src, sp, src_offset);
masm->Add(dst, dst, dst_offset);
// Write reg_list into the frame pointed to by dst.
@ -140,8 +140,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, adding two words for alignment padding and
// bailout id.
__ Add(fp_to_sp, __ StackPointer(),
kSavedRegistersAreaSize + (2 * kPointerSize));
__ Add(fp_to_sp, sp, kSavedRegistersAreaSize + (2 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
@ -222,7 +221,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
__ Mov(__ StackPointer(), scratch);
__ Mov(sp, scratch);
}
// Replace the current (input) frame with the output frames.

View File

@ -3327,7 +3327,7 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
}
}
if (reg.IsVRegister() || !(reg.Aliases(csp) || reg.Aliases(xzr))) {
if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
// Filter special registers
if (reg.IsX() && (reg.code() == 27)) {
AppendToOutput("cp");
@ -3339,9 +3339,9 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
// A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
AppendToOutput("%c%d", reg_char, reg.code());
}
} else if (reg.Aliases(csp)) {
// Disassemble w31/x31 as stack pointer wcsp/csp.
AppendToOutput("%s", reg.Is64Bits() ? "csp" : "wcsp");
} else if (reg.Aliases(sp)) {
// Disassemble w31/x31 as stack pointer wsp/sp.
AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
} else {
// Disassemble w31/x31 as zero register wzr/xzr.
AppendToOutput("%czr", reg_char);

View File

@ -11,7 +11,7 @@ namespace internal {
static const int kX0DwarfCode = 0;
static const int kFpDwarfCode = 29;
static const int kLrDwarfCode = 30;
static const int kCSpDwarfCode = 31;
static const int kSpDwarfCode = 31;
const int EhFrameConstants::kCodeAlignmentFactor = 4;
const int EhFrameConstants::kDataAlignmentFactor = -8;
@ -33,7 +33,7 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
case kRegCode_x30:
return kLrDwarfCode;
case kSPRegInternalCode:
return kCSpDwarfCode;
return kSpDwarfCode;
case kRegCode_x0:
return kX0DwarfCode;
default:
@ -51,8 +51,8 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "fp";
case kLrDwarfCode:
return "lr";
case kCSpDwarfCode:
return "csp"; // This could be zr as well
case kSpDwarfCode:
return "sp"; // This could be zr as well
default:
UNIMPLEMENTED();
return nullptr;

View File

@ -258,7 +258,7 @@ class Instruction {
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
// The following instructions use csp or wsp as Rd:
// The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
@ -272,7 +272,7 @@ class Instruction {
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
// can set the flags. The others can all write into csp.
// can set the flags. The others can all write into sp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
@ -287,7 +287,7 @@ class Instruction {
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
// The following instructions use csp or wsp as Rn:
// The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).

View File

@ -91,7 +91,6 @@ static const CounterDescriptor kCounterList[] = {
{"PC Addressing", Gauge},
{"Other", Gauge},
{"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
@ -238,16 +237,8 @@ void Instrument::VisitPCRelAddressing(Instruction* instr) {
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
@ -470,16 +461,8 @@ void Instrument::VisitAddSubShifted(Instruction* instr) {
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}

View File

@ -1042,58 +1042,6 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn);
}
void MacroAssembler::AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK_GE(sp_alignment, 16);
DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
}
void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
DCHECK(!csp.Is(StackPointer()));
if (!TmpList()->IsEmpty()) {
Sub(csp, StackPointer(), space);
} else {
// TODO(jbramley): Several callers rely on this not using scratch
// registers, so we use the assembler directly here. However, this means
// that large immediate values of 'space' cannot be handled cleanly. (Only
// 24-bits immediates or values of 'space' that can be encoded in one
// instruction are accepted.) Once we implement our flexible scratch
// register idea, we could greatly simplify this function.
InstructionAccurateScope scope(this);
DCHECK(space.IsImmediate());
// Align to 16 bytes.
uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
DCHECK(is_uint24(imm));
Register source = StackPointer();
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
bic(csp, source, 0xf);
source = csp;
}
if (!is_uint12(imm)) {
int64_t imm_top_12_bits = imm >> 12;
sub(csp, source, imm_top_12_bits << 12);
source = csp;
imm -= imm_top_12_bits << 12;
}
if (imm > 0) {
sub(csp, source, imm);
}
}
AssertStackConsistency();
}
void TurboAssembler::SyncSystemStackPointer() {
DCHECK(emit_debug_code());
DCHECK(!csp.Is(StackPointer()));
{ InstructionAccurateScope scope(this);
mov(csp, StackPointer());
}
AssertStackConsistency();
}
void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
@ -1249,14 +1197,9 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
if (size == 0) {
return;
}
DCHECK_EQ(size % 16, 0);
if (csp.Is(StackPointer())) {
DCHECK_EQ(size % 16, 0);
} else {
BumpSystemStackPointer(size);
}
Sub(StackPointer(), StackPointer(), size);
Sub(sp, sp, size);
}
void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
@ -1269,13 +1212,9 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (size.IsZero()) {
return;
}
AssertPositiveOrZero(count);
if (!csp.Is(StackPointer())) {
BumpSystemStackPointer(size);
}
Sub(StackPointer(), StackPointer(), size);
Sub(sp, sp, size);
}
@ -1290,11 +1229,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
if (!csp.Is(StackPointer())) {
BumpSystemStackPointer(size);
}
Sub(StackPointer(), StackPointer(), size);
Sub(sp, sp, size);
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
@ -1305,16 +1240,8 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
return;
}
Add(StackPointer(), StackPointer(), size);
if (csp.Is(StackPointer())) {
DCHECK_EQ(size % 16, 0);
} else if (emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
}
Add(sp, sp, size);
DCHECK_EQ(size % 16, 0);
}
void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
@ -1329,14 +1256,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
AssertPositiveOrZero(count);
Add(StackPointer(), StackPointer(), size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
}
Add(sp, sp, size);
}
void TurboAssembler::DropArguments(const Register& count,
@ -1378,14 +1298,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
Add(StackPointer(), StackPointer(), size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
}
Add(sp, sp, size);
}

View File

@ -188,15 +188,14 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
// If the left-hand input is the stack pointer, we can't pre-shift the
// immediate, as the encoding won't allow the subsequent post shift.
PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
if (rd.Is(csp)) {
if (rd.IsSP()) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
Logical(temp, rn, imm_operand, op);
Mov(csp, temp);
AssertStackConsistency();
Mov(sp, temp);
} else {
Logical(rd, rn, imm_operand, op);
}
@ -294,7 +293,6 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
AssertStackConsistency();
}
}
}
@ -337,7 +335,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// registers is not required to clear the top word of the X register. In
// this case, the instruction is discarded.
//
// If csp is an operand, add #0 is emitted, otherwise, orr #0.
// If sp is an operand, add #0 is emitted, otherwise, orr #0.
if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
(discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
@ -724,11 +722,11 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
// If the destination or source register is the stack pointer, we can
// only pre-shift the immediate right by values supported in the add/sub
// extend encoding.
if (rd.Is(csp)) {
if (rd.Is(sp)) {
// If the destination is SP and flags will be set, we can't pre-shift
// the immediate at all.
mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
} else if (rn.Is(csp)) {
} else if (rn.Is(sp)) {
mode = kLimitShiftForSP;
}
@ -1105,9 +1103,9 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
PushPreamble(size);
// Reserve room for src0 and push src1.
str(src1, MemOperand(StackPointer(), -size, PreIndex));
str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
str(src0, MemOperand(sp, src1.SizeInBytes()));
}
@ -1166,9 +1164,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PushPreamble(registers.Count(), size);
// Push up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in order
// to maintain the 16-byte alignment for csp.
// Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& src0 = registers.PopHighestIndex();
@ -1183,9 +1179,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
// Pop up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in
// order to maintain the 16-byte alignment for csp.
// Pop up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& dst0 = registers.PopLowestIndex();
@ -1258,23 +1252,23 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
switch (count) {
case 1:
DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
str(src0, MemOperand(sp, -1 * size, PreIndex));
break;
case 2:
DCHECK(src2.IsNone() && src3.IsNone());
stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
break;
case 3:
DCHECK(src3.IsNone());
stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
str(src0, MemOperand(StackPointer(), 2 * size));
stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
str(src0, MemOperand(sp, 2 * size));
break;
case 4:
// Skip over 4 * size, then fill in the gap. This allows four W registers
// to be pushed using csp, whilst maintaining 16-byte alignment for csp
// to be pushed using sp, whilst maintaining 16-byte alignment for sp
// at all times.
stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
stp(src1, src0, MemOperand(StackPointer(), 2 * size));
stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
stp(src1, src0, MemOperand(sp, 2 * size));
break;
default:
UNREACHABLE();
@ -1295,24 +1289,24 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
switch (count) {
case 1:
DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
break;
case 2:
DCHECK(dst2.IsNone() && dst3.IsNone());
ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
break;
case 3:
DCHECK(dst3.IsNone());
ldr(dst2, MemOperand(StackPointer(), 2 * size));
ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
ldr(dst2, MemOperand(sp, 2 * size));
ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
break;
case 4:
// Load the higher addresses first, then load the lower addresses and
// skip the whole block in the second instruction. This allows four W
// registers to be popped using csp, whilst maintaining 16-byte alignment
// for csp at all times.
ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
// registers to be popped using sp, whilst maintaining 16-byte alignment
// for sp at all times.
ldp(dst2, dst3, MemOperand(sp, 2 * size));
ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
break;
default:
UNREACHABLE();
@ -1322,43 +1316,27 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
void TurboAssembler::PushPreamble(Operand total_size) {
if (total_size.IsZero()) return;
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
} else {
// Even if the current stack pointer is not the system stack pointer (csp),
// the system stack pointer will still be modified in order to comply with
// ABI rules about accessing memory below the system stack pointer.
BumpSystemStackPointer(total_size);
// The stack pointer must be aligned to 16 bytes on entry, and the total
// size of the specified registers must also be a multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PopPostamble(Operand total_size) {
if (total_size.IsZero()) return;
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
} else if (emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
// The stack pointer must be aligned to 16 bytes on entry, and the total
// size of the specified registers must also be a multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PushPreamble(int count, int size) {
@ -1376,7 +1354,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
Str(src, MemOperand(StackPointer(), offset));
Str(src, MemOperand(sp, offset));
}
@ -1388,14 +1366,14 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
Ldr(dst, MemOperand(StackPointer(), offset));
Ldr(dst, MemOperand(sp, offset));
}
void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
Stp(src1, src2, MemOperand(StackPointer(), offset));
Stp(src1, src2, MemOperand(sp, offset));
}
@ -1404,7 +1382,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1,
int offset) {
DCHECK(AreSameSizeAndType(dst1, dst2));
DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
Ldp(dst1, dst2, MemOperand(sp, offset));
}
@ -1412,11 +1390,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@ -1436,11 +1410,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, 2 * kXRegSize, PostIndex);
MemOperand tos(sp, 2 * kXRegSize, PostIndex);
ldp(x19, x20, tos);
ldp(x21, x22, tos);
@ -1455,44 +1425,15 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d14, d15, tos);
}
void TurboAssembler::AssertStackConsistency() {
// Avoid emitting code when !use_real_abort() since non-real aborts cause too
// much code to be generated.
void TurboAssembler::AssertSpAligned() {
if (emit_debug_code() && use_real_aborts()) {
if (csp.Is(StackPointer())) {
// Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
// can't check the alignment of csp without using a scratch register (or
// clobbering the flags), but the processor (or simulator) will abort if
// it is not properly aligned during a load.
ldr(xzr, MemOperand(csp, 0));
}
if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
Label ok;
// Check that csp <= StackPointer(), preserving all registers and NZCV.
sub(StackPointer(), csp, StackPointer());
cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
// Avoid generating AssertStackConsistency checks for the Push in Abort.
{ DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
// Restore StackPointer().
sub(StackPointer(), csp, StackPointer());
Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
}
bind(&ok);
// Restore StackPointer().
sub(StackPointer(), csp, StackPointer());
}
}
}
void TurboAssembler::AssertCspAligned() {
if (emit_debug_code() && use_real_aborts()) {
// TODO(titzer): use a real assert for alignment check?
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
UseScratchRegisterScope scope(this);
Register temp = scope.AcquireX();
ldr(temp, MemOperand(csp));
Mov(temp, sp);
Tst(temp, 15);
Check(eq, AbortReason::kUnexpectedStackPointer);
}
}
@ -1568,11 +1509,11 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
}
void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
Add(dst, StackPointer(), slot_offset << kPointerSizeLog2);
Add(dst, sp, slot_offset << kPointerSizeLog2);
}
void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
Add(dst, StackPointer(), Operand(slot_offset, LSL, kPointerSizeLog2));
Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
@ -2110,12 +2051,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
Add(src_reg, StackPointer(),
Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, src_reg, kPointerSize);
} else {
Add(src_reg, StackPointer(),
(callee_args_count.immediate() + 1) * kPointerSize);
Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
}
// Round src_reg up to a multiple of 16 bytes, so we include any potential
@ -2145,12 +2084,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
Cmp(StackPointer(), src_reg);
Cmp(sp, src_reg);
B(ne, &loop);
// Leave current frame.
Mov(StackPointer(), dst_reg);
AssertStackConsistency();
Mov(sp, dst_reg);
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@ -2402,7 +2340,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@ -2414,21 +2352,20 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp);
// sp[4] : lr
// sp[3] : fp
// sp[1] : type
// sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
Mov(fp, csp);
Mov(fp, sp);
Push(type_reg, padreg);
// csp[3] : lr
// csp[2] : fp
// csp[1] : type
// csp[0] : for alignment
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
// sp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
Mov(type_reg, StackFrame::TypeToMarker(type));
@ -2439,8 +2376,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
Add(fp, StackPointer(),
TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
@ -2450,15 +2386,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(csp, fp);
AssertStackConsistency();
Mov(sp, fp);
Pop(fp, lr);
} else {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(StackPointer(), fp);
AssertStackConsistency();
Mov(sp, fp);
Pop(fp, lr);
}
}
@ -2493,7 +2426,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Set up the new stack frame.
Push(lr, fp);
Mov(fp, StackPointer());
Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
Mov(scratch, Operand(CodeObject()));
@ -2540,13 +2473,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.
DCHECK(csp.Is(StackPointer()));
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
// padding can vary.
Add(scratch, csp, kXRegSize);
Add(scratch, sp, kXRegSize);
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@ -2555,8 +2486,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
const Register& scratch2) {
DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
ExitFrameRestoreFPRegs();
}
@ -2582,8 +2511,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
Mov(csp, fp);
AssertStackConsistency();
Mov(sp, fp);
Pop(fp, lr);
}
@ -2752,7 +2680,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
// - x31 (csp) because the system stack pointer doesn't need to be included
// - x31 (sp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
// This function implements the mapping of register code to index into the
@ -3052,7 +2980,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
DCHECK(!kCallerSaved.IncludesAliasOf(StackPointer()));
DCHECK(!kCallerSaved.IncludesAliasOf(sp));
// The provided arguments, and their proper procedure-call standard registers.
CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
@ -3164,12 +3092,6 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Bind(&after_data);
}
// We don't pass any arguments on the stack, but we still need to align the C
// stack pointer to a 16-byte boundary for PCS compliance.
if (!csp.Is(StackPointer())) {
Bic(csp, StackPointer(), 0xF);
}
CallPrintf(arg_count, pcs);
}
@ -3208,14 +3130,6 @@ void MacroAssembler::Printf(const char * format,
CPURegister arg1,
CPURegister arg2,
CPURegister arg3) {
// We can only print sp if it is the current stack pointer.
if (!csp.Is(StackPointer())) {
DCHECK(!csp.Aliases(arg0));
DCHECK(!csp.Aliases(arg1));
DCHECK(!csp.Aliases(arg2));
DCHECK(!csp.Aliases(arg3));
}
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
@ -3224,8 +3138,8 @@ void MacroAssembler::Printf(const char * format,
FPTmpList()->set_list(0);
// Preserve all caller-saved registers as well as NZCV.
// If csp is the stack pointer, PushCPURegList asserts that the size of each
// list is a multiple of 16 bytes.
// PushCPURegList asserts that the size of each list is a multiple of 16
// bytes.
PushCPURegList(kCallerSaved);
PushCPURegList(kCallerSavedV);
@ -3241,15 +3155,15 @@ void MacroAssembler::Printf(const char * format,
// If any of the arguments are the current stack pointer, allocate a new
// register for them, and adjust the value to compensate for pushing the
// caller-saved registers.
bool arg0_sp = StackPointer().Aliases(arg0);
bool arg1_sp = StackPointer().Aliases(arg1);
bool arg2_sp = StackPointer().Aliases(arg2);
bool arg3_sp = StackPointer().Aliases(arg3);
bool arg0_sp = sp.Aliases(arg0);
bool arg1_sp = sp.Aliases(arg1);
bool arg2_sp = sp.Aliases(arg2);
bool arg3_sp = sp.Aliases(arg3);
if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
// Allocate a register to hold the original stack pointer value, to pass
// to PrintfNoPreserve as an argument.
Register arg_sp = temps.AcquireX();
Add(arg_sp, StackPointer(),
Add(arg_sp, sp,
kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
@ -3302,7 +3216,7 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
DCHECK(!AreAliased(result, xzr, csp));
DCHECK(!AreAliased(result, xzr, sp));
return result;
}

View File

@ -581,20 +581,6 @@ class TurboAssembler : public Assembler {
// Print a message to stderr and abort execution.
void Abort(AbortReason reason);
// If emit_debug_code() is true, emit a run-time check to ensure that
// StackPointer() does not point below the system stack pointer.
//
// Whilst it is architecturally legal for StackPointer() to point below csp,
// it can be evidence of a potential bug because the ABI forbids accesses
// below csp.
//
// If StackPointer() is the system stack pointer (csp), then csp will be
// dereferenced to cause the processor (or simulator) to abort if it is not
// properly aligned.
//
// If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
// Remaining instructions are simple pass-through calls to the assembler.
inline void Asr(const Register& rd, const Register& rn, unsigned shift);
inline void Asr(const Register& rd, const Register& rn, const Register& rm);
@ -614,9 +600,6 @@ class TurboAssembler : public Assembler {
static CPURegList DefaultTmpList();
static CPURegList DefaultFPTmpList();
// Return the stack pointer.
inline const Register& StackPointer() const { return csp; }
// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
void Mvn(const Register& rd, const Operand& operand);
@ -651,8 +634,8 @@ class TurboAssembler : public Assembler {
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
// Emits a runtime assert that the CSP is aligned.
void AssertCspAligned();
// Emits a runtime assert that the stack pointer is aligned.
void AssertSpAligned();
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
@ -695,9 +678,8 @@ class TurboAssembler : public Assembler {
// In debug mode, both of these will write invalid data into the claimed or
// dropped space.
//
// If the current stack pointer (according to StackPointer()) is csp, then it
// must be aligned to 16 bytes and the size claimed or dropped must be a
// multiple of 16 bytes.
// The stack pointer must be aligned to 16 bytes and the size claimed or
// dropped must be a multiple of 16 bytes.
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
@ -724,26 +706,6 @@ class TurboAssembler : public Assembler {
// Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);
// Re-synchronizes the system stack pointer (csp) with the current stack
// pointer (according to StackPointer()).
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
inline void SyncSystemStackPointer();
// Push the system stack pointer (csp) down to allow the same to be done to
// the current stack pointer (according to StackPointer()). This must be
// called _before_ accessing the memory.
//
// This is necessary when pushing or otherwise adding things to the stack, to
// satisfy the AAPCS64 constraint that the memory below the system stack
// pointer is not accessed. The amount pushed will be increased as necessary
// to ensure csp remains aligned to 16 bytes.
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
inline void BumpSystemStackPointer(const Operand& space);
// Add and sub macros.
inline void Add(const Register& rd, const Register& rn,
const Operand& operand);
@ -778,11 +740,6 @@ class TurboAssembler : public Assembler {
// The stack pointer must be aligned to 16 bytes on entry and the total size
// of the specified registers must also be a multiple of 16 bytes.
//
// Even if the current stack pointer is not the system stack pointer (csp),
// Push (and derived methods) will still modify the system stack pointer in
// order to comply with ABI rules about accessing memory below the system
// stack pointer.
//
// Other than the registers passed into Pop, the stack pointer and (possibly)
// the system stack pointer, these methods do not modify any other registers.
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
@ -1011,17 +968,13 @@ class TurboAssembler : public Assembler {
inline void Clz(const Register& rd, const Register& rn);
// Poke 'src' onto the stack. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
// Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
// be 16 byte aligned.
void Poke(const CPURegister& src, const Operand& offset);
// Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
// with 'src2' at a higher address than 'src1'. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
// with 'src2' at a higher address than 'src1'. The offset is in bytes. The
// stack pointer must be 16 byte aligned.
void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
@ -1257,8 +1210,8 @@ class TurboAssembler : public Assembler {
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
// arguments and stack (csp) must be prepared by the caller as for a normal
// AAPCS64 call to 'printf'.
// arguments and stack must be prepared by the caller as for a normal AAPCS64
// call to 'printf'.
//
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
@ -1641,17 +1594,13 @@ class MacroAssembler : public TurboAssembler {
};
// Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
// The stack pointer must be aligned to 16 bytes.
void Peek(const CPURegister& dst, const Operand& offset);
// Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
// values peeked will be adjacent, with the value in 'dst2' being from a
// higher address than 'dst1'. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
// higher address than 'dst1'. The offset is in bytes. The stack pointer must
// be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
@ -1704,10 +1653,6 @@ class MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
void PopCalleeSavedRegisters();
// Align csp for a frame, as per ActivationFrameAlignment, and make it the
// current stack pointer.
inline void AlignAndSetCSPForFrame();
// Helpers ------------------------------------------------------------------
static int SafepointRegisterStackIndex(int reg_code);
@ -1940,12 +1885,12 @@ class MacroAssembler : public TurboAssembler {
// Set up a stack frame and registers as follows:
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: SPOffset (new csp)
// fp[-8]: SPOffset (new sp)
// fp[-16]: CodeObject()
// fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
// csp[8]: Memory reserved for the caller if extra_space != 0.
// sp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.
// sp -> sp[0]: Space reserved for the return address.
//
// This function also stores the new frame information in the top frame, so
// that the new frame becomes the current frame.
@ -1960,8 +1905,6 @@ class MacroAssembler : public TurboAssembler {
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
//
// The stack pointer must be csp on entry.
void LeaveExitFrame(bool save_doubles, const Register& scratch,
const Register& scratch2);
@ -2030,11 +1973,6 @@ class MacroAssembler : public TurboAssembler {
// (such as %e, %f or %g) are VRegisters, and that arguments for integer
// placeholders are Registers.
//
// At the moment it is only possible to print the value of csp if it is the
// current stack pointer. Otherwise, the MacroAssembler will automatically
// update csp on every push (using BumpSystemStackPointer), so determining its
// value is difficult.
//
// Format placeholders that refer to more than one argument, or to a specific
// argument, are not supported. This includes formats like "%1$d" or "%.*d".
//
@ -2210,7 +2148,7 @@ class InlineSmiCheckInfo {
// Use MacroAssembler::InlineData to emit information about patchable inline
// SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
// indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
// indicate that there is no inline SMI check. Note that 'reg' cannot be sp.
//
// The generated patch information can be read using the InlineSMICheckInfo
// class.
@ -2230,8 +2168,8 @@ class InlineSmiCheckInfo {
// Fields in the data encoded by InlineData.
// A width of 5 (Rd_width) for the SMI register preclues the use of csp,
// since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
// A width of 5 (Rd_width) for the SMI register precludes the use of sp,
// since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
// used in a patchable check. The Emit() method checks this.
//
// Note that the total size of the fields is restricted by the underlying

View File

@ -626,16 +626,15 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
}
const char* Simulator::xreg_names[] = {
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8",
"x9", "x10", "x11", "x12", "x13", "x14", "x15", "ip0", "ip1",
"x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"cp", "x28", "fp", "lr", "xzr", "csp"};
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
"x11", "x12", "x13", "x14", "x15", "ip0", "ip1", "x18", "x19", "x20", "x21",
"x22", "x23", "x24", "x25", "x26", "cp", "x28", "fp", "lr", "xzr", "sp"};
const char* Simulator::wreg_names[] = {
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8",
"w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
"w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
"wcp", "w28", "wfp", "wlr", "wzr", "wcsp"};
"wcp", "w28", "wfp", "wlr", "wzr", "wsp"};
const char* Simulator::sreg_names[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@ -768,7 +767,7 @@ int Simulator::CodeFromName(const char* name) {
return i;
}
}
if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
if ((strcmp("sp", name) == 0) || (strcmp("wsp", name) == 0)) {
return kSPRegInternalCode;
}
return -1;
@ -2996,15 +2995,15 @@ bool Simulator::GetValue(const char* desc, int64_t* value) {
bool Simulator::PrintValue(const char* desc) {
if (strcmp(desc, "csp") == 0) {
if (strcmp(desc, "sp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
PrintF(stream_, "%s sp:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
return true;
} else if (strcmp(desc, "wcsp") == 0) {
} else if (strcmp(desc, "wsp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
PrintF(stream_, "%s wsp:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
return true;
}

View File

@ -552,7 +552,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex);
__ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
@ -663,7 +663,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ Sub(scratch, masm->StackPointer(), scratch);
__ Sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
__ B(le, stack_overflow);
@ -745,7 +745,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Poke the result into the stack.
__ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
// Loop if we've not reached the end of copy marker.
__ Cmp(__ StackPointer(), scratch);
__ Cmp(sp, scratch);
__ B(lt, &loop);
__ Bind(&done);
@ -1009,7 +1009,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
__ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
__ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
@ -1060,7 +1060,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ Sub(x10, __ StackPointer(), Operand(x11));
__ Sub(x10, sp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@ -1613,7 +1613,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
__ Ldr(w4, UntagSmiMemOperand(__ StackPointer(), 3 * kPointerSize));
__ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@ -1646,7 +1646,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kPointerSize;
// Set up frame pointer.
__ Add(fp, __ StackPointer(), frame_size);
__ Add(fp, sp, frame_size);
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@ -1682,7 +1682,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
// Restore fp, lr.
__ Mov(__ StackPointer(), fp);
__ Mov(sp, fp);
__ Pop(fp, lr);
// Call builtin.
@ -2090,8 +2090,7 @@ void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(x11, x1); // x1: function
__ SmiTag(x11, x0); // x0: number of arguments.
__ Push(x11, padreg);
__ Add(fp, __ StackPointer(),
ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
__ Add(fp, sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
}
void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@ -2101,7 +2100,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Get the number of arguments passed (as a smi), tear down the frame and
// then drop the parameters and the receiver.
__ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Mov(__ StackPointer(), fp);
__ Mov(sp, fp);
__ Pop(fp, lr);
// Drop actual parameters and receiver.
@ -2194,7 +2193,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
__ Sub(x10, masm->StackPointer(), x10);
__ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@ -2467,7 +2466,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
__ Sub(x10, masm->StackPointer(), x10);
__ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@ -2539,8 +2538,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
__ Tbz(bound_argc, 0, &done);
// Store receiver.
__ Add(scratch, __ StackPointer(),
Operand(total_argc, LSL, kPointerSizeLog2));
__ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
__ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
__ Tbnz(total_argc, 0, &done);
// Store padding.
@ -2854,7 +2852,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kPointerSize);
__ Mov(copy_to, __ StackPointer());
__ Mov(copy_to, sp);
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
@ -2918,8 +2916,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ RecordComment("-- Store receiver --");
__ Add(copy_from, fp, 2 * kPointerSize);
__ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
__ Str(scratch1,
MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2));
__ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");

View File

@ -256,8 +256,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
offset = FrameOffset::FromStackPointer(from_sp);
}
}
return MemOperand(offset.from_stack_pointer() ? tasm->StackPointer() : fp,
offset.offset());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@ -297,8 +296,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
@ -455,7 +453,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
__ Mov(csp, fp);
__ Mov(sp, fp);
__ Pop(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@ -799,7 +797,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), tasm()->StackPointer());
__ mov(i.OutputRegister(), sp);
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
@ -844,7 +842,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
Register base = offset.from_stack_pointer() ? __ StackPointer() : fp;
Register base = offset.from_stack_pointer() ? sp : fp;
__ Add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
@ -1195,7 +1193,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Claim: {
int count = i.InputInt32(0);
DCHECK_EQ(count % 2, 0);
__ AssertCspAligned();
__ AssertSpAligned();
if (count > 0) {
__ Claim(count);
frame_access_state()->IncreaseSPDelta(count);
@ -2195,7 +2193,6 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Drop(pop_count);
__ Ret();
} else {
DCHECK(csp.Is(__ StackPointer()));
gen_->AssembleSourcePosition(instr_);
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
@ -2291,7 +2288,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
__ AssertCspAligned();
__ AssertSpAligned();
// The frame has been previously padded in CodeGenerator::FinishFrame().
DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
@ -2312,7 +2309,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Prologue();
} else {
__ Push(lr, fp);
__ Mov(fp, __ StackPointer());
__ Mov(fp, sp);
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@ -2346,7 +2343,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ isolate())));
__ Ldr(scratch, MemOperand(scratch));
__ Add(scratch, scratch, shrink_slots * kPointerSize);
__ Cmp(__ StackPointer(), scratch);
__ Cmp(sp, scratch);
__ B(hs, &done);
}
@ -2356,8 +2353,6 @@ void CodeGenerator::AssembleConstructFrame() {
// runtime call.
__ EnterFrame(StackFrame::WASM_COMPILED);
}
DCHECK(__ StackPointer().Is(csp));
__ AssertStackConsistency();
__ Mov(cp, Smi::kZero);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
@ -2367,7 +2362,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_debug_code) {
__ Brk(0);
}
__ AssertStackConsistency();
__ Bind(&done);
}
@ -2473,7 +2467,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ DropArguments(pop_reg);
}
__ AssertCspAligned();
__ AssertSpAligned();
__ Ret();
}

View File

@ -1692,7 +1692,7 @@ void InstructionSelector::EmitPrepareArguments(
// Bump the stack pointer(s).
if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
// TODO(titzer): it would be better to bump the csp here only
// TODO(titzer): it would be better to bump the sp here only
// and emit paired stores with increment for non c frames.
Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
}

View File

@ -35,10 +35,9 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Leave the frame.
// - Restart the frame by calling the function.
__ Mov(fp, x1);
__ AssertStackConsistency();
__ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Mov(masm->StackPointer(), Operand(fp));
__ Mov(sp, fp);
__ Pop(fp, lr); // Frame, Return address.
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));

View File

@ -41,7 +41,7 @@ namespace internal {
* - x29/fp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - x16/x17 : IP registers, used by assembler. Very volatile.
* - csp : Points to tip of C stack.
* - sp : Points to tip of C stack.
*
* - x0-x7 : Used as a cache to store 32 bit capture registers. These
* registers need to be retained every time a call to C code
@ -57,7 +57,7 @@ namespace internal {
* the code)
*
* - fp[96] isolate Address of the current isolate.
* ^^^ csp when called ^^^
* ^^^ sp when called ^^^
* - fp[88] lr Return from the RegExp code.
* - fp[80] r29 Old frame pointer (CalleeSaved).
* - fp[0..72] r19-r28 Backup of CalleeSaved registers.
@ -77,7 +77,7 @@ namespace internal {
* - ... num_saved_registers_ registers.
* - ...
* - register N + num_registers - 1
* ^^^^^^^^^ csp ^^^^^^^^^
* ^^^^^^^^^ sp ^^^^^^^^^
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
@ -704,9 +704,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// x6: Address stack_base
// x7: int direct_call
// The stack pointer should be csp on entry.
// csp[8]: address of the current isolate
// csp[0]: secondary link/return address used by native call
// sp[8]: address of the current isolate
// sp[0]: secondary link/return address used by native call
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
@ -719,12 +718,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
DCHECK_EQ(11, kCalleeSaved.Count());
registers_to_retain.Combine(lr);
DCHECK(csp.Is(__ StackPointer()));
__ PushCPURegList(registers_to_retain);
__ PushCPURegList(argument_registers);
// Set frame pointer in place.
__ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
__ Add(frame_pointer(), sp, argument_registers.Count() * kPointerSize);
// Initialize callee-saved registers.
__ Mov(start_offset(), w1);
@ -755,7 +753,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
ExternalReference::address_of_stack_limit(isolate());
__ Mov(x10, stack_limit);
__ Ldr(x10, MemOperand(x10));
__ Subs(x10, csp, x10);
__ Subs(x10, sp, x10);
// Handle it if the stack pointer is already below the stack limit.
__ B(ls, &stack_limit_hit);
@ -1015,9 +1013,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Bind(&return_w0);
// Set stack pointer back to first register to retain
DCHECK(csp.Is(__ StackPointer()));
__ Mov(csp, fp);
__ AssertStackConsistency();
__ Mov(sp, fp);
// Restore registers.
__ PopCPURegList(registers_to_retain);
@ -1036,7 +1032,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// The cached registers need to be retained.
__ PushCPURegList(cached_registers);
CallCheckStackGuardState(x10);
// Returning from the regexp code restores the stack (csp <- fp)
// Returning from the regexp code restores the stack (sp <- fp)
// so we don't need to drop the link register from it before exiting.
__ Cbnz(w0, &return_w0);
// Reset the cached registers.
@ -1059,7 +1055,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, 3);
// If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
// Returning from the regexp code restores the stack (csp <- fp)
// Returning from the regexp code restores the stack (sp <- fp)
// so we don't need to drop the link register from it before exiting.
__ Cbz(w0, &exit_with_exception);
// Otherwise use return value as new stack pointer.
@ -1366,14 +1362,13 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
int align_mask = (alignment / kXRegSize) - 1;
int xreg_to_claim = (3 + align_mask) & ~align_mask;
DCHECK(csp.Is(__ StackPointer()));
__ Claim(xreg_to_claim);
// CheckStackGuardState needs the end and start addresses of the input string.
__ Poke(input_end(), 2 * kPointerSize);
__ Add(x5, csp, 2 * kPointerSize);
__ Add(x5, sp, 2 * kPointerSize);
__ Poke(input_start(), kPointerSize);
__ Add(x4, csp, kPointerSize);
__ Add(x4, sp, kPointerSize);
__ Mov(w3, start_offset());
// RegExp code frame pointer.
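For reference, a worked version of the claim-size computation above, a sketch assuming the activation frame alignment is 16 bytes and an X register is 8 bytes:

#include <cstdio>

int main() {
  const int alignment = 16;   // assumed activation frame alignment
  const int kXRegSize = 8;    // bytes per X register
  const int align_mask = (alignment / kXRegSize) - 1;        // 1
  const int xreg_to_claim = (3 + align_mask) & ~align_mask;  // 3 -> 4 registers
  printf("claim %d registers (%d bytes)\n", xreg_to_claim,
         xreg_to_claim * kXRegSize);
  return 0;
}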
@ -1384,7 +1379,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
// We need to pass a pointer to the return address as first argument.
// The DirectCEntry stub will place the return address on the stack before
// calling so the stack pointer will point to it.
__ Mov(x0, csp);
__ Mov(x0, sp);
ExternalReference check_stack_guard_state =
ExternalReference::re_check_stack_guard_state(isolate());
@ -1396,7 +1391,6 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Peek(input_start(), kPointerSize);
__ Peek(input_end(), 2 * kPointerSize);
DCHECK(csp.Is(__ StackPointer()));
__ Drop(xreg_to_claim);
// Reload the Code pointer.
@ -1445,8 +1439,7 @@ void RegExpMacroAssemblerARM64::CheckPreemption() {
ExternalReference::address_of_stack_limit(isolate());
__ Mov(x10, stack_limit);
__ Ldr(x10, MemOperand(x10));
DCHECK(csp.Is(__ StackPointer()));
__ Cmp(csp, x10);
__ Cmp(sp, x10);
CallIf(&check_preempt_label_, ls);
}
@ -1557,14 +1550,12 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
DCHECK(csp.Is(__ StackPointer()));
__ Pop(lr, xzr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerARM64::SaveLinkRegister() {
DCHECK(csp.Is(__ StackPointer()));
__ Sub(lr, lr, Operand(masm_->CodeObject()));
__ Push(xzr, lr);
}

View File

@ -251,37 +251,37 @@ TEST(stack_ops) {
SETUP();
START();
// save csp.
__ Mov(x29, csp);
// save sp.
__ Mov(x29, sp);
// Set the csp to a known value.
// Set the sp to a known value.
__ Mov(x16, 0x1000);
__ Mov(csp, x16);
__ Mov(x0, csp);
__ Mov(sp, x16);
__ Mov(x0, sp);
// Add immediate to the csp, and move the result to a normal register.
__ Add(csp, csp, Operand(0x50));
__ Mov(x1, csp);
// Add immediate to the sp, and move the result to a normal register.
__ Add(sp, sp, Operand(0x50));
__ Mov(x1, sp);
// Add extended to the csp, and move the result to a normal register.
// Add extended to the sp, and move the result to a normal register.
__ Mov(x17, 0xFFF);
__ Add(csp, csp, Operand(x17, SXTB));
__ Mov(x2, csp);
__ Add(sp, sp, Operand(x17, SXTB));
__ Mov(x2, sp);
// Create an csp using a logical instruction, and move to normal register.
__ Orr(csp, xzr, Operand(0x1FFF));
__ Mov(x3, csp);
// Create an sp using a logical instruction, and move to normal register.
__ Orr(sp, xzr, Operand(0x1FFF));
__ Mov(x3, sp);
// Write wcsp using a logical instruction.
__ Orr(wcsp, wzr, Operand(0xFFFFFFF8L));
__ Mov(x4, csp);
// Write wsp using a logical instruction.
__ Orr(wsp, wzr, Operand(0xFFFFFFF8L));
__ Mov(x4, sp);
// Write csp, and read back wcsp.
__ Orr(csp, xzr, Operand(0xFFFFFFF8L));
__ Mov(w5, wcsp);
// Write sp, and read back wsp.
__ Orr(sp, xzr, Operand(0xFFFFFFF8L));
__ Mov(w5, wsp);
// restore csp.
__ Mov(csp, x29);
// restore sp.
__ Mov(sp, x29);
END();
RUN();
@ -839,15 +839,15 @@ TEST(bic) {
__ Bic(x10, x0, Operand(0x1F));
__ Bic(x11, x0, Operand(0x100));
// Test bic into csp when the constant cannot be encoded in the immediate
// Test bic into sp when the constant cannot be encoded in the immediate
// field.
// Use x20 to preserve csp. We check for the result via x21 because the
// test infrastructure requires that csp be restored to its original value.
__ Mov(x20, csp);
// Use x20 to preserve sp. We check for the result via x21 because the
// test infrastructure requires that sp be restored to its original value.
__ Mov(x20, sp);
__ Mov(x0, 0xFFFFFF);
__ Bic(csp, x0, Operand(0xABCDEF));
__ Mov(x21, csp);
__ Mov(csp, x20);
__ Bic(sp, x0, Operand(0xABCDEF));
__ Mov(x21, sp);
__ Mov(sp, x20);
END();
RUN();
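As a sanity check on the test above: Bic clears the bits of the second operand, so with x0 = 0xFFFFFF and mask 0xABCDEF the value that ends up in x21 is 0x543210. A standalone sketch of that arithmetic (plain C++, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t x0 = 0xFFFFFF;
  // What Bic(sp, x0, 0xABCDEF) computes before the result is copied to x21.
  const uint64_t result = x0 & ~uint64_t{0xABCDEF};
  printf("0x%llX\n", static_cast<unsigned long long>(result));  // 0x543210
  return 0;
}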
@ -7158,12 +7158,12 @@ TEST(preshift_immediates) {
// pre-shifted encodable immediate followed by a post-shift applied to
// the arithmetic or logical operation.
// Save csp.
__ Mov(x29, csp);
// Save sp.
__ Mov(x29, sp);
// Set the registers to known values.
__ Mov(x0, 0x1000);
__ Mov(csp, 0x1000);
__ Mov(sp, 0x1000);
// Arithmetic ops.
__ Add(x1, x0, 0x1F7DE);
@ -7181,21 +7181,21 @@ TEST(preshift_immediates) {
__ Eor(x11, x0, 0x18001);
// Ops using the stack pointer.
__ Add(csp, csp, 0x1F7F0);
__ Mov(x12, csp);
__ Mov(csp, 0x1000);
__ Add(sp, sp, 0x1F7F0);
__ Mov(x12, sp);
__ Mov(sp, 0x1000);
__ Adds(x13, csp, 0x1F7F0);
__ Adds(x13, sp, 0x1F7F0);
__ Orr(csp, x0, 0x1F7F0);
__ Mov(x14, csp);
__ Mov(csp, 0x1000);
__ Orr(sp, x0, 0x1F7F0);
__ Mov(x14, sp);
__ Mov(sp, 0x1000);
__ Add(csp, csp, 0x10100);
__ Mov(x15, csp);
__ Add(sp, sp, 0x10100);
__ Mov(x15, sp);
// Restore csp.
__ Mov(csp, x29);
// Restore sp.
__ Mov(sp, x29);
END();
RUN();
@ -11867,7 +11867,7 @@ TEST(zero_dest) {
START();
// Preserve the system stack pointer, in case we clobber it.
__ Mov(x30, csp);
__ Mov(x30, sp);
// Initialize the other registers used in this test.
uint64_t literal_base = 0x0100001000100101UL;
__ Mov(x0, 0);
@ -11907,12 +11907,12 @@ TEST(zero_dest) {
__ sub(xzr, x7, xzr);
__ sub(xzr, xzr, x7);
// Swap the saved system stack pointer with the real one. If csp was written
// Swap the saved system stack pointer with the real one. If sp was written
// during the test, it will show up in x30. This is done because the test
// framework assumes that csp will be valid at the end of the test.
// framework assumes that sp will be valid at the end of the test.
__ Mov(x29, x30);
__ Mov(x30, csp);
__ Mov(csp, x29);
__ Mov(x30, sp);
__ Mov(sp, x29);
// We used x29 as a scratch register, so reset it to make sure it doesn't
// trigger a test failure.
__ Add(x29, x28, x1);
@ -11934,7 +11934,7 @@ TEST(zero_dest_setflags) {
START();
// Preserve the system stack pointer, in case we clobber it.
__ Mov(x30, csp);
__ Mov(x30, sp);
// Initialize the other registers used in this test.
uint64_t literal_base = 0x0100001000100101UL;
__ Mov(x0, 0);
@ -11972,12 +11972,12 @@ TEST(zero_dest_setflags) {
__ subs(xzr, x3, xzr);
__ subs(xzr, xzr, x3);
// Swap the saved system stack pointer with the real one. If csp was written
// Swap the saved system stack pointer with the real one. If sp was written
// during the test, it will show up in x30. This is done because the test
// framework assumes that csp will be valid at the end of the test.
// framework assumes that sp will be valid at the end of the test.
__ Mov(x29, x30);
__ Mov(x30, csp);
__ Mov(csp, x29);
__ Mov(x30, sp);
__ Mov(sp, x29);
// We used x29 as a scratch register, so reset it to make sure it doesn't
// trigger a test failure.
__ Add(x29, x28, x1);
@ -12008,15 +12008,15 @@ TEST(register_bit) {
CHECK(xzr.bit() == (1UL << kZeroRegCode));
// Internal ABI definitions.
CHECK(csp.bit() == (1UL << kSPRegInternalCode));
CHECK(csp.bit() != xzr.bit());
CHECK(sp.bit() == (1UL << kSPRegInternalCode));
CHECK(sp.bit() != xzr.bit());
// xn.bit() == wn.bit() at all times, for the same n.
CHECK(x0.bit() == w0.bit());
CHECK(x1.bit() == w1.bit());
CHECK(x10.bit() == w10.bit());
CHECK(xzr.bit() == wzr.bit());
CHECK(csp.bit() == wcsp.bit());
CHECK(sp.bit() == wsp.bit());
}
@ -12478,7 +12478,6 @@ static void PushPopFPSimpleHelper(int reg_count, int reg_size,
uint64_t literal_base = 0x0100001000100101UL;
{
CHECK(__ StackPointer().Is(csp));
int i;
// Initialize the registers, using X registers to load the literal.
@ -12637,8 +12636,6 @@ static void PushPopMixedMethodsHelper(int reg_size) {
START();
{
CHECK(__ StackPointer().Is(csp));
__ Mov(x[3], literal_base * 3);
__ Mov(x[2], literal_base * 2);
__ Mov(x[1], literal_base * 1);
@ -12681,15 +12678,11 @@ TEST(push_pop_mixed_methods_64) {
PushPopMixedMethodsHelper(kXRegSizeInBits);
}
TEST(push_pop_csp) {
TEST(push_pop) {
INIT_V8();
SETUP();
START();
CHECK(csp.Is(__ StackPointer()));
__ Mov(x3, 0x3333333333333333UL);
__ Mov(x2, 0x2222222222222222UL);
__ Mov(x1, 0x1111111111111111UL);
@ -13863,8 +13856,8 @@ TEST(isvalid) {
CHECK(xzr.IsValid());
CHECK(wzr.IsValid());
CHECK(csp.IsValid());
CHECK(wcsp.IsValid());
CHECK(sp.IsValid());
CHECK(wsp.IsValid());
CHECK(d0.IsValid());
CHECK(s0.IsValid());
@ -13875,14 +13868,14 @@ TEST(isvalid) {
CHECK(w0.IsRegister());
CHECK(xzr.IsRegister());
CHECK(wzr.IsRegister());
CHECK(csp.IsRegister());
CHECK(wcsp.IsRegister());
CHECK(sp.IsRegister());
CHECK(wsp.IsRegister());
CHECK(!x0.IsVRegister());
CHECK(!w0.IsVRegister());
CHECK(!xzr.IsVRegister());
CHECK(!wzr.IsVRegister());
CHECK(!csp.IsVRegister());
CHECK(!wcsp.IsVRegister());
CHECK(!sp.IsVRegister());
CHECK(!wsp.IsVRegister());
CHECK(d0.IsVRegister());
CHECK(s0.IsVRegister());
@ -13898,8 +13891,8 @@ TEST(isvalid) {
CHECK(static_cast<CPURegister>(xzr).IsValid());
CHECK(static_cast<CPURegister>(wzr).IsValid());
CHECK(static_cast<CPURegister>(csp).IsValid());
CHECK(static_cast<CPURegister>(wcsp).IsValid());
CHECK(static_cast<CPURegister>(sp).IsValid());
CHECK(static_cast<CPURegister>(wsp).IsValid());
CHECK(static_cast<CPURegister>(d0).IsValid());
CHECK(static_cast<CPURegister>(s0).IsValid());
@ -13910,14 +13903,14 @@ TEST(isvalid) {
CHECK(static_cast<CPURegister>(w0).IsRegister());
CHECK(static_cast<CPURegister>(xzr).IsRegister());
CHECK(static_cast<CPURegister>(wzr).IsRegister());
CHECK(static_cast<CPURegister>(csp).IsRegister());
CHECK(static_cast<CPURegister>(wcsp).IsRegister());
CHECK(static_cast<CPURegister>(sp).IsRegister());
CHECK(static_cast<CPURegister>(wsp).IsRegister());
CHECK(!static_cast<CPURegister>(x0).IsVRegister());
CHECK(!static_cast<CPURegister>(w0).IsVRegister());
CHECK(!static_cast<CPURegister>(xzr).IsVRegister());
CHECK(!static_cast<CPURegister>(wzr).IsVRegister());
CHECK(!static_cast<CPURegister>(csp).IsVRegister());
CHECK(!static_cast<CPURegister>(wcsp).IsVRegister());
CHECK(!static_cast<CPURegister>(sp).IsVRegister());
CHECK(!static_cast<CPURegister>(wsp).IsVRegister());
CHECK(static_cast<CPURegister>(d0).IsVRegister());
CHECK(static_cast<CPURegister>(s0).IsVRegister());
@ -13995,11 +13988,11 @@ TEST(cpureglist_utils_x) {
CHECK(!test.IncludesAliasOf(x4));
CHECK(!test.IncludesAliasOf(x30));
CHECK(!test.IncludesAliasOf(xzr));
CHECK(!test.IncludesAliasOf(csp));
CHECK(!test.IncludesAliasOf(sp));
CHECK(!test.IncludesAliasOf(w4));
CHECK(!test.IncludesAliasOf(w30));
CHECK(!test.IncludesAliasOf(wzr));
CHECK(!test.IncludesAliasOf(wcsp));
CHECK(!test.IncludesAliasOf(wsp));
CHECK(!test.IncludesAliasOf(d0));
CHECK(!test.IncludesAliasOf(d1));
@ -14059,13 +14052,13 @@ TEST(cpureglist_utils_w) {
CHECK(!test.IncludesAliasOf(x14));
CHECK(!test.IncludesAliasOf(x30));
CHECK(!test.IncludesAliasOf(xzr));
CHECK(!test.IncludesAliasOf(csp));
CHECK(!test.IncludesAliasOf(sp));
CHECK(!test.IncludesAliasOf(w0));
CHECK(!test.IncludesAliasOf(w9));
CHECK(!test.IncludesAliasOf(w14));
CHECK(!test.IncludesAliasOf(w30));
CHECK(!test.IncludesAliasOf(wzr));
CHECK(!test.IncludesAliasOf(wcsp));
CHECK(!test.IncludesAliasOf(wsp));
CHECK(!test.IncludesAliasOf(d10));
CHECK(!test.IncludesAliasOf(d11));
@ -14140,8 +14133,8 @@ TEST(cpureglist_utils_d) {
CHECK(!test.IncludesAliasOf(xzr));
CHECK(!test.IncludesAliasOf(wzr));
CHECK(!test.IncludesAliasOf(csp));
CHECK(!test.IncludesAliasOf(wcsp));
CHECK(!test.IncludesAliasOf(sp));
CHECK(!test.IncludesAliasOf(wsp));
CHECK(!test.IsEmpty());
@ -14238,7 +14231,7 @@ TEST(printf) {
// Initialize x29 to the value of the stack pointer. We will use x29 as a
// temporary stack pointer later, and initializing it in this way allows the
// RegisterDump check to pass.
__ Mov(x29, __ StackPointer());
__ Mov(x29, sp);
// Test simple integer arguments.
__ Mov(x0, 1234);
@ -14288,10 +14281,8 @@ TEST(printf) {
__ Printf("%g\n", d10);
__ Printf("%%%%%s%%%c%%\n", x2, w13);
// Print the stack pointer (csp).
CHECK(csp.Is(__ StackPointer()));
__ Printf("StackPointer(csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
__ StackPointer(), __ StackPointer().W());
// Print the stack pointer.
__ Printf("StackPointer(sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n", sp, wsp);
// Test with three arguments.
__ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);

View File

@ -121,8 +121,8 @@ TEST_(bootstrap) {
// Instructions generated by C compiler, disassembled by objdump, and
// reformatted to suit our disassembly style.
COMPARE(dci(0xa9ba7bfd), "stp fp, lr, [csp, #-96]!");
COMPARE(dci(0x910003fd), "mov fp, csp");
COMPARE(dci(0xa9ba7bfd), "stp fp, lr, [sp, #-96]!");
COMPARE(dci(0x910003fd), "mov fp, sp");
COMPARE(dci(0x9100e3a0), "add x0, fp, #0x38 (56)");
COMPARE(dci(0xb900001f), "str wzr, [x0]");
COMPARE(dci(0x528000e1), "movz w1, #0x7");
@ -140,7 +140,7 @@ TEST_(bootstrap) {
COMPARE(dci(0x2a0103e0), "mov w0, w1");
COMPARE(dci(0x93407c00), "sxtw x0, w0");
COMPARE(dci(0x2a000020), "orr w0, w1, w0");
COMPARE(dci(0xa8c67bfd), "ldp fp, lr, [csp], #96");
COMPARE(dci(0xa8c67bfd), "ldp fp, lr, [sp], #96");
CLEANUP();
}
@ -160,12 +160,12 @@ TEST_(mov_mvn) {
COMPARE(Mov(w14, Operand(w15, SXTH, 2)), "sbfiz w14, w15, #2, #16");
COMPARE(Mov(x16, Operand(x20, SXTW, 3)), "sbfiz x16, x20, #3, #32");
COMPARE(Mov(x0, csp), "mov x0, csp");
COMPARE(Mov(w0, wcsp), "mov w0, wcsp");
COMPARE(Mov(x0, sp), "mov x0, sp");
COMPARE(Mov(w0, wsp), "mov w0, wsp");
COMPARE(Mov(x0, xzr), "mov x0, xzr");
COMPARE(Mov(w0, wzr), "mov w0, wzr");
COMPARE(mov(x0, csp), "mov x0, csp");
COMPARE(mov(w0, wcsp), "mov w0, wcsp");
COMPARE(mov(x0, sp), "mov x0, sp");
COMPARE(mov(w0, wsp), "mov w0, wsp");
COMPARE(mov(x0, xzr), "mov x0, xzr");
COMPARE(mov(w0, wzr), "mov w0, wzr");
@ -290,14 +290,14 @@ TEST_(add_immediate) {
"adds x16, x17, #0xaa000 (696320)");
COMPARE(cmn(w18, Operand(0xff)), "cmn w18, #0xff (255)");
COMPARE(cmn(x19, Operand(0xff000)), "cmn x19, #0xff000 (1044480)");
COMPARE(add(w0, wcsp, Operand(0)), "mov w0, wcsp");
COMPARE(add(csp, x0, Operand(0)), "mov csp, x0");
COMPARE(add(w0, wsp, Operand(0)), "mov w0, wsp");
COMPARE(add(sp, x0, Operand(0)), "mov sp, x0");
COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
COMPARE(add(w1, wsp, Operand(8)), "add w1, wsp, #0x8 (8)");
COMPARE(add(x2, sp, Operand(16)), "add x2, sp, #0x10 (16)");
COMPARE(add(wsp, wsp, Operand(42)), "add wsp, wsp, #0x2a (42)");
COMPARE(cmn(sp, Operand(24)), "cmn sp, #0x18 (24)");
COMPARE(adds(wzr, wsp, Operand(9)), "cmn wsp, #0x9 (9)");
CLEANUP();
}
@ -321,11 +321,11 @@ TEST_(sub_immediate) {
COMPARE(cmp(w18, Operand(0xff)), "cmp w18, #0xff (255)");
COMPARE(cmp(x19, Operand(0xff000)), "cmp x19, #0xff000 (1044480)");
COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
COMPARE(add(w1, wsp, Operand(8)), "add w1, wsp, #0x8 (8)");
COMPARE(add(x2, sp, Operand(16)), "add x2, sp, #0x10 (16)");
COMPARE(add(wsp, wsp, Operand(42)), "add wsp, wsp, #0x2a (42)");
COMPARE(cmn(sp, Operand(24)), "cmn sp, #0x18 (24)");
COMPARE(adds(wzr, wsp, Operand(9)), "cmn wsp, #0x9 (9)");
CLEANUP();
}
@ -345,12 +345,12 @@ TEST_(add_shifted) {
COMPARE(cmn(w24, Operand(w25)), "cmn w24, w25");
COMPARE(cmn(x26, Operand(cp, LSL, 63)), "cmn x26, cp, lsl #63");
COMPARE(add(x0, csp, Operand(x1)), "add x0, csp, x1");
COMPARE(add(w2, wcsp, Operand(w3)), "add w2, wcsp, w3");
COMPARE(add(x4, csp, Operand(x5, LSL, 1)), "add x4, csp, x5, lsl #1");
COMPARE(add(x0, sp, Operand(x1)), "add x0, sp, x1");
COMPARE(add(w2, wsp, Operand(w3)), "add w2, wsp, w3");
COMPARE(add(x4, sp, Operand(x5, LSL, 1)), "add x4, sp, x5, lsl #1");
COMPARE(add(x4, xzr, Operand(x5, LSL, 1)), "add x4, xzr, x5, lsl #1");
COMPARE(add(w6, wcsp, Operand(w7, LSL, 3)), "add w6, wcsp, w7, lsl #3");
COMPARE(adds(xzr, csp, Operand(x8, LSL, 4)), "cmn csp, x8, lsl #4");
COMPARE(add(w6, wsp, Operand(w7, LSL, 3)), "add w6, wsp, w7, lsl #3");
COMPARE(adds(xzr, sp, Operand(x8, LSL, 4)), "cmn sp, x8, lsl #4");
COMPARE(adds(xzr, xzr, Operand(x8, LSL, 5)), "cmn xzr, x8, lsl #5");
CLEANUP();
@ -375,12 +375,12 @@ TEST_(sub_shifted) {
COMPARE(negs(w1, Operand(w2)), "negs w1, w2");
COMPARE(negs(x3, Operand(x4, ASR, 61)), "negs x3, x4, asr #61");
COMPARE(sub(x0, csp, Operand(x1)), "sub x0, csp, x1");
COMPARE(sub(w2, wcsp, Operand(w3)), "sub w2, wcsp, w3");
COMPARE(sub(x4, csp, Operand(x5, LSL, 1)), "sub x4, csp, x5, lsl #1");
COMPARE(sub(x0, sp, Operand(x1)), "sub x0, sp, x1");
COMPARE(sub(w2, wsp, Operand(w3)), "sub w2, wsp, w3");
COMPARE(sub(x4, sp, Operand(x5, LSL, 1)), "sub x4, sp, x5, lsl #1");
COMPARE(sub(x4, xzr, Operand(x5, LSL, 1)), "neg x4, x5, lsl #1");
COMPARE(sub(w6, wcsp, Operand(w7, LSL, 3)), "sub w6, wcsp, w7, lsl #3");
COMPARE(subs(xzr, csp, Operand(x8, LSL, 4)), "cmp csp, x8, lsl #4");
COMPARE(sub(w6, wsp, Operand(w7, LSL, 3)), "sub w6, wsp, w7, lsl #3");
COMPARE(subs(xzr, sp, Operand(x8, LSL, 4)), "cmp sp, x8, lsl #4");
COMPARE(subs(xzr, xzr, Operand(x8, LSL, 5)), "cmp xzr, x8, lsl #5");
CLEANUP();
@ -403,11 +403,11 @@ TEST_(add_extended) {
COMPARE(cmn(w0, Operand(w1, UXTB, 2)), "cmn w0, w1, uxtb #2");
COMPARE(cmn(x2, Operand(x3, SXTH, 4)), "cmn x2, w3, sxth #4");
COMPARE(add(w0, wcsp, Operand(w1, UXTB)), "add w0, wcsp, w1, uxtb");
COMPARE(add(x2, csp, Operand(x3, UXTH, 1)), "add x2, csp, w3, uxth #1");
COMPARE(add(wcsp, wcsp, Operand(w4, UXTW, 2)), "add wcsp, wcsp, w4, lsl #2");
COMPARE(cmn(csp, Operand(xzr, UXTX, 3)), "cmn csp, xzr, lsl #3");
COMPARE(cmn(csp, Operand(xzr, LSL, 4)), "cmn csp, xzr, lsl #4");
COMPARE(add(w0, wsp, Operand(w1, UXTB)), "add w0, wsp, w1, uxtb");
COMPARE(add(x2, sp, Operand(x3, UXTH, 1)), "add x2, sp, w3, uxth #1");
COMPARE(add(wsp, wsp, Operand(w4, UXTW, 2)), "add wsp, wsp, w4, lsl #2");
COMPARE(cmn(sp, Operand(xzr, UXTX, 3)), "cmn sp, xzr, lsl #3");
COMPARE(cmn(sp, Operand(xzr, LSL, 4)), "cmn sp, xzr, lsl #4");
CLEANUP();
}
@ -429,11 +429,11 @@ TEST_(sub_extended) {
COMPARE(cmp(w0, Operand(w1, SXTB, 1)), "cmp w0, w1, sxtb #1");
COMPARE(cmp(x2, Operand(x3, UXTH, 3)), "cmp x2, w3, uxth #3");
COMPARE(sub(w0, wcsp, Operand(w1, UXTB)), "sub w0, wcsp, w1, uxtb");
COMPARE(sub(x2, csp, Operand(x3, UXTH, 1)), "sub x2, csp, w3, uxth #1");
COMPARE(sub(wcsp, wcsp, Operand(w4, UXTW, 2)), "sub wcsp, wcsp, w4, lsl #2");
COMPARE(cmp(csp, Operand(xzr, UXTX, 3)), "cmp csp, xzr, lsl #3");
COMPARE(cmp(csp, Operand(xzr, LSL, 4)), "cmp csp, xzr, lsl #4");
COMPARE(sub(w0, wsp, Operand(w1, UXTB)), "sub w0, wsp, w1, uxtb");
COMPARE(sub(x2, sp, Operand(x3, UXTH, 1)), "sub x2, sp, w3, uxth #1");
COMPARE(sub(wsp, wsp, Operand(w4, UXTW, 2)), "sub wsp, wsp, w4, lsl #2");
COMPARE(cmp(sp, Operand(xzr, UXTX, 3)), "cmp sp, xzr, lsl #3");
COMPARE(cmp(sp, Operand(xzr, LSL, 4)), "cmp sp, xzr, lsl #4");
CLEANUP();
}
@ -684,10 +684,10 @@ TEST_(logical_immediate) {
"ands fp, x0, #0x100000000");
// Test stack pointer.
COMPARE(and_(wcsp, wzr, Operand(7)), "and wcsp, wzr, #0x7");
COMPARE(and_(wsp, wzr, Operand(7)), "and wsp, wzr, #0x7");
COMPARE(ands(xzr, xzr, Operand(7)), "tst xzr, #0x7");
COMPARE(orr(csp, xzr, Operand(15)), "orr csp, xzr, #0xf");
COMPARE(eor(wcsp, w0, Operand(31)), "eor wcsp, w0, #0x1f");
COMPARE(orr(sp, xzr, Operand(15)), "orr sp, xzr, #0xf");
COMPARE(eor(wsp, w0, Operand(31)), "eor wsp, w0, #0x1f");
// Test move aliases.
COMPARE(orr(w0, wzr, Operand(0x00000780)), "orr w0, wzr, #0x780");
@ -1089,11 +1089,11 @@ TEST(load_store_v_offset) {
COMPARE(str(q31, MemOperand(x1, 16)), "str q31, [x1, #16]");
COMPARE(str(q0, MemOperand(x3, 65520)), "str q0, [x3, #65520]");
COMPARE(ldr(s24, MemOperand(csp)), "ldr s24, [csp]");
COMPARE(ldr(d25, MemOperand(csp, 8)), "ldr d25, [csp, #8]");
COMPARE(ldr(b26, MemOperand(csp, 1)), "ldr b26, [csp, #1]");
COMPARE(ldr(h27, MemOperand(csp, 2)), "ldr h27, [csp, #2]");
COMPARE(ldr(q28, MemOperand(csp, 16)), "ldr q28, [csp, #16]");
COMPARE(ldr(s24, MemOperand(sp)), "ldr s24, [sp]");
COMPARE(ldr(d25, MemOperand(sp, 8)), "ldr d25, [sp, #8]");
COMPARE(ldr(b26, MemOperand(sp, 1)), "ldr b26, [sp, #1]");
COMPARE(ldr(h27, MemOperand(sp, 2)), "ldr h27, [sp, #2]");
COMPARE(ldr(q28, MemOperand(sp, 16)), "ldr q28, [sp, #16]");
CLEANUP();
}
@ -1135,11 +1135,11 @@ TEST(load_store_v_pre) {
COMPARE(str(q31, MemOperand(x3, 255, PreIndex)), "str q31, [x3, #255]!");
COMPARE(str(q0, MemOperand(x5, -256, PreIndex)), "str q0, [x5, #-256]!");
COMPARE(str(b24, MemOperand(csp, 1, PreIndex)), "str b24, [csp, #1]!");
COMPARE(str(h25, MemOperand(csp, -2, PreIndex)), "str h25, [csp, #-2]!");
COMPARE(str(s26, MemOperand(csp, 4, PreIndex)), "str s26, [csp, #4]!");
COMPARE(str(d27, MemOperand(csp, -8, PreIndex)), "str d27, [csp, #-8]!");
COMPARE(str(q28, MemOperand(csp, 16, PreIndex)), "str q28, [csp, #16]!");
COMPARE(str(b24, MemOperand(sp, 1, PreIndex)), "str b24, [sp, #1]!");
COMPARE(str(h25, MemOperand(sp, -2, PreIndex)), "str h25, [sp, #-2]!");
COMPARE(str(s26, MemOperand(sp, 4, PreIndex)), "str s26, [sp, #4]!");
COMPARE(str(d27, MemOperand(sp, -8, PreIndex)), "str d27, [sp, #-8]!");
COMPARE(str(q28, MemOperand(sp, 16, PreIndex)), "str q28, [sp, #16]!");
CLEANUP();
}
@ -1181,11 +1181,11 @@ TEST(load_store_v_post) {
COMPARE(str(q31, MemOperand(x3, 255, PostIndex)), "str q31, [x3], #255");
COMPARE(str(q0, MemOperand(x5, -256, PostIndex)), "str q0, [x5], #-256");
COMPARE(ldr(b24, MemOperand(csp, -1, PreIndex)), "ldr b24, [csp, #-1]!");
COMPARE(ldr(h25, MemOperand(csp, 2, PreIndex)), "ldr h25, [csp, #2]!");
COMPARE(ldr(s26, MemOperand(csp, -4, PreIndex)), "ldr s26, [csp, #-4]!");
COMPARE(ldr(d27, MemOperand(csp, 8, PreIndex)), "ldr d27, [csp, #8]!");
COMPARE(ldr(q28, MemOperand(csp, -16, PreIndex)), "ldr q28, [csp, #-16]!");
COMPARE(ldr(b24, MemOperand(sp, -1, PreIndex)), "ldr b24, [sp, #-1]!");
COMPARE(ldr(h25, MemOperand(sp, 2, PreIndex)), "ldr h25, [sp, #2]!");
COMPARE(ldr(s26, MemOperand(sp, -4, PreIndex)), "ldr s26, [sp, #-4]!");
COMPARE(ldr(d27, MemOperand(sp, 8, PreIndex)), "ldr d27, [sp, #8]!");
COMPARE(ldr(q28, MemOperand(sp, -16, PreIndex)), "ldr q28, [sp, #-16]!");
CLEANUP();
}
@ -1198,88 +1198,88 @@ TEST(load_store_v_regoffset) {
COMPARE(ldr(b2, MemOperand(x3, w4, SXTW)), "ldr b2, [x3, w4, sxtw]");
// We can't assemble this instruction, but we check it disassembles correctly.
COMPARE(dci(0x3c657883), "ldr b3, [x4, x5, lsl #0]");
COMPARE(ldr(b30, MemOperand(csp, xzr)), "ldr b30, [csp, xzr]");
COMPARE(ldr(b31, MemOperand(csp, wzr, UXTW)), "ldr b31, [csp, wzr, uxtw]");
COMPARE(ldr(b30, MemOperand(sp, xzr)), "ldr b30, [sp, xzr]");
COMPARE(ldr(b31, MemOperand(sp, wzr, UXTW)), "ldr b31, [sp, wzr, uxtw]");
COMPARE(ldr(h0, MemOperand(x1, x2)), "ldr h0, [x1, x2]");
COMPARE(ldr(h1, MemOperand(x2, w3, UXTW)), "ldr h1, [x2, w3, uxtw]");
COMPARE(ldr(h2, MemOperand(x3, w4, SXTW)), "ldr h2, [x3, w4, sxtw]");
COMPARE(ldr(h3, MemOperand(x4, w5, UXTW, 1)), "ldr h3, [x4, w5, uxtw #1]");
COMPARE(ldr(h4, MemOperand(x5, w5, SXTW, 1)), "ldr h4, [x5, w5, sxtw #1]");
COMPARE(ldr(h30, MemOperand(csp, xzr)), "ldr h30, [csp, xzr]");
COMPARE(ldr(h31, MemOperand(csp, wzr, SXTW, 1)),
"ldr h31, [csp, wzr, sxtw #1]");
COMPARE(ldr(h30, MemOperand(sp, xzr)), "ldr h30, [sp, xzr]");
COMPARE(ldr(h31, MemOperand(sp, wzr, SXTW, 1)),
"ldr h31, [sp, wzr, sxtw #1]");
COMPARE(ldr(s0, MemOperand(x1, x2)), "ldr s0, [x1, x2]");
COMPARE(ldr(s1, MemOperand(x2, w3, UXTW)), "ldr s1, [x2, w3, uxtw]");
COMPARE(ldr(s2, MemOperand(x3, w4, SXTW)), "ldr s2, [x3, w4, sxtw]");
COMPARE(ldr(s3, MemOperand(x4, w5, UXTW, 2)), "ldr s3, [x4, w5, uxtw #2]");
COMPARE(ldr(s4, MemOperand(x5, w5, SXTW, 2)), "ldr s4, [x5, w5, sxtw #2]");
COMPARE(ldr(s30, MemOperand(csp, xzr)), "ldr s30, [csp, xzr]");
COMPARE(ldr(s31, MemOperand(csp, wzr, SXTW, 2)),
"ldr s31, [csp, wzr, sxtw #2]");
COMPARE(ldr(s30, MemOperand(sp, xzr)), "ldr s30, [sp, xzr]");
COMPARE(ldr(s31, MemOperand(sp, wzr, SXTW, 2)),
"ldr s31, [sp, wzr, sxtw #2]");
COMPARE(ldr(d0, MemOperand(x1, x2)), "ldr d0, [x1, x2]");
COMPARE(ldr(d1, MemOperand(x2, w3, UXTW)), "ldr d1, [x2, w3, uxtw]");
COMPARE(ldr(d2, MemOperand(x3, w4, SXTW)), "ldr d2, [x3, w4, sxtw]");
COMPARE(ldr(d3, MemOperand(x4, w5, UXTW, 3)), "ldr d3, [x4, w5, uxtw #3]");
COMPARE(ldr(d4, MemOperand(x5, w5, SXTW, 3)), "ldr d4, [x5, w5, sxtw #3]");
COMPARE(ldr(d30, MemOperand(csp, xzr)), "ldr d30, [csp, xzr]");
COMPARE(ldr(d31, MemOperand(csp, wzr, SXTW, 3)),
"ldr d31, [csp, wzr, sxtw #3]");
COMPARE(ldr(d30, MemOperand(sp, xzr)), "ldr d30, [sp, xzr]");
COMPARE(ldr(d31, MemOperand(sp, wzr, SXTW, 3)),
"ldr d31, [sp, wzr, sxtw #3]");
COMPARE(ldr(q0, MemOperand(x1, x2)), "ldr q0, [x1, x2]");
COMPARE(ldr(q1, MemOperand(x2, w3, UXTW)), "ldr q1, [x2, w3, uxtw]");
COMPARE(ldr(q2, MemOperand(x3, w4, SXTW)), "ldr q2, [x3, w4, sxtw]");
COMPARE(ldr(q3, MemOperand(x4, w5, UXTW, 4)), "ldr q3, [x4, w5, uxtw #4]");
COMPARE(ldr(q4, MemOperand(x5, w5, SXTW, 4)), "ldr q4, [x5, w5, sxtw #4]");
COMPARE(ldr(q30, MemOperand(csp, xzr)), "ldr q30, [csp, xzr]");
COMPARE(ldr(q31, MemOperand(csp, wzr, SXTW, 4)),
"ldr q31, [csp, wzr, sxtw #4]");
COMPARE(ldr(q30, MemOperand(sp, xzr)), "ldr q30, [sp, xzr]");
COMPARE(ldr(q31, MemOperand(sp, wzr, SXTW, 4)),
"ldr q31, [sp, wzr, sxtw #4]");
COMPARE(str(b0, MemOperand(x1, x2)), "str b0, [x1, x2]");
COMPARE(str(b1, MemOperand(x2, w3, UXTW)), "str b1, [x2, w3, uxtw]");
COMPARE(str(b2, MemOperand(x3, w4, SXTW)), "str b2, [x3, w4, sxtw]");
// We can't assemble this instruction, but we check it disassembles correctly.
COMPARE(dci(0x3c257883), "str b3, [x4, x5, lsl #0]");
COMPARE(str(b30, MemOperand(csp, xzr)), "str b30, [csp, xzr]");
COMPARE(str(b31, MemOperand(csp, wzr, UXTW)), "str b31, [csp, wzr, uxtw]");
COMPARE(str(b30, MemOperand(sp, xzr)), "str b30, [sp, xzr]");
COMPARE(str(b31, MemOperand(sp, wzr, UXTW)), "str b31, [sp, wzr, uxtw]");
COMPARE(str(h0, MemOperand(x1, x2)), "str h0, [x1, x2]");
COMPARE(str(h1, MemOperand(x2, w3, UXTW)), "str h1, [x2, w3, uxtw]");
COMPARE(str(h2, MemOperand(x3, w4, SXTW)), "str h2, [x3, w4, sxtw]");
COMPARE(str(h3, MemOperand(x4, w5, UXTW, 1)), "str h3, [x4, w5, uxtw #1]");
COMPARE(str(h4, MemOperand(x5, w5, SXTW, 1)), "str h4, [x5, w5, sxtw #1]");
COMPARE(str(h30, MemOperand(csp, xzr)), "str h30, [csp, xzr]");
COMPARE(str(h31, MemOperand(csp, wzr, SXTW, 1)),
"str h31, [csp, wzr, sxtw #1]");
COMPARE(str(h30, MemOperand(sp, xzr)), "str h30, [sp, xzr]");
COMPARE(str(h31, MemOperand(sp, wzr, SXTW, 1)),
"str h31, [sp, wzr, sxtw #1]");
COMPARE(str(s0, MemOperand(x1, x2)), "str s0, [x1, x2]");
COMPARE(str(s1, MemOperand(x2, w3, UXTW)), "str s1, [x2, w3, uxtw]");
COMPARE(str(s2, MemOperand(x3, w4, SXTW)), "str s2, [x3, w4, sxtw]");
COMPARE(str(s3, MemOperand(x4, w5, UXTW, 2)), "str s3, [x4, w5, uxtw #2]");
COMPARE(str(s4, MemOperand(x5, w5, SXTW, 2)), "str s4, [x5, w5, sxtw #2]");
COMPARE(str(s30, MemOperand(csp, xzr)), "str s30, [csp, xzr]");
COMPARE(str(s31, MemOperand(csp, wzr, SXTW, 2)),
"str s31, [csp, wzr, sxtw #2]");
COMPARE(str(s30, MemOperand(sp, xzr)), "str s30, [sp, xzr]");
COMPARE(str(s31, MemOperand(sp, wzr, SXTW, 2)),
"str s31, [sp, wzr, sxtw #2]");
COMPARE(str(d0, MemOperand(x1, x2)), "str d0, [x1, x2]");
COMPARE(str(d1, MemOperand(x2, w3, UXTW)), "str d1, [x2, w3, uxtw]");
COMPARE(str(d2, MemOperand(x3, w4, SXTW)), "str d2, [x3, w4, sxtw]");
COMPARE(str(d3, MemOperand(x4, w5, UXTW, 3)), "str d3, [x4, w5, uxtw #3]");
COMPARE(str(d4, MemOperand(x5, w5, SXTW, 3)), "str d4, [x5, w5, sxtw #3]");
COMPARE(str(d30, MemOperand(csp, xzr)), "str d30, [csp, xzr]");
COMPARE(str(d31, MemOperand(csp, wzr, SXTW, 3)),
"str d31, [csp, wzr, sxtw #3]");
COMPARE(str(d30, MemOperand(sp, xzr)), "str d30, [sp, xzr]");
COMPARE(str(d31, MemOperand(sp, wzr, SXTW, 3)),
"str d31, [sp, wzr, sxtw #3]");
COMPARE(str(q0, MemOperand(x1, x2)), "str q0, [x1, x2]");
COMPARE(str(q1, MemOperand(x2, w3, UXTW)), "str q1, [x2, w3, uxtw]");
COMPARE(str(q2, MemOperand(x3, w4, SXTW)), "str q2, [x3, w4, sxtw]");
COMPARE(str(q3, MemOperand(x4, w5, UXTW, 4)), "str q3, [x4, w5, uxtw #4]");
COMPARE(str(q4, MemOperand(x5, w5, SXTW, 4)), "str q4, [x5, w5, sxtw #4]");
COMPARE(str(q30, MemOperand(csp, xzr)), "str q30, [csp, xzr]");
COMPARE(str(q31, MemOperand(csp, wzr, SXTW, 4)),
"str q31, [csp, wzr, sxtw #4]");
COMPARE(str(q30, MemOperand(sp, xzr)), "str q30, [sp, xzr]");
COMPARE(str(q31, MemOperand(sp, wzr, SXTW, 4)),
"str q31, [sp, wzr, sxtw #4]");
CLEANUP();
}
@ -1304,8 +1304,8 @@ TEST_(load_store_unscaled) {
COMPARE(str(x26, MemOperand(x27, -1)), "stur x26, [cp, #-1]");
COMPARE(str(x28, MemOperand(x29, 255)), "stur x28, [fp, #255]");
COMPARE(str(x30, MemOperand(x0, -256)), "stur lr, [x0, #-256]");
COMPARE(ldr(w0, MemOperand(csp, 1)), "ldur w0, [csp, #1]");
COMPARE(str(x1, MemOperand(csp, -1)), "stur x1, [csp, #-1]");
COMPARE(ldr(w0, MemOperand(sp, 1)), "ldur w0, [sp, #1]");
COMPARE(str(x1, MemOperand(sp, -1)), "stur x1, [sp, #-1]");
COMPARE(ldrb(w2, MemOperand(x3, -2)), "ldurb w2, [x3, #-2]");
COMPARE(ldrsb(w4, MemOperand(x5, -3)), "ldursb w4, [x5, #-3]");
COMPARE(ldrsb(x6, MemOperand(x7, -4)), "ldursb x6, [x7, #-4]");
@ -1476,18 +1476,18 @@ TEST_(load_store_acquire_release) {
COMPARE(stlxrb(w21, w22, x23), "stlxrb w21, w22, [x23]");
COMPARE(stlxrh(w24, w25, x26), "stlxrh w24, w25, [x26]");
COMPARE(ldarb(wzr, csp), "ldarb wzr, [csp]");
COMPARE(ldarh(wzr, csp), "ldarh wzr, [csp]");
COMPARE(ldar(wzr, csp), "ldar wzr, [csp]");
COMPARE(stlrb(wzr, csp), "stlrb wzr, [csp]");
COMPARE(stlrh(wzr, csp), "stlrh wzr, [csp]");
COMPARE(stlr(wzr, csp), "stlr wzr, [csp]");
COMPARE(ldaxrb(wzr, csp), "ldaxrb wzr, [csp]");
COMPARE(ldaxrh(wzr, csp), "ldaxrh wzr, [csp]");
COMPARE(ldaxr(wzr, csp), "ldaxr wzr, [csp]");
COMPARE(stlxrb(w0, wzr, csp), "stlxrb w0, wzr, [csp]");
COMPARE(stlxrh(wzr, w1, csp), "stlxrh wzr, w1, [csp]");
COMPARE(stlxr(w2, wzr, csp), "stlxr w2, wzr, [csp]");
COMPARE(ldarb(wzr, sp), "ldarb wzr, [sp]");
COMPARE(ldarh(wzr, sp), "ldarh wzr, [sp]");
COMPARE(ldar(wzr, sp), "ldar wzr, [sp]");
COMPARE(stlrb(wzr, sp), "stlrb wzr, [sp]");
COMPARE(stlrh(wzr, sp), "stlrh wzr, [sp]");
COMPARE(stlr(wzr, sp), "stlr wzr, [sp]");
COMPARE(ldaxrb(wzr, sp), "ldaxrb wzr, [sp]");
COMPARE(ldaxrh(wzr, sp), "ldaxrh wzr, [sp]");
COMPARE(ldaxr(wzr, sp), "ldaxr wzr, [sp]");
COMPARE(stlxrb(w0, wzr, sp), "stlxrb w0, wzr, [sp]");
COMPARE(stlxrh(wzr, w1, sp), "stlxrh wzr, w1, [sp]");
COMPARE(stlxr(w2, wzr, sp), "stlxr w2, wzr, [sp]");
CLEANUP();
}
@ -2099,24 +2099,24 @@ TEST_(barriers) {
TEST(neon_load_store_vector) {
SET_UP_MASM();
#define DISASM_INST(M, S) \
COMPARE(Ld1(v0.M, MemOperand(x15)), "ld1 {v0." S "}, [x15]"); \
COMPARE(Ld1(v1.M, v2.M, MemOperand(x16)), \
"ld1 {v1." S ", v2." S "}, [x16]"); \
COMPARE(Ld1(v3.M, v4.M, v5.M, MemOperand(x17)), \
"ld1 {v3." S ", v4." S ", v5." S "}, [x17]"); \
COMPARE(Ld1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18)), \
"ld1 {v6." S ", v7." S ", v8_." S ", v9." S "}, [x18]") \
COMPARE(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
"ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp]") \
COMPARE(Ld2(v1.M, v2.M, MemOperand(x16)), \
"ld2 {v1." S ", v2." S "}, [x16]"); \
COMPARE(Ld3(v3.M, v4.M, v5.M, MemOperand(x17)), \
"ld3 {v3." S ", v4." S ", v5." S "}, [x17]"); \
COMPARE(Ld4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18)), \
"ld4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18]") \
COMPARE(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
"ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp]") \
#define DISASM_INST(M, S) \
COMPARE(Ld1(v0.M, MemOperand(x15)), "ld1 {v0." S "}, [x15]"); \
COMPARE(Ld1(v1.M, v2.M, MemOperand(x16)), \
"ld1 {v1." S ", v2." S "}, [x16]"); \
COMPARE(Ld1(v3.M, v4.M, v5.M, MemOperand(x17)), \
"ld1 {v3." S ", v4." S ", v5." S "}, [x17]"); \
COMPARE(Ld1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18)), \
"ld1 {v6." S ", v7." S ", v8_." S ", v9." S "}, [x18]") \
COMPARE(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
"ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
COMPARE(Ld2(v1.M, v2.M, MemOperand(x16)), \
"ld2 {v1." S ", v2." S "}, [x16]"); \
COMPARE(Ld3(v3.M, v4.M, v5.M, MemOperand(x17)), \
"ld3 {v3." S ", v4." S ", v5." S "}, [x17]"); \
COMPARE(Ld4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18)), \
"ld4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18]") \
COMPARE(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
"ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
NEON_FORMAT_LIST(DISASM_INST);
#undef DISASM_INST
@ -2130,7 +2130,7 @@ TEST(neon_load_store_vector) {
COMPARE(Ld1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
"ld1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
COMPARE(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
"ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp], x24") \
"ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
COMPARE(Ld2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
"ld2 {v1." S ", v2." S "}, [x16], x21"); \
COMPARE(Ld3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
@ -2138,7 +2138,7 @@ TEST(neon_load_store_vector) {
COMPARE(Ld4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
"ld4 {v6." S ", v7." S ", v8_." S ", v9." S "}, [x18], x23") \
COMPARE(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
"ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp], x24") \
"ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
NEON_FORMAT_LIST(DISASM_INST);
#undef DISASM_INST
@ -2195,36 +2195,36 @@ TEST(neon_load_store_vector) {
"st1 {v23." S ", v24." S ", v25." S "}, [x17]"); \
COMPARE(St1(v26.M, v27.M, v28.M, v29.M, MemOperand(x18)), \
"st1 {v26." S ", v27." S ", v28." S ", v29." S "}, [x18]") \
COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(csp)), \
"st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp]") \
COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
"st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
COMPARE(St2(VLIST2(v21.M), MemOperand(x16)), \
"st2 {v21." S ", v22." S "}, [x16]"); \
COMPARE(St3(v23.M, v24.M, v25.M, MemOperand(x17)), \
"st3 {v23." S ", v24." S ", v25." S "}, [x17]"); \
COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(csp)), \
"st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp]")
COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
"st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]")
NEON_FORMAT_LIST(DISASM_INST);
#undef DISASM_INST
#define DISASM_INST(M, S) \
COMPARE(St1(v0.M, MemOperand(x15, x20, PostIndex)), \
"st1 {v0." S "}, [x15], x20"); \
COMPARE(St1(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
"st1 {v1." S ", v2." S "}, [x16], x21"); \
COMPARE(St1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
"st1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
COMPARE(St1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
"st1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(csp, x24, PostIndex)), \
"st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp], x24") \
COMPARE(St2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
"st2 {v1." S ", v2." S "}, [x16], x21"); \
COMPARE(St3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
"st3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
COMPARE(St4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
"st4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(csp, x24, PostIndex)), \
"st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp], x24")
#define DISASM_INST(M, S) \
COMPARE(St1(v0.M, MemOperand(x15, x20, PostIndex)), \
"st1 {v0." S "}, [x15], x20"); \
COMPARE(St1(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
"st1 {v1." S ", v2." S "}, [x16], x21"); \
COMPARE(St1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
"st1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
COMPARE(St1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
"st1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
"st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
COMPARE(St2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
"st2 {v1." S ", v2." S "}, [x16], x21"); \
COMPARE(St3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
"st3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
COMPARE(St4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
"st4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
"st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24")
NEON_FORMAT_LIST(DISASM_INST);
#undef DISASM_INST
@ -2357,7 +2357,7 @@ TEST(neon_load_store_lane) {
COMPARE(Ld1(v10.H(), 7, MemOperand(x25)), "ld1 {v10.h}[7], [x25]");
COMPARE(Ld1(v11.S(), 1, MemOperand(x26)), "ld1 {v11.s}[1], [x26]");
COMPARE(Ld1(v12.S(), 3, MemOperand(x27)), "ld1 {v12.s}[3], [cp]");
COMPARE(Ld1(v13.D(), 1, MemOperand(csp)), "ld1 {v13.d}[1], [csp]");
COMPARE(Ld1(v13.D(), 1, MemOperand(sp)), "ld1 {v13.d}[1], [sp]");
COMPARE(Ld1(v0.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"ld1 {v0.b}[0], [x15], x0");
@ -2387,10 +2387,10 @@ TEST(neon_load_store_lane) {
"ld1 {v12.s}[3], [cp], x5");
COMPARE(Ld1(v12.S(), 3, MemOperand(x27, 4, PostIndex)),
"ld1 {v12.s}[3], [cp], #4");
COMPARE(Ld1(v13.D(), 1, MemOperand(csp, x6, PostIndex)),
"ld1 {v13.d}[1], [csp], x6");
COMPARE(Ld1(v13.D(), 1, MemOperand(csp, 8, PostIndex)),
"ld1 {v13.d}[1], [csp], #8");
COMPARE(Ld1(v13.D(), 1, MemOperand(sp, x6, PostIndex)),
"ld1 {v13.d}[1], [sp], x6");
COMPARE(Ld1(v13.D(), 1, MemOperand(sp, 8, PostIndex)),
"ld1 {v13.d}[1], [sp], #8");
COMPARE(Ld2(v0.V8B(), v1.V8B(), 0, MemOperand(x15)),
"ld2 {v0.b, v1.b}[0], [x15]");
@ -2418,8 +2418,8 @@ TEST(neon_load_store_lane) {
"ld2 {v11.s, v12.s}[1], [x26]");
COMPARE(Ld2(v12.S(), v13.S(), 3, MemOperand(x27)),
"ld2 {v12.s, v13.s}[3], [cp]");
COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(csp)),
"ld2 {v13.d, v14.d}[1], [csp]");
COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp)),
"ld2 {v13.d, v14.d}[1], [sp]");
COMPARE(Ld2(v0.V8B(), v1.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"ld2 {v0.b, v1.b}[0], [x15], x0");
@ -2449,10 +2449,10 @@ TEST(neon_load_store_lane) {
"ld2 {v12.s, v13.s}[3], [cp], x5");
COMPARE(Ld2(v11.S(), v12.S(), 3, MemOperand(x26, 8, PostIndex)),
"ld2 {v11.s, v12.s}[3], [x26], #8");
COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(csp, x6, PostIndex)),
"ld2 {v13.d, v14.d}[1], [csp], x6");
COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(csp, 16, PostIndex)),
"ld2 {v13.d, v14.d}[1], [csp], #16");
COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp, x6, PostIndex)),
"ld2 {v13.d, v14.d}[1], [sp], x6");
COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp, 16, PostIndex)),
"ld2 {v13.d, v14.d}[1], [sp], #16");
COMPARE(Ld3(v0.V8B(), v1.V8B(), v2.V8B(), 0, MemOperand(x15)),
"ld3 {v0.b, v1.b, v2.b}[0], [x15]");
@ -2480,8 +2480,8 @@ TEST(neon_load_store_lane) {
"ld3 {v11.s, v12.s, v13.s}[1], [x26]");
COMPARE(Ld3(v12.S(), v13.S(), v14.S(), 3, MemOperand(x27)),
"ld3 {v12.s, v13.s, v14.s}[3], [cp]");
COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(csp)),
"ld3 {v13.d, v14.d, v15.d}[1], [csp]");
COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(sp)),
"ld3 {v13.d, v14.d, v15.d}[1], [sp]");
COMPARE(Ld3(v0.V8B(), v1.V8B(), v2.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"ld3 {v0.b, v1.b, v2.b}[0], [x15], x0");
@ -2512,10 +2512,10 @@ TEST(neon_load_store_lane) {
"ld3 {v12.s, v13.s, v14.s}[3], [cp], x5");
COMPARE(Ld3(v12.S(), v13.S(), v14.S(), 3, MemOperand(x27, 12, PostIndex)),
"ld3 {v12.s, v13.s, v14.s}[3], [cp], #12");
COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(csp, x6, PostIndex)),
"ld3 {v13.d, v14.d, v15.d}[1], [csp], x6");
COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(csp, 24, PostIndex)),
"ld3 {v13.d, v14.d, v15.d}[1], [csp], #24");
COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(sp, x6, PostIndex)),
"ld3 {v13.d, v14.d, v15.d}[1], [sp], x6");
COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(sp, 24, PostIndex)),
"ld3 {v13.d, v14.d, v15.d}[1], [sp], #24");
COMPARE(Ld4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), 0, MemOperand(x15)),
"ld4 {v0.b, v1.b, v2.b, v3.b}[0], [x15]");
@ -2543,8 +2543,8 @@ TEST(neon_load_store_lane) {
"ld4 {v11.s, v12.s, v13.s, v14.s}[1], [x26]");
COMPARE(Ld4(v12.S(), v13.S(), v14.S(), v15.S(), 3, MemOperand(x27)),
"ld4 {v12.s, v13.s, v14.s, v15.s}[3], [cp]");
COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(csp)),
"ld4 {v13.d, v14.d, v15.d, v16.d}[1], [csp]");
COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(sp)),
"ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp]");
COMPARE(Ld4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), 0,
MemOperand(x15, x0, PostIndex)),
@ -2588,12 +2588,12 @@ TEST(neon_load_store_lane) {
COMPARE(Ld4(v11.S(), v12.S(), v13.S(), v14.S(), 3,
MemOperand(x26, 16, PostIndex)),
"ld4 {v11.s, v12.s, v13.s, v14.s}[3], [x26], #16");
COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1,
MemOperand(csp, x6, PostIndex)),
"ld4 {v13.d, v14.d, v15.d, v16.d}[1], [csp], x6");
COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1,
MemOperand(csp, 32, PostIndex)),
"ld4 {v13.d, v14.d, v15.d, v16.d}[1], [csp], #32");
COMPARE(
Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(sp, x6, PostIndex)),
"ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], x6");
COMPARE(
Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(sp, 32, PostIndex)),
"ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], #32");
COMPARE(St1(v0.V8B(), 0, MemOperand(x15)), "st1 {v0.b}[0], [x15]");
COMPARE(St1(v1.V16B(), 1, MemOperand(x16)), "st1 {v1.b}[1], [x16]");
@ -2608,7 +2608,7 @@ TEST(neon_load_store_lane) {
COMPARE(St1(v10.H(), 7, MemOperand(x25)), "st1 {v10.h}[7], [x25]");
COMPARE(St1(v11.S(), 1, MemOperand(x26)), "st1 {v11.s}[1], [x26]");
COMPARE(St1(v12.S(), 3, MemOperand(x27)), "st1 {v12.s}[3], [cp]");
COMPARE(St1(v13.D(), 1, MemOperand(csp)), "st1 {v13.d}[1], [csp]");
COMPARE(St1(v13.D(), 1, MemOperand(sp)), "st1 {v13.d}[1], [sp]");
COMPARE(St1(v0.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"st1 {v0.b}[0], [x15], x0");
@ -2636,8 +2636,8 @@ TEST(neon_load_store_lane) {
"st1 {v11.s}[1], [x26], #4");
COMPARE(St1(v12.S(), 3, MemOperand(x27, x5, PostIndex)),
"st1 {v12.s}[3], [cp], x5");
COMPARE(St1(v13.D(), 1, MemOperand(csp, x6, PostIndex)),
"st1 {v13.d}[1], [csp], x6");
COMPARE(St1(v13.D(), 1, MemOperand(sp, x6, PostIndex)),
"st1 {v13.d}[1], [sp], x6");
COMPARE(St2(v0.V8B(), v1.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"st2 {v0.b, v1.b}[0], [x15], x0");
COMPARE(St2(v1.V16B(), v2.V16B(), 1, MemOperand(x16, 2, PostIndex)),
@ -2664,8 +2664,8 @@ TEST(neon_load_store_lane) {
"st2 {v11.s, v12.s}[1], [x26], #8");
COMPARE(St2(v12.S(), v13.S(), 3, MemOperand(x27, x5, PostIndex)),
"st2 {v12.s, v13.s}[3], [cp], x5");
COMPARE(St2(v13.D(), v14.D(), 1, MemOperand(csp, x6, PostIndex)),
"st2 {v13.d, v14.d}[1], [csp], x6");
COMPARE(St2(v13.D(), v14.D(), 1, MemOperand(sp, x6, PostIndex)),
"st2 {v13.d, v14.d}[1], [sp], x6");
COMPARE(St3(VLIST3(v0.V8B()), 0, MemOperand(x15, x0, PostIndex)),
"st3 {v0.b, v1.b, v2.b}[0], [x15], x0");
COMPARE(St3(VLIST3(v1.V16B()), 1, MemOperand(x16, 3, PostIndex)),
@ -2692,8 +2692,8 @@ TEST(neon_load_store_lane) {
"st3 {v11.s, v12.s, v13.s}[1], [x26], #12");
COMPARE(St3(VLIST3(v12.S()), 3, MemOperand(x27, x5, PostIndex)),
"st3 {v12.s, v13.s, v14.s}[3], [cp], x5");
COMPARE(St3(VLIST3(v13.D()), 1, MemOperand(csp, x6, PostIndex)),
"st3 {v13.d, v14.d, v15.d}[1], [csp], x6");
COMPARE(St3(VLIST3(v13.D()), 1, MemOperand(sp, x6, PostIndex)),
"st3 {v13.d, v14.d, v15.d}[1], [sp], x6");
COMPARE(St4(VLIST4(v0.V8B()), 0, MemOperand(x15, x0, PostIndex)),
"st4 {v0.b, v1.b, v2.b, v3.b}[0], [x15], x0");
@ -2721,8 +2721,8 @@ TEST(neon_load_store_lane) {
"st4 {v11.s, v12.s, v13.s, v14.s}[1], [x26], #16");
COMPARE(St4(VLIST4(v12.S()), 3, MemOperand(x27, x5, PostIndex)),
"st4 {v12.s, v13.s, v14.s, v15.s}[3], [cp], x5");
COMPARE(St4(VLIST4(v13.D()), 1, MemOperand(csp, x6, PostIndex)),
"st4 {v13.d, v14.d, v15.d, v16.d}[1], [csp], x6");
COMPARE(St4(VLIST4(v13.D()), 1, MemOperand(sp, x6, PostIndex)),
"st4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], x6");
CLEANUP();
}
@ -2810,7 +2810,7 @@ TEST(neon_load_all_lanes) {
COMPARE(Ld1r(v17.V8H(), MemOperand(x3)), "ld1r {v17.8h}, [x3]");
COMPARE(Ld1r(v18.V2S(), MemOperand(x4)), "ld1r {v18.2s}, [x4]");
COMPARE(Ld1r(v19.V4S(), MemOperand(x5)), "ld1r {v19.4s}, [x5]");
COMPARE(Ld1r(v20.V2D(), MemOperand(csp)), "ld1r {v20.2d}, [csp]");
COMPARE(Ld1r(v20.V2D(), MemOperand(sp)), "ld1r {v20.2d}, [sp]");
COMPARE(Ld1r(v21.V1D(), MemOperand(x30)), "ld1r {v21.1d}, [lr]");
COMPARE(Ld1r(v22.V8B(), MemOperand(x6, 1, PostIndex)),
@ -2842,8 +2842,8 @@ TEST(neon_load_all_lanes) {
"ld2r {v18.2s, v19.2s}, [x4]");
COMPARE(Ld2r(v19.V4S(), v20.V4S(), MemOperand(x5)),
"ld2r {v19.4s, v20.4s}, [x5]");
COMPARE(Ld2r(v20.V2D(), v21.V2D(), MemOperand(csp)),
"ld2r {v20.2d, v21.2d}, [csp]");
COMPARE(Ld2r(v20.V2D(), v21.V2D(), MemOperand(sp)),
"ld2r {v20.2d, v21.2d}, [sp]");
COMPARE(Ld2r(v21.V8B(), v22.V8B(), MemOperand(x6, 2, PostIndex)),
"ld2r {v21.8b, v22.8b}, [x6], #2");
COMPARE(Ld2r(v22.V16B(), v23.V16B(), MemOperand(x7, x16, PostIndex)),
@ -2871,8 +2871,8 @@ TEST(neon_load_all_lanes) {
"ld3r {v18.2s, v19.2s, v20.2s}, [x4]");
COMPARE(Ld3r(v19.V4S(), v20.V4S(), v21.V4S(), MemOperand(x5)),
"ld3r {v19.4s, v20.4s, v21.4s}, [x5]");
COMPARE(Ld3r(v20.V2D(), v21.V2D(), v22.V2D(), MemOperand(csp)),
"ld3r {v20.2d, v21.2d, v22.2d}, [csp]");
COMPARE(Ld3r(v20.V2D(), v21.V2D(), v22.V2D(), MemOperand(sp)),
"ld3r {v20.2d, v21.2d, v22.2d}, [sp]");
COMPARE(Ld3r(v21.V8B(), v22.V8B(), v23.V8B(), MemOperand(x6, 3, PostIndex)),
"ld3r {v21.8b, v22.8b, v23.8b}, [x6], #3");
COMPARE(
@ -2902,8 +2902,8 @@ TEST(neon_load_all_lanes) {
"ld4r {v18.2s, v19.2s, v20.2s, v21.2s}, [x4]");
COMPARE(Ld4r(v19.V4S(), v20.V4S(), v21.V4S(), v22.V4S(), MemOperand(x5)),
"ld4r {v19.4s, v20.4s, v21.4s, v22.4s}, [x5]");
COMPARE(Ld4r(v20.V2D(), v21.V2D(), v22.V2D(), v23.V2D(), MemOperand(csp)),
"ld4r {v20.2d, v21.2d, v22.2d, v23.2d}, [csp]");
COMPARE(Ld4r(v20.V2D(), v21.V2D(), v22.V2D(), v23.V2D(), MemOperand(sp)),
"ld4r {v20.2d, v21.2d, v22.2d, v23.2d}, [sp]");
COMPARE(Ld4r(v21.V8B(), v22.V8B(), v23.V8B(), v24.V8B(),
MemOperand(x6, 4, PostIndex)),
"ld4r {v21.8b, v22.8b, v23.8b, v24.8b}, [x6], #4");
@ -4955,5 +4955,25 @@ TEST(neon_shift_immediate) {
CLEANUP();
}
#undef TEST_
#undef EXP_SIZE
#undef INSTR_SIZE
#undef SET_UP_MASM
#undef SET_UP_ASM
#undef COMPARE
#undef COMPARE_PREFIX
#undef CLEANUP
#undef VLIST2
#undef VLIST3
#undef VLIST4
#undef NEON_FORMAT_LIST
#undef NEON_FORMAT_LIST_LP
#undef NEON_FORMAT_LIST_LW
#undef NEON_FORMAT_LIST_LW2
#undef NEON_FORMAT_LIST_BHS
#undef NEON_FORMAT_LIST_HS
#undef NEON_FORMAT_LIST_FP
#undef NEON_SCALAR_FORMAT_LIST
} // namespace internal
} // namespace v8

View File

@ -287,7 +287,7 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
if (reg_list & (1UL << i)) {
Register xn = Register::Create(i, kXRegSizeInBits);
// We should never write into csp here.
CHECK(!xn.Is(csp));
CHECK(!xn.Is(sp));
if (!xn.IsZero()) {
if (!first.IsValid()) {
// This is the first register we've hit, so construct the literal.
@ -337,8 +337,6 @@ void Clobber(MacroAssembler* masm, CPURegList reg_list) {
void RegisterDump::Dump(MacroAssembler* masm) {
CHECK(__ StackPointer().Is(csp));
// Ensure that we don't unintentionally clobber any registers.
RegList old_tmp_list = masm->TmpList()->list();
RegList old_fptmp_list = masm->FPTmpList()->list();
@ -372,9 +370,9 @@ void RegisterDump::Dump(MacroAssembler* masm) {
// The stack pointer cannot be stored directly; it needs to be moved into
// another register first. Also, we pushed four X registers, so we need to
// compensate here.
__ Add(tmp, csp, 4 * kXRegSize);
__ Add(tmp, sp, 4 * kXRegSize);
__ Str(tmp, MemOperand(dump_base, sp_offset));
__ Add(tmp_w, wcsp, 4 * kXRegSize);
__ Add(tmp_w, wsp, 4 * kXRegSize);
__ Str(tmp_w, MemOperand(dump_base, wsp_offset));
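The adjustment above compensates for the four X registers pushed earlier in Dump(), so the recorded value is the caller's stack pointer. A small worked sketch, assuming 8-byte X registers and an example sp value:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kXRegSize = 8;
  const uint64_t sp_after_pushes = 0x0000007FFFFF0FE0;  // example value
  // Value stored at sp_offset: undo the 4 * 8 = 32 bytes pushed before dumping.
  const uint64_t recorded_sp = sp_after_pushes + 4 * kXRegSize;
  assert(recorded_sp - sp_after_pushes == 32);
  return 0;
}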
// Dump X registers.