A64: Rename k<Y>RegSize to k<Y>RegSizeInBits, and k<Y>RegSizeInBytes to k<Y>RegSize.

R=ulan@chromium.org

Review URL: https://codereview.chromium.org/194473005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19855 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
alexandre.rames@arm.com 2014-03-12 15:18:40 +00:00
parent 021d9e778c
commit dafba1207d
24 changed files with 372 additions and 344 deletions

View File

@ -109,14 +109,14 @@ inline bool CPURegister::IsValid() const {
inline bool CPURegister::IsValidRegister() const {
return IsRegister() &&
((reg_size == kWRegSize) || (reg_size == kXRegSize)) &&
((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}
inline bool CPURegister::IsValidFPRegister() const {
return IsFPRegister() &&
((reg_size == kSRegSize) || (reg_size == kDRegSize)) &&
((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
(reg_code < kNumberOfFPRegisters);
}
@ -221,25 +221,25 @@ inline Register Register::XRegFromCode(unsigned code) {
// This function returns the zero register when code = 31. The stack pointer
// can not be returned.
ASSERT(code < kNumberOfRegisters);
return Register::Create(code, kXRegSize);
return Register::Create(code, kXRegSizeInBits);
}
inline Register Register::WRegFromCode(unsigned code) {
ASSERT(code < kNumberOfRegisters);
return Register::Create(code, kWRegSize);
return Register::Create(code, kWRegSizeInBits);
}
inline FPRegister FPRegister::SRegFromCode(unsigned code) {
ASSERT(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kSRegSize);
return FPRegister::Create(code, kSRegSizeInBits);
}
inline FPRegister FPRegister::DRegFromCode(unsigned code) {
ASSERT(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kDRegSize);
return FPRegister::Create(code, kDRegSizeInBits);
}
@ -334,8 +334,8 @@ Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
extend_(NO_EXTEND),
shift_amount_(shift_amount),
rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
ASSERT(!reg.IsSP());
}
@ -1006,16 +1006,16 @@ Instr Assembler::ImmAddSub(int64_t imm) {
Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
((reg_size == kWRegSize) && is_uint5(imms)));
ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
((reg_size == kWRegSizeInBits) && is_uint5(imms)));
USE(reg_size);
return imms << ImmS_offset;
}
Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
((reg_size == kWRegSize) && is_uint5(immr)));
ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
((reg_size == kWRegSizeInBits) && is_uint5(immr)));
USE(reg_size);
ASSERT(is_uint6(immr));
return immr << ImmR_offset;
@ -1023,18 +1023,18 @@ Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
ASSERT(is_uint6(imms));
ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
USE(reg_size);
return imms << ImmSetBits_offset;
}
Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
((reg_size == kWRegSize) && is_uint5(immr)));
ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
((reg_size == kWRegSizeInBits) && is_uint5(immr)));
USE(reg_size);
return immr << ImmRotate_offset;
}
@ -1047,8 +1047,8 @@ Instr Assembler::ImmLLiteral(int imm19) {
Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
ASSERT((reg_size == kXRegSize) || (bitn == 0));
ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
USE(reg_size);
return bitn << BitN_offset;
}

View File

@ -120,7 +120,8 @@ CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
// this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
CPURegList list = CPURegList::GetCalleeSaved();
list.Combine(CPURegList(CPURegister::kRegister, kXRegSize, kJSCallerSaved));
list.Combine(
CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));
// Note that unfortunately we can't use symbolic names for registers and have
// to directly use register codes. This is because this function is used to
@ -748,7 +749,7 @@ void Assembler::tbz(const Register& rt,
unsigned bit_pos,
int imm14) {
positions_recorder()->WriteRecordedPositions();
ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
@ -765,7 +766,7 @@ void Assembler::tbnz(const Register& rt,
unsigned bit_pos,
int imm14) {
positions_recorder()->WriteRecordedPositions();
ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
@ -2085,7 +2086,7 @@ void Assembler::EmitExtendShift(const Register& rd,
case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
case UXTX:
case SXTX: {
ASSERT(rn.SizeInBits() == kXRegSize);
ASSERT(rn.SizeInBits() == kXRegSizeInBits);
// Nothing to extend. Just shift.
lsl(rd, rn_, left_shift);
break;
@ -2230,7 +2231,7 @@ bool Assembler::IsImmLogical(uint64_t value,
unsigned* imm_s,
unsigned* imm_r) {
ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
ASSERT((width == kWRegSize) || (width == kXRegSize));
ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
// Logical immediates are encoded using parameters n, imm_s and imm_r using
// the following table:
@ -2257,7 +2258,7 @@ bool Assembler::IsImmLogical(uint64_t value,
// 1. If the value has all set or all clear bits, it can't be encoded.
if ((value == 0) || (value == 0xffffffffffffffffUL) ||
((width == kWRegSize) && (value == 0xffffffff))) {
((width == kWRegSizeInBits) && (value == 0xffffffff))) {
return false;
}
@ -2271,7 +2272,7 @@ bool Assembler::IsImmLogical(uint64_t value,
// If width == 64 (X reg), start at 0xFFFFFF80.
// If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
// widths won't be executed.
int imm_s_fixed = (width == kXRegSize) ? -128 : -64;
int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
int imm_s_mask = 0x3F;
for (;;) {

View File

@ -227,7 +227,7 @@ struct Register : public CPURegister {
static Register from_code(int code) {
// Always return an X register.
return Register::Create(code, kXRegSize);
return Register::Create(code, kXRegSizeInBits);
}
// End of V8 compatibility section -----------------------
@ -322,7 +322,7 @@ struct FPRegister : public CPURegister {
static FPRegister from_code(int code) {
// Always return a D register.
return FPRegister::Create(code, kDRegSize);
return FPRegister::Create(code, kDRegSizeInBits);
}
// End of V8 compatibility section -----------------------
};
@ -358,20 +358,23 @@ INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
#define DEFINE_REGISTERS(N) \
INITIALIZE_REGISTER(Register, w##N, N, kWRegSize, CPURegister::kRegister); \
INITIALIZE_REGISTER(Register, x##N, N, kXRegSize, CPURegister::kRegister);
INITIALIZE_REGISTER(Register, w##N, N, \
kWRegSizeInBits, CPURegister::kRegister); \
INITIALIZE_REGISTER(Register, x##N, N, \
kXRegSizeInBits, CPURegister::kRegister);
REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSize,
INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
CPURegister::kRegister);
INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSize,
INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
CPURegister::kRegister);
#define DEFINE_FPREGISTERS(N) \
INITIALIZE_REGISTER(FPRegister, s##N, N, kSRegSize, \
CPURegister::kFPRegister); \
INITIALIZE_REGISTER(FPRegister, d##N, N, kDRegSize, CPURegister::kFPRegister);
#define DEFINE_FPREGISTERS(N) \
INITIALIZE_REGISTER(FPRegister, s##N, N, \
kSRegSizeInBits, CPURegister::kFPRegister); \
INITIALIZE_REGISTER(FPRegister, d##N, N, \
kDRegSizeInBits, CPURegister::kFPRegister);
REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS
@ -520,12 +523,12 @@ class CPURegList {
CPURegister PopHighestIndex();
// AAPCS64 callee-saved registers.
static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
static CPURegList GetCalleeSavedFP(unsigned size = kDRegSize);
static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
// AAPCS64 caller-saved registers. Note that this includes lr.
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
// Registers saved as safepoints.
static CPURegList GetSafepointSavedRegisters();

View File

@ -182,7 +182,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ Cbz(argc, &no_arguments);
// First args = sp[(argc - 1) * 8].
__ Sub(argc, argc, 1);
__ Claim(argc, kXRegSizeInBytes);
__ Claim(argc, kXRegSize);
// jssp now points to args[0], load and drop args[0] + receiver.
Register arg = argc;
__ Ldr(arg, MemOperand(jssp, 2 * kPointerSize, PostIndex));
@ -532,8 +532,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// jssp[1]: receiver
// jssp[2]: constructor function
// jssp[3]: number of arguments (smi-tagged)
__ Peek(constructor, 2 * kXRegSizeInBytes); // Load constructor.
__ Peek(argc, 3 * kXRegSizeInBytes); // Load number of arguments.
__ Peek(constructor, 2 * kXRegSize); // Load constructor.
__ Peek(argc, 3 * kXRegSize); // Load number of arguments.
__ SmiUntag(argc);
// Set up pointer to last argument.
@ -617,7 +617,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// jssp[0]: receiver (newly allocated object)
// jssp[1]: constructor function
// jssp[2]: number of arguments (smi-tagged)
__ Peek(x1, 2 * kXRegSizeInBytes);
__ Peek(x1, 2 * kXRegSize);
// Leave construct frame.
}
@ -1017,7 +1017,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
Label slow, non_function;
__ Peek(function, Operand(argc, LSL, kXRegSizeInBytesLog2));
__ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
__ JumpIfSmi(function, &non_function);
__ JumpIfNotObjectType(function, scratch1, receiver_type,
JS_FUNCTION_TYPE, &slow);
@ -1045,7 +1045,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Compute the receiver in sloppy mode.
Register receiver = x2;
__ Sub(scratch1, argc, 1);
__ Peek(receiver, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
__ Peek(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
__ JumpIfSmi(receiver, &convert_to_object);
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
@ -1074,7 +1074,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
// Restore the function and flag in the registers.
__ Peek(function, Operand(argc, LSL, kXRegSizeInBytesLog2));
__ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
__ Mov(call_type, static_cast<int>(call_type_JS_func));
__ B(&patch_receiver);
@ -1086,7 +1086,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Bind(&patch_receiver);
__ Sub(scratch1, argc, 1);
__ Poke(receiver, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
__ Poke(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
__ B(&shift_arguments);
}
@ -1105,7 +1105,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// become the receiver.
// call type (0: JS function, 1: function proxy, 2: non-function)
__ Sub(scratch1, argc, 1);
__ Poke(function, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
__ Poke(function, Operand(scratch1, LSL, kXRegSizeLog2));
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@ -1351,7 +1351,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
kPointerSize)));
__ Mov(jssp, fp);
__ Pop(fp, lr);
__ DropBySMI(x10, kXRegSizeInBytes);
__ DropBySMI(x10, kXRegSize);
__ Drop(1);
}

View File

@ -1501,7 +1501,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireX();
__ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
__ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes)));
__ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
__ Cmp(temp, x12);
__ Check(eq, kReturnAddressNotFoundInFrame);
}
@ -2323,10 +2323,10 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// Patch the arguments.length and parameters pointer in the current frame.
__ Ldr(x11, MemOperand(caller_fp,
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Poke(x11, 0 * kXRegSizeInBytes);
__ Poke(x11, 0 * kXRegSize);
__ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
__ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset));
__ Poke(x10, 1 * kXRegSizeInBytes);
__ Poke(x10, 1 * kXRegSize);
__ Bind(&runtime);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@ -3162,7 +3162,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Read two 32 bit values from the static offsets vector buffer into
// an X register
__ Ldr(current_offset,
MemOperand(offsets_vector_index, kWRegSizeInBytes * 2, PostIndex));
MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
// Store the smi values in the last match info.
__ SmiTag(x10, current_offset);
// Clearing the 32 bottom bits gives us a Smi.
@ -3170,7 +3170,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ And(x11, current_offset, ~kWRegMask);
__ Stp(x10,
x11,
MemOperand(last_match_offsets, kXRegSizeInBytes * 2, PostIndex));
MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
__ B(&next_capture);
__ Bind(&done);
@ -3454,7 +3454,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
__ Bind(&non_function);
__ Poke(function, argc_ * kXRegSizeInBytes);
__ Poke(function, argc_ * kXRegSize);
__ Mov(x0, argc_); // Set up the number of arguments.
__ Mov(x2, 0);
__ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
@ -5105,7 +5105,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
// The inlined probes didn't find the entry.
// Call the complete stub to scan the whole dictionary.
CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
spill_list.Combine(lr);
spill_list.Remove(scratch1);
spill_list.Remove(scratch2);
@ -5185,7 +5185,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Bind(&good);
}
CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
spill_list.Combine(lr);
spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.

View File

@ -262,7 +262,7 @@ class RecordWriteStub: public PlatformCodeStub {
// - x29 frame pointer
// - x30 link register(lr)
// - x31 xzr/stack pointer
CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);
CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
// We also remove MacroAssembler's scratch registers.
list.Remove(ip0);

View File

@ -529,7 +529,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label result_is_finite_non_zero;
// Assert that we can load offset 0 (the small input threshold) and offset 1
// (the large input threshold) with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 1).offset() -
ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
ExpConstant(constants, 0).offset()));
__ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
@ -559,7 +559,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Bind(&result_is_finite_non_zero);
// Assert that we can load offset 3 and offset 4 with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 4).offset() -
ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
ExpConstant(constants, 3).offset()));
__ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
__ Fmadd(double_temp1, double_temp1, input, double_temp3);
@ -567,7 +567,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Fsub(double_temp1, double_temp1, double_temp3);
// Assert that we can load offset 5 and offset 6 with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 6).offset() -
ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
ExpConstant(constants, 5).offset()));
__ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
// TODO(jbramley): Consider using Fnmsub here.
@ -597,7 +597,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// Do the final table lookup.
__ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeInBytesLog2));
__ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
__ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
__ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
__ Bfi(temp2, temp1, 32, 32);

View File

@ -63,22 +63,22 @@ const int kFirstCalleeSavedFPRegisterIndex = 8;
// Callee saved registers with no specific purpose in JS are x19-x25.
const unsigned kJSCalleeSavedRegList = 0x03f80000;
// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize >> 3;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize >> 3;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize >> 3;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize >> 3;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kWRegSizeInBits = 32;
const unsigned kWRegSizeInBitsLog2 = 5;
const unsigned kWRegSize = kWRegSizeInBits >> 3;
const unsigned kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3;
const unsigned kXRegSizeInBits = 64;
const unsigned kXRegSizeInBitsLog2 = 6;
const unsigned kXRegSize = kXRegSizeInBits >> 3;
const unsigned kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3;
const unsigned kSRegSizeInBits = 32;
const unsigned kSRegSizeInBitsLog2 = 5;
const unsigned kSRegSize = kSRegSizeInBits >> 3;
const unsigned kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3;
const unsigned kDRegSizeInBits = 64;
const unsigned kDRegSizeInBitsLog2 = 6;
const unsigned kDRegSize = kDRegSizeInBits >> 3;
const unsigned kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
const int64_t kWRegMask = 0x00000000ffffffffL;
const int64_t kXRegMask = 0xffffffffffffffffL;
const int64_t kSRegMask = 0x00000000ffffffffL;

View File

@ -179,7 +179,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
STATIC_ASSERT(kSmiValueSize == 32);
CPURegList non_object_list =
CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Store each non-object register as two SMIs.
Register reg = Register(non_object_list.PopLowestIndex());
@ -213,7 +213,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
non_object_list =
CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Load each non-object register from two SMIs.
// Stack:

View File

@ -148,21 +148,21 @@ void Deoptimizer::EntryGenerator::Generate() {
// in the input frame.
// Save all allocatable floating point registers.
CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
FPRegister::kAllocatableFPRegisters);
__ PushCPURegList(saved_fp_registers);
// We save all the registers except jssp, sp and lr.
CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
saved_registers.Combine(fp);
__ PushCPURegList(saved_registers);
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSizeInBytes) +
(saved_fp_registers.Count() * kDRegSizeInBytes);
(saved_registers.Count() * kXRegSize) +
(saved_fp_registers.Count() * kDRegSize);
// Floating point registers are saved on the stack above core registers.
const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;
const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;
// Get the bailout id from the stack.
Register bailout_id = x2;
@ -221,7 +221,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.

View File

@ -255,8 +255,8 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {
case ORR_w_imm:
case ORR_x_imm: {
mnemonic = "orr";
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
: kWRegSize;
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
: kWRegSizeInBits;
if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
mnemonic = "mov";
form = "'Rds, 'ITri";
@ -281,8 +281,8 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {
bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
ASSERT((reg_size == kXRegSize) ||
((reg_size == kWRegSize) && (value <= 0xffffffff)));
ASSERT((reg_size == kXRegSizeInBits) ||
((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
// Test for movz: 16-bits set at positions 0, 16, 32 or 48.
if (((value & 0xffffffffffff0000UL) == 0UL) ||
@ -293,14 +293,14 @@ bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
}
// Test for movn: NOT(16-bits set at positions 0, 16, 32 or 48).
if ((reg_size == kXRegSize) &&
if ((reg_size == kXRegSizeInBits) &&
(((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
return true;
}
if ((reg_size == kWRegSize) &&
if ((reg_size == kWRegSizeInBits) &&
(((value & 0xffff0000) == 0xffff0000) ||
((value & 0x0000ffff) == 0x0000ffff))) {
return true;
@ -447,7 +447,7 @@ void Disassembler::VisitBitfield(Instruction* instr) {
unsigned s = instr->ImmS();
unsigned r = instr->ImmR();
unsigned rd_size_minus_1 =
((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
((instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits) - 1;
const char *mnemonic = "";
const char *form = "";
const char *form_shift_right = "'Rd, 'Rn, 'IBr";
@ -1518,7 +1518,8 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
}
case 'Z': { // IBZ-r.
ASSERT((format[3] == '-') && (format[4] == 'r'));
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
: kWRegSizeInBits;
AppendToOutput("#%d", reg_size - r);
return 5;
}

View File

@ -148,7 +148,7 @@ void FullCodeGenerator::Generate() {
// object).
if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kXRegSizeInBytes;
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
__ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
@ -425,7 +425,7 @@ void FullCodeGenerator::EmitReturnSequence() {
ASSERT(!current_sp.Is(csp));
__ mov(current_sp, fp);
int no_frame_start = masm_->pc_offset();
__ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSizeInBytes, PostIndex));
__ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
// Drop the arguments and receiver and return.
// TODO(all): This implementation is overkill as it supports 2**31+1
// arguments, consider how to improve it without creating a security
@ -433,7 +433,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ LoadLiteral(ip0, 3 * kInstructionSize);
__ add(current_sp, current_sp, ip0);
__ ret();
__ dc64(kXRegSizeInBytes * (info_->scope()->num_parameters() + 1));
__ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@ -692,7 +692,7 @@ void FullCodeGenerator::Split(Condition cond,
MemOperand FullCodeGenerator::StackOperand(Variable* var) {
// Offset is negative because higher indexes are at lower addresses.
int offset = -var->index() * kXRegSizeInBytes;
int offset = -var->index() * kXRegSize;
// Adjust by a (parameter or local) base offset.
if (var->IsParameter()) {
offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
@ -1184,18 +1184,18 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ B(hs, loop_statement.break_label());
// Get the current entry of the array into register r3.
__ Peek(x10, 2 * kXRegSizeInBytes);
__ Peek(x10, 2 * kXRegSize);
__ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
__ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
// Get the expected map from the stack or a smi in the
// permanent slow case into register x10.
__ Peek(x2, 3 * kXRegSizeInBytes);
__ Peek(x2, 3 * kXRegSize);
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
__ Peek(x1, 4 * kXRegSizeInBytes);
__ Peek(x1, 4 * kXRegSize);
__ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
__ Cmp(x11, x2);
__ B(eq, &update_each);
@ -2053,7 +2053,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ B(&done);
__ Bind(&not_minus_zero);
__ Cls(x11, x10);
__ Cmp(x11, kXRegSize - kSmiShift);
__ Cmp(x11, kXRegSizeInBits - kSmiShift);
__ B(lt, &stub_call);
__ SmiTag(result, x10);
__ Bind(&done);
@ -2413,7 +2413,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ Peek(x1, (arg_count + 1) * kXRegSizeInBytes);
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@ -2427,7 +2427,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Prepare to push a copy of the first argument or undefined if it doesn't
// exist.
if (arg_count > 0) {
__ Peek(x10, arg_count * kXRegSizeInBytes);
__ Peek(x10, arg_count * kXRegSize);
} else {
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
}
@ -2498,7 +2498,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Call the evaluated function.
CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kXRegSizeInBytes);
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@ -2601,7 +2601,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
__ Peek(x1, arg_count * kXRegSizeInBytes);
__ Peek(x1, arg_count * kXRegSize);
// Record call targets in unoptimized code.
__ LoadObject(x2, FeedbackVector());
@ -4083,10 +4083,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Push(x0);
break;
case NAMED_PROPERTY:
__ Poke(x0, kXRegSizeInBytes);
__ Poke(x0, kXRegSize);
break;
case KEYED_PROPERTY:
__ Poke(x0, 2 * kXRegSizeInBytes);
__ Poke(x0, 2 * kXRegSize);
break;
}
}

View File

@ -102,7 +102,7 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
unsigned width) {
ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
uint64_t result = value & ((1UL << width) - 1UL);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
@ -115,7 +115,7 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
// indicate a failure case. Specifically, where the constraints on imm_s are not
// met.
uint64_t Instruction::ImmLogical() {
unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
int64_t n = BitN();
int64_t imm_s = ImmSetBits();
int64_t imm_r = ImmRotate();

View File

@ -675,7 +675,7 @@ bool LCodeGen::GeneratePrologue() {
info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSizeInBytes;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
__ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
@ -2867,7 +2867,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
__ AssertString(input);
// Assert that we can use a W register load to get the hash.
ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSize);
ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
__ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
}

View File

@ -747,12 +747,12 @@ void MacroAssembler::Fmov(FPRegister fd, double imm) {
// TODO(all): The Assembler would try to relocate the immediate with
// Assembler::ldr(const FPRegister& ft, double imm) but it is not
// implemented yet.
if (fd.SizeInBits() == kDRegSize) {
if (fd.SizeInBits() == kDRegSizeInBits) {
Register tmp = temps.AcquireX();
Mov(tmp, double_to_rawbits(imm));
Fmov(fd, tmp);
} else {
ASSERT(fd.SizeInBits() == kSRegSize);
ASSERT(fd.SizeInBits() == kSRegSizeInBits);
Register tmp = temps.AcquireW();
Mov(tmp, float_to_rawbits(static_cast<float>(imm)));
Fmov(fd, tmp);
@ -1476,7 +1476,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
return;
}
const int shift = CountTrailingZeros(unit_size, kXRegSize);
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
if (size.IsZero()) {
@ -1493,7 +1493,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
ASSERT(IsPowerOf2(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
(shift >= 0) ? (shift) : (-shift));
@ -1537,7 +1537,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
return;
}
const int shift = CountTrailingZeros(unit_size, kXRegSize);
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
if (size.IsZero()) {
@ -1557,7 +1557,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
ASSERT(IsPowerOf2(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
(shift >= 0) ? (shift) : (-shift));

View File

@ -343,7 +343,7 @@ unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
@ -474,9 +474,9 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
// Add/sub with carry (shifted register).
ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
ASSERT(operand.shift() != ROR);
ASSERT(
is_uintn(operand.shift_amount(),
rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2));
ASSERT(is_uintn(operand.shift_amount(),
rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
: kWRegSizeInBitsLog2));
Register temp = temps.AcquireSameSizeAs(rn);
EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
AddSubWithCarry(rd, rn, temp, S, op);
@ -1118,7 +1118,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// system stack pointer (csp).
ASSERT(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex);
MemOperand tos(csp, -2 * kXRegSize, PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@ -1142,7 +1142,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
// system stack pointer (csp).
ASSERT(csp.Is(StackPointer()));
MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex);
MemOperand tos(csp, 2 * kXRegSize, PostIndex);
ldp(x19, x20, tos);
ldp(x21, x22, tos);
@ -1621,10 +1621,10 @@ void MacroAssembler::CallApiFunctionAndReturn(
// Save the callee-save registers we are going to use.
// TODO(all): Is this necessary? ARM doesn't do it.
STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
Poke(x19, (spill_offset + 0) * kXRegSizeInBytes);
Poke(x20, (spill_offset + 1) * kXRegSizeInBytes);
Poke(x21, (spill_offset + 2) * kXRegSizeInBytes);
Poke(x22, (spill_offset + 3) * kXRegSizeInBytes);
Poke(x19, (spill_offset + 0) * kXRegSize);
Poke(x20, (spill_offset + 1) * kXRegSize);
Poke(x21, (spill_offset + 2) * kXRegSize);
Poke(x22, (spill_offset + 3) * kXRegSize);
// Allocate HandleScope in callee-save registers.
// We will need to restore the HandleScope after the call to the API function,
@ -1688,10 +1688,10 @@ void MacroAssembler::CallApiFunctionAndReturn(
Bind(&leave_exit_frame);
// Restore callee-saved registers.
Peek(x19, (spill_offset + 0) * kXRegSizeInBytes);
Peek(x20, (spill_offset + 1) * kXRegSizeInBytes);
Peek(x21, (spill_offset + 2) * kXRegSizeInBytes);
Peek(x22, (spill_offset + 3) * kXRegSizeInBytes);
Peek(x19, (spill_offset + 0) * kXRegSize);
Peek(x20, (spill_offset + 1) * kXRegSize);
Peek(x21, (spill_offset + 2) * kXRegSize);
Peek(x22, (spill_offset + 3) * kXRegSize);
// Check if the function scheduled an exception.
Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate())));
@ -2165,7 +2165,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
DONT_DO_SMI_CHECK);
STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2));
STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
Eor(scratch1, scratch1, scratch2);
@ -2298,9 +2298,9 @@ void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
Label loop;
Bind(&loop);
Ldp(scratch4, scratch5,
MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex));
MemOperand(src_untagged, kXRegSize* 2, PostIndex));
Stp(scratch4, scratch5,
MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex));
MemOperand(dst_untagged, kXRegSize* 2, PostIndex));
Sub(remaining, remaining, 1);
Cbnz(remaining, &loop);
@ -2330,10 +2330,8 @@ void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
// Copy fields in pairs.
for (unsigned i = 0; i < count / 2; i++) {
Ldp(scratch3, scratch4,
MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex));
Stp(scratch3, scratch4,
MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex));
Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
}
// Handle the leftovers.
@ -2361,8 +2359,8 @@ void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
// Copy fields one by one.
for (unsigned i = 0; i < count; i++) {
Ldr(scratch3, MemOperand(src_untagged, kXRegSizeInBytes, PostIndex));
Str(scratch3, MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex));
Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
}
}
@ -2907,7 +2905,7 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
while (!saved_fp_regs.IsEmpty()) {
const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
offset -= 2 * kDRegSizeInBytes;
offset -= 2 * kDRegSize;
Ldp(dst1, dst0, MemOperand(fp, offset));
}
}
@ -2952,7 +2950,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// Reserve space for the return address and for user requested memory.
// We do this before aligning to make sure that we end up correctly
// aligned with the minimum of wasted space.
Claim(extra_space + 1, kXRegSizeInBytes);
Claim(extra_space + 1, kXRegSize);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
@ -2978,7 +2976,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
// padding can vary.
Add(scratch, csp, kXRegSizeInBytes);
Add(scratch, csp, kXRegSize);
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@ -3120,7 +3118,7 @@ void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
Pop(x10);
Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes);
Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
Str(x10, MemOperand(x11));
}
@ -4109,13 +4107,13 @@ void MacroAssembler::PushSafepointRegisters() {
void MacroAssembler::PushSafepointFPRegisters() {
PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
FPRegister::kAllocatableFPRegisters));
}
void MacroAssembler::PopSafepointFPRegisters() {
PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
FPRegister::kAllocatableFPRegisters));
}
@ -4948,10 +4946,10 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
// sequence that patches it needs five, so we use the extra space to try to
// simplify some addressing modes and remove some dependencies (compared to
// using two stp instructions with write-back).
__ sub(jssp, jssp, 4 * kXRegSizeInBytes);
__ sub(csp, csp, 4 * kXRegSizeInBytes);
__ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes));
__ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes));
__ sub(jssp, jssp, 4 * kXRegSize);
__ sub(csp, csp, 4 * kXRegSize);
__ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
__ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
__ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
__ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);

View File

@ -531,8 +531,8 @@ class MacroAssembler : public Assembler {
// and pop instructions).
//
// (Push|Pop)SizeRegList allow you to specify the register size as a
// parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
// supported.
// parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
// kSRegSizeInBits are supported.
//
// Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
void PushCPURegList(CPURegList registers);
@ -547,28 +547,28 @@ class MacroAssembler : public Assembler {
PopCPURegList(CPURegList(type, reg_size, registers));
}
inline void PushXRegList(RegList regs) {
PushSizeRegList(regs, kXRegSize);
PushSizeRegList(regs, kXRegSizeInBits);
}
inline void PopXRegList(RegList regs) {
PopSizeRegList(regs, kXRegSize);
PopSizeRegList(regs, kXRegSizeInBits);
}
inline void PushWRegList(RegList regs) {
PushSizeRegList(regs, kWRegSize);
PushSizeRegList(regs, kWRegSizeInBits);
}
inline void PopWRegList(RegList regs) {
PopSizeRegList(regs, kWRegSize);
PopSizeRegList(regs, kWRegSizeInBits);
}
inline void PushDRegList(RegList regs) {
PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
}
inline void PopDRegList(RegList regs) {
PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
}
inline void PushSRegList(RegList regs) {
PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
}
inline void PopSRegList(RegList regs) {
PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
}
// Push the specified register 'count' times.
@ -654,19 +654,19 @@ class MacroAssembler : public Assembler {
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
inline void Claim(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
inline void Claim(const Register& count,
uint64_t unit_size = kXRegSizeInBytes);
inline void Drop(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
uint64_t unit_size = kXRegSize);
inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count,
uint64_t unit_size = kXRegSizeInBytes);
uint64_t unit_size = kXRegSize);
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
// register.
inline void ClaimBySMI(const Register& count_smi,
uint64_t unit_size = kXRegSizeInBytes);
uint64_t unit_size = kXRegSize);
inline void DropBySMI(const Register& count_smi,
uint64_t unit_size = kXRegSizeInBytes);
uint64_t unit_size = kXRegSize);
// Compare a register with an operand, and branch to label depending on the
// condition. May corrupt the status flags.

View File

@ -199,7 +199,8 @@ void RegExpMacroAssemblerA64::AdvanceRegister(int reg, int by) {
break;
case CACHED_MSW:
to_advance = GetCachedRegister(reg);
__ Add(to_advance, to_advance, static_cast<int64_t>(by) << kWRegSize);
__ Add(to_advance, to_advance,
static_cast<int64_t>(by) << kWRegSizeInBits);
break;
default:
UNREACHABLE();
@ -296,7 +297,7 @@ void RegExpMacroAssemblerA64::CheckGreedyLoop(Label* on_equal) {
__ Cmp(current_input_offset(), w10);
__ Cset(x11, eq);
__ Add(backtrack_stackpointer(),
backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeInBytesLog2));
backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeLog2));
BranchOrBacktrack(eq, on_equal);
}
@ -315,7 +316,7 @@ void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
ASSERT((start_reg % 2) == 0);
if (start_reg < kNumCachedRegisters) {
__ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSize);
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
} else {
__ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
}
@ -389,7 +390,7 @@ void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
int argument_count = 4;
// The cached registers need to be retained.
CPURegList cached_registers(CPURegister::kRegister, kXRegSize, 0, 7);
CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
__ PushCPURegList(cached_registers);
@ -441,7 +442,7 @@ void RegExpMacroAssemblerA64::CheckNotBackReference(
ASSERT((start_reg % 2) == 0);
if (start_reg < kNumCachedRegisters) {
__ Mov(x10, GetCachedRegister(start_reg));
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSize);
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
} else {
__ Ldp(w11, w10, capture_location(start_reg, x10));
}
@ -726,7 +727,7 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
// Make sure the stack alignment will be respected.
int alignment = masm_->ActivationFrameAlignment();
ASSERT_EQ(alignment % 16, 0);
int align_mask = (alignment / kWRegSizeInBytes) - 1;
int align_mask = (alignment / kWRegSize) - 1;
num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
// Check if we have space on the stack.
@ -744,7 +745,7 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
// Check if there is room for the variable number of registers above
// the stack limit.
__ Cmp(x10, num_wreg_to_allocate * kWRegSizeInBytes);
__ Cmp(x10, num_wreg_to_allocate * kWRegSize);
__ B(hs, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
@ -760,7 +761,7 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
__ Bind(&stack_ok);
// Allocate space on stack.
__ Claim(num_wreg_to_allocate, kWRegSizeInBytes);
__ Claim(num_wreg_to_allocate, kWRegSize);
// Initialize success_counter with 0.
__ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
@ -785,7 +786,7 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
// on-stack registers later.
__ Orr(twice_non_position_value(),
non_position_value().X(),
Operand(non_position_value().X(), LSL, kWRegSize));
Operand(non_position_value().X(), LSL, kWRegSizeInBits));
// Initialize code pointer register.
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
@ -851,7 +852,7 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
(i < num_saved_registers_) && (i < kNumCachedRegisters);
i += 2) {
__ Mov(capture_start.X(), GetCachedRegister(i));
__ Lsr(capture_end.X(), capture_start.X(), kWRegSize);
__ Lsr(capture_end.X(), capture_start.X(), kWRegSizeInBits);
if ((i == 0) && global_with_zero_length_check()) {
// Keep capture start for the zero-length check later.
__ Mov(first_capture_start, capture_start);
@ -1006,7 +1007,7 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
Label exit_with_exception;
// Registers x0 to x7 are used to store the first captures, they need to be
// retained over calls to C++ code.
CPURegList cached_registers(CPURegister::kRegister, kXRegSize, 0, 7);
CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
if (check_preempt_label_.is_linked()) {
@ -1168,7 +1169,7 @@ void RegExpMacroAssemblerA64::ReadCurrentPositionFromRegister(int reg) {
break;
case CACHED_MSW:
cached_register = GetCachedRegister(reg);
__ Lsr(current_input_offset().X(), cached_register, kWRegSize);
__ Lsr(current_input_offset().X(), cached_register, kWRegSizeInBits);
break;
default:
UNREACHABLE();
@ -1263,7 +1264,7 @@ void RegExpMacroAssemblerA64::ClearRegisters(int reg_from, int reg_to) {
STATIC_ASSERT(kNumRegistersToUnroll > 2);
// We position the base pointer to (reg_from + 1).
int base_offset = kFirstRegisterOnStack -
kWRegSizeInBytes - (kWRegSizeInBytes * reg_from);
kWRegSize - (kWRegSize * reg_from);
if (num_registers > kNumRegistersToUnroll) {
Register base = x10;
__ Add(base, frame_pointer(), base_offset);
@ -1279,7 +1280,7 @@ void RegExpMacroAssemblerA64::ClearRegisters(int reg_from, int reg_to) {
for (int i = reg_from; i <= reg_to; i += 2) {
__ Str(twice_non_position_value(),
MemOperand(frame_pointer(), base_offset));
base_offset -= kWRegSizeInBytes * 2;
base_offset -= kWRegSize * 2;
}
}
}
@ -1429,7 +1430,7 @@ void RegExpMacroAssemblerA64::CallCheckStackGuardState(Register scratch) {
// AAPCS64 requires the stack to be 16 byte aligned.
int alignment = masm_->ActivationFrameAlignment();
ASSERT_EQ(alignment % 16, 0);
int align_mask = (alignment / kXRegSizeInBytes) - 1;
int align_mask = (alignment / kXRegSize) - 1;
int xreg_to_claim = (3 + align_mask) & ~align_mask;
ASSERT(csp.Is(__ StackPointer()));
@ -1541,7 +1542,7 @@ void RegExpMacroAssemblerA64::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
__ Str(source,
MemOperand(backtrack_stackpointer(),
-static_cast<int>(kWRegSizeInBytes),
-static_cast<int>(kWRegSize),
PreIndex));
}
@ -1550,13 +1551,13 @@ void RegExpMacroAssemblerA64::Pop(Register target) {
ASSERT(target.Is32Bits());
ASSERT(!target.is(backtrack_stackpointer()));
__ Ldr(target,
MemOperand(backtrack_stackpointer(), kWRegSizeInBytes, PostIndex));
MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
}
Register RegExpMacroAssemblerA64::GetCachedRegister(int register_index) {
ASSERT(register_index < kNumCachedRegisters);
return Register::Create(register_index / 2, kXRegSize);
return Register::Create(register_index / 2, kXRegSizeInBits);
}
@ -1578,7 +1579,8 @@ Register RegExpMacroAssemblerA64::GetRegister(int register_index,
result = GetCachedRegister(register_index).W();
break;
case CACHED_MSW:
__ Lsr(maybe_result.X(), GetCachedRegister(register_index), kWRegSize);
__ Lsr(maybe_result.X(), GetCachedRegister(register_index),
kWRegSizeInBits);
result = maybe_result;
break;
default:
@ -1607,12 +1609,12 @@ void RegExpMacroAssemblerA64::StoreRegister(int register_index,
case CACHED_LSW:
cached_register = GetCachedRegister(register_index);
if (!source.Is(cached_register.W())) {
__ Bfi(cached_register, source.X(), 0, kWRegSize);
__ Bfi(cached_register, source.X(), 0, kWRegSizeInBits);
}
break;
case CACHED_MSW:
cached_register = GetCachedRegister(register_index);
__ Bfi(cached_register, source.X(), kWRegSize, kWRegSize);
__ Bfi(cached_register, source.X(), kWRegSizeInBits, kWRegSizeInBits);
break;
default:
UNREACHABLE();
@ -1650,7 +1652,7 @@ MemOperand RegExpMacroAssemblerA64::register_location(int register_index) {
num_registers_ = register_index + 1;
}
register_index -= kNumCachedRegisters;
int offset = kFirstRegisterOnStack - register_index * kWRegSizeInBytes;
int offset = kFirstRegisterOnStack - register_index * kWRegSize;
return MemOperand(frame_pointer(), offset);
}
@ -1661,7 +1663,7 @@ MemOperand RegExpMacroAssemblerA64::capture_location(int register_index,
ASSERT(register_index >= kNumCachedRegisters);
ASSERT_EQ(register_index % 2, 0);
register_index -= kNumCachedRegisters;
int offset = kFirstCaptureOnStack - register_index * kWRegSizeInBytes;
int offset = kFirstCaptureOnStack - register_index * kWRegSize;
// capture_location is used with Stp instructions to load/store 2 registers.
// The immediate field in the encoding is limited to 7 bits (signed).
if (is_int7(offset)) {

View File

@ -144,9 +144,9 @@ class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
static const int kSuccessCounter = kInput - kPointerSize;
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSizeInBytes;
static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSize;
// A capture is a 64 bit value holding two position.
static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSizeInBytes;
static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;

View File

@ -306,8 +306,8 @@ void Simulator::CorruptAllCallerSavedCPURegisters() {
// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
// TODO(all): Insert a marker in the extra space allocated on the stack.
uintptr_t Simulator::PushAddress(uintptr_t address) {
ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes);
intptr_t new_sp = sp() - 2 * kXRegSizeInBytes;
ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
intptr_t new_sp = sp() - 2 * kXRegSize;
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
*stack_slot = address;
set_sp(new_sp);
@ -319,8 +319,8 @@ uintptr_t Simulator::PopAddress() {
intptr_t current_sp = sp();
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
uintptr_t address = *stack_slot;
ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes);
set_sp(current_sp + 2 * kXRegSizeInBytes);
ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
set_sp(current_sp + 2 * kXRegSize);
return address;
}
@ -614,7 +614,7 @@ int64_t Simulator::AddWithCarry(unsigned reg_size,
int64_t src2,
int64_t carry_in) {
ASSERT((carry_in == 0) || (carry_in == 1));
ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
uint64_t u1, u2;
int64_t result;
@ -622,7 +622,7 @@ int64_t Simulator::AddWithCarry(unsigned reg_size,
uint32_t N, Z, C, V;
if (reg_size == kWRegSize) {
if (reg_size == kWRegSizeInBits) {
u1 = static_cast<uint64_t>(src1) & kWRegMask;
u2 = static_cast<uint64_t>(src2) & kWRegMask;
@ -632,9 +632,9 @@ int64_t Simulator::AddWithCarry(unsigned reg_size,
((kWMaxUInt - u1 - carry_in) < u2);
// Overflow iff the sign bit is the same for the two inputs and different
// for the result.
int64_t s_src1 = src1 << (kXRegSize - kWRegSize);
int64_t s_src2 = src2 << (kXRegSize - kWRegSize);
int64_t s_result = result << (kXRegSize - kWRegSize);
int64_t s_src1 = src1 << (kXRegSizeInBits - kWRegSizeInBits);
int64_t s_src2 = src2 << (kXRegSizeInBits - kWRegSizeInBits);
int64_t s_result = result << (kXRegSizeInBits - kWRegSizeInBits);
V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);
} else {
@ -670,7 +670,7 @@ int64_t Simulator::ShiftOperand(unsigned reg_size,
if (amount == 0) {
return value;
}
int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask;
int64_t mask = reg_size == kXRegSizeInBits ? kXRegMask : kWRegMask;
switch (shift_type) {
case LSL:
return (value << amount) & mask;
@ -678,13 +678,13 @@ int64_t Simulator::ShiftOperand(unsigned reg_size,
return static_cast<uint64_t>(value) >> amount;
case ASR: {
// Shift used to restore the sign.
unsigned s_shift = kXRegSize - reg_size;
unsigned s_shift = kXRegSizeInBits - reg_size;
// Value with its sign restored.
int64_t s_value = (value << s_shift) >> s_shift;
return (s_value >> amount) & mask;
}
case ROR: {
if (reg_size == kWRegSize) {
if (reg_size == kWRegSizeInBits) {
value &= kWRegMask;
}
return (static_cast<uint64_t>(value) >> amount) |
@ -726,7 +726,7 @@ int64_t Simulator::ExtendValue(unsigned reg_size,
default:
UNREACHABLE();
}
int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask;
int64_t mask = (reg_size == kXRegSizeInBits) ? kXRegMask : kWRegMask;
return (value << left_shift) & mask;
}
@ -1056,7 +1056,8 @@ void Simulator::VisitCompareBranch(Instruction* instr) {
void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
bool set_flags = instr->FlagsUpdate();
int64_t new_val = 0;
Instr operation = instr->Mask(AddSubOpMask);
@ -1087,7 +1088,8 @@ void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
void Simulator::VisitAddSubShifted(Instruction* instr) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
int64_t op2 = ShiftOperand(reg_size,
reg(reg_size, instr->Rm()),
static_cast<Shift>(instr->ShiftDP()),
@ -1103,7 +1105,8 @@ void Simulator::VisitAddSubImmediate(Instruction* instr) {
void Simulator::VisitAddSubExtended(Instruction* instr) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
int64_t op2 = ExtendValue(reg_size,
reg(reg_size, instr->Rm()),
static_cast<Extend>(instr->ExtendMode()),
@ -1113,7 +1116,8 @@ void Simulator::VisitAddSubExtended(Instruction* instr) {
void Simulator::VisitAddSubWithCarry(Instruction* instr) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
int64_t op2 = reg(reg_size, instr->Rm());
int64_t new_val;
@ -1132,7 +1136,8 @@ void Simulator::VisitAddSubWithCarry(Instruction* instr) {
void Simulator::VisitLogicalShifted(Instruction* instr) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
Shift shift_type = static_cast<Shift>(instr->ShiftDP());
unsigned shift_amount = instr->ImmDPShift();
int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
@ -1150,7 +1155,8 @@ void Simulator::VisitLogicalImmediate(Instruction* instr) {
void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
int64_t op1 = reg(reg_size, instr->Rn());
int64_t result = 0;
bool update_flags = false;
@ -1178,7 +1184,8 @@ void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
}
@ -1189,7 +1196,8 @@ void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
int64_t op1 = reg(reg_size, instr->Rn());
if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
@ -1234,7 +1242,7 @@ void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext,
int64_t offset = ExtendValue(kXRegSizeInBits, xreg(instr->Rm()), ext,
shift_amount);
LoadStoreHelper(instr, offset, Offset);
}
@ -1275,23 +1283,28 @@ void Simulator::LoadStoreHelper(Instruction* instr,
case STR_w:
case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
case LDRSB_w: {
set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead8(address), SXTB));
set_wreg(srcdst,
ExtendValue(kWRegSizeInBits, MemoryRead8(address), SXTB));
break;
}
case LDRSB_x: {
set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead8(address), SXTB));
set_xreg(srcdst,
ExtendValue(kXRegSizeInBits, MemoryRead8(address), SXTB));
break;
}
case LDRSH_w: {
set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead16(address), SXTH));
set_wreg(srcdst,
ExtendValue(kWRegSizeInBits, MemoryRead16(address), SXTH));
break;
}
case LDRSH_x: {
set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead16(address), SXTH));
set_xreg(srcdst,
ExtendValue(kXRegSizeInBits, MemoryRead16(address), SXTH));
break;
}
case LDRSW_x: {
set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead32(address), SXTW));
set_xreg(srcdst,
ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
break;
}
case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
@ -1372,48 +1385,48 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
switch (op) {
case LDP_w: {
set_wreg(rt, MemoryRead32(address));
set_wreg(rt2, MemoryRead32(address + kWRegSizeInBytes));
set_wreg(rt2, MemoryRead32(address + kWRegSize));
break;
}
case LDP_s: {
set_sreg(rt, MemoryReadFP32(address));
set_sreg(rt2, MemoryReadFP32(address + kSRegSizeInBytes));
set_sreg(rt2, MemoryReadFP32(address + kSRegSize));
break;
}
case LDP_x: {
set_xreg(rt, MemoryRead64(address));
set_xreg(rt2, MemoryRead64(address + kXRegSizeInBytes));
set_xreg(rt2, MemoryRead64(address + kXRegSize));
break;
}
case LDP_d: {
set_dreg(rt, MemoryReadFP64(address));
set_dreg(rt2, MemoryReadFP64(address + kDRegSizeInBytes));
set_dreg(rt2, MemoryReadFP64(address + kDRegSize));
break;
}
case LDPSW_x: {
set_xreg(rt, ExtendValue(kXRegSize, MemoryRead32(address), SXTW));
set_xreg(rt2, ExtendValue(kXRegSize,
MemoryRead32(address + kWRegSizeInBytes), SXTW));
set_xreg(rt, ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
set_xreg(rt2, ExtendValue(kXRegSizeInBits,
MemoryRead32(address + kWRegSize), SXTW));
break;
}
case STP_w: {
MemoryWrite32(address, wreg(rt));
MemoryWrite32(address + kWRegSizeInBytes, wreg(rt2));
MemoryWrite32(address + kWRegSize, wreg(rt2));
break;
}
case STP_s: {
MemoryWriteFP32(address, sreg(rt));
MemoryWriteFP32(address + kSRegSizeInBytes, sreg(rt2));
MemoryWriteFP32(address + kSRegSize, sreg(rt2));
break;
}
case STP_x: {
MemoryWrite64(address, xreg(rt));
MemoryWrite64(address + kXRegSizeInBytes, xreg(rt2));
MemoryWrite64(address + kXRegSize, xreg(rt2));
break;
}
case STP_d: {
MemoryWriteFP64(address, dreg(rt));
MemoryWriteFP64(address + kDRegSizeInBytes, dreg(rt2));
MemoryWriteFP64(address + kDRegSize, dreg(rt2));
break;
}
default: UNREACHABLE();
@ -1624,7 +1637,8 @@ void Simulator::VisitConditionalSelect(Instruction* instr) {
default: UNIMPLEMENTED();
}
}
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
set_reg(reg_size, instr->Rd(), new_val);
}
@ -1634,21 +1648,23 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
unsigned src = instr->Rn();
switch (instr->Mask(DataProcessing1SourceMask)) {
case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSize)); break;
case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSize)); break;
case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSize)); break;
case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSize)); break;
case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
break;
case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
break;
case CLS_w: {
set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSize));
set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits));
break;
}
case CLS_x: {
set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSize));
set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits));
break;
}
default: UNIMPLEMENTED();
@ -1657,7 +1673,7 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
ASSERT((num_bits == kWRegSize) || (num_bits == kXRegSize));
ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
uint64_t result = 0;
for (unsigned i = 0; i < num_bits; i++) {
result = (result << 1) | (value & 1);
@ -1762,7 +1778,8 @@ void Simulator::VisitDataProcessing2Source(Instruction* instr) {
default: UNIMPLEMENTED();
}
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
if (shift_op != NO_SHIFT) {
// Shift distance encoded in the least-significant five/six bits of the
// register.
@ -1798,7 +1815,8 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
void Simulator::VisitDataProcessing3Source(Instruction* instr) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
int64_t result = 0;
// Extract and sign- or zero-extend 32-bit arguments for widening operations.
@ -1830,7 +1848,8 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
void Simulator::VisitBitfield(Instruction* instr) {
unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
: kWRegSizeInBits;
int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
int64_t R = instr->ImmR();
int64_t S = instr->ImmS();
@ -1884,8 +1903,8 @@ void Simulator::VisitBitfield(Instruction* instr) {
void Simulator::VisitExtract(Instruction* instr) {
unsigned lsb = instr->ImmS();
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
: kWRegSize;
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
: kWRegSizeInBits;
set_reg(reg_size,
instr->Rd(),
(static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) |
@ -2081,7 +2100,8 @@ uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
void Simulator::VisitFPCompare(Instruction* instr) {
AssertSupportedFPCR();
unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize;
unsigned reg_size = instr->FPType() == FP32 ? kSRegSizeInBits
: kDRegSizeInBits;
double fn_val = fpreg(reg_size, instr->Rn());
switch (instr->Mask(FPCompareMask)) {
@ -2103,7 +2123,8 @@ void Simulator::VisitFPConditionalCompare(Instruction* instr) {
if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
// If the condition passes, set the status flags to the result of
// comparing the operands.
unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize;
unsigned reg_size = instr->FPType() == FP32 ? kSRegSizeInBits
: kDRegSizeInBits;
FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
} else {
// If the condition fails, set the status flags to the nzcv immediate.

View File

@ -186,8 +186,8 @@ class SimRegisterBase {
protected:
uint8_t value_[kSizeInBytes];
};
typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
typedef SimRegisterBase<kDRegSizeInBytes> SimFPRegister; // v0-v31
typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31
typedef SimRegisterBase<kDRegSize> SimFPRegister; // v0-v31
class Simulator : public DecoderVisitor {
@ -358,13 +358,14 @@ class Simulator : public DecoderVisitor {
// Return 'size' bits of the value of an integer register, as the specified
// type. The value is zero-extended to fill the result.
//
// The only supported values of 'size' are kXRegSize and kWRegSize.
// The only supported values of 'size' are kXRegSizeInBits and
// kWRegSizeInBits.
template<typename T>
T reg(unsigned size, unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSize) || (size == kWRegSize));
ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
ASSERT(code < kNumberOfRegisters);
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
@ -400,13 +401,14 @@ class Simulator : public DecoderVisitor {
// Write 'size' bits of 'value' into an integer register. The value is
// zero-extended. This behaviour matches AArch64 register writes.
//
// The only supported values of 'size' are kXRegSize and kWRegSize.
// The only supported values of 'size' are kXRegSizeInBits and
// kWRegSizeInBits.
template<typename T>
void set_reg(unsigned size, unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSize) || (size == kWRegSize));
ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
ASSERT(code < kNumberOfRegisters);
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
@ -425,12 +427,12 @@ class Simulator : public DecoderVisitor {
// Common specialized accessors for the set_reg() template.
void set_wreg(unsigned code, int32_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(kWRegSize, code, value, r31mode);
set_reg(kWRegSizeInBits, code, value, r31mode);
}
void set_xreg(unsigned code, int64_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(kXRegSize, code, value, r31mode);
set_reg(kXRegSizeInBits, code, value, r31mode);
}
// Commonly-used special cases.
@ -458,12 +460,13 @@ class Simulator : public DecoderVisitor {
// Return 'size' bits of the value of a floating-point register, as the
// specified type. The value is zero-extended to fill the result.
//
// The only supported values of 'size' are kDRegSize and kSRegSize.
// The only supported values of 'size' are kDRegSizeInBits and
// kSRegSizeInBits.
template<typename T>
T fpreg(unsigned size, unsigned code) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kDRegSize) || (size == kSRegSize));
ASSERT((size == kDRegSizeInBits) || (size == kSRegSizeInBits));
ASSERT(code < kNumberOfFPRegisters);
return fpregisters_[code].Get<T>(size_in_bytes);
}
@ -493,8 +496,8 @@ class Simulator : public DecoderVisitor {
double fpreg(unsigned size, unsigned code) const {
switch (size) {
case kSRegSize: return sreg(code);
case kDRegSize: return dreg(code);
case kSRegSizeInBits: return sreg(code);
case kDRegSizeInBits: return dreg(code);
default:
UNREACHABLE();
return 0.0;
@ -505,8 +508,7 @@ class Simulator : public DecoderVisitor {
// This behaviour matches AArch64 register writes.
template<typename T>
void set_fpreg(unsigned code, T value) {
ASSERT((sizeof(value) == kDRegSizeInBytes) ||
(sizeof(value) == kSRegSizeInBytes));
ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
ASSERT(code < kNumberOfFPRegisters);
fpregisters_[code].Set(value, sizeof(value));
}

View File

@ -7150,7 +7150,7 @@ static void TestUScvtfHelper(uint64_t in,
// Corrupt the top word, in case it is accidentally used during W-register
// conversions.
__ Mov(x11, 0x5555555555555555);
__ Bfi(x11, x10, 0, kWRegSize);
__ Bfi(x11, x10, 0, kWRegSizeInBits);
// Test integer conversions.
__ Scvtf(d0, x10);
@ -7168,10 +7168,10 @@ static void TestUScvtfHelper(uint64_t in,
__ Ucvtf(d1, x10, fbits);
__ Scvtf(d2, w11, fbits);
__ Ucvtf(d3, w11, fbits);
__ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
__ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
__ Str(d2, MemOperand(x2, fbits * kDRegSizeInBytes));
__ Str(d3, MemOperand(x3, fbits * kDRegSizeInBytes));
__ Str(d0, MemOperand(x0, fbits * kDRegSize));
__ Str(d1, MemOperand(x1, fbits * kDRegSize));
__ Str(d2, MemOperand(x2, fbits * kDRegSize));
__ Str(d3, MemOperand(x3, fbits * kDRegSize));
}
// Conversions from W registers can only handle fbits values <= 32, so just
@ -7179,8 +7179,8 @@ static void TestUScvtfHelper(uint64_t in,
for (int fbits = 33; fbits <= 64; fbits++) {
__ Scvtf(d0, x10, fbits);
__ Ucvtf(d1, x10, fbits);
__ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
__ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
__ Str(d0, MemOperand(x0, fbits * kDRegSize));
__ Str(d1, MemOperand(x1, fbits * kDRegSize));
}
END();
@ -7305,7 +7305,7 @@ static void TestUScvtf32Helper(uint64_t in,
// Corrupt the top word, in case it is accidentally used during W-register
// conversions.
__ Mov(x11, 0x5555555555555555);
__ Bfi(x11, x10, 0, kWRegSize);
__ Bfi(x11, x10, 0, kWRegSizeInBits);
// Test integer conversions.
__ Scvtf(s0, x10);
@ -7323,10 +7323,10 @@ static void TestUScvtf32Helper(uint64_t in,
__ Ucvtf(s1, x10, fbits);
__ Scvtf(s2, w11, fbits);
__ Ucvtf(s3, w11, fbits);
__ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
__ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
__ Str(s2, MemOperand(x2, fbits * kSRegSizeInBytes));
__ Str(s3, MemOperand(x3, fbits * kSRegSizeInBytes));
__ Str(s0, MemOperand(x0, fbits * kSRegSize));
__ Str(s1, MemOperand(x1, fbits * kSRegSize));
__ Str(s2, MemOperand(x2, fbits * kSRegSize));
__ Str(s3, MemOperand(x3, fbits * kSRegSize));
}
// Conversions from W registers can only handle fbits values <= 32, so just
@ -7334,8 +7334,8 @@ static void TestUScvtf32Helper(uint64_t in,
for (int fbits = 33; fbits <= 64; fbits++) {
__ Scvtf(s0, x10, fbits);
__ Ucvtf(s1, x10, fbits);
__ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
__ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
__ Str(s0, MemOperand(x0, fbits * kSRegSize));
__ Str(s1, MemOperand(x1, fbits * kSRegSize));
}
END();
@ -7981,7 +7981,7 @@ TEST(peek_poke_mixed) {
__ SetStackPointer(x4);
__ Poke(wzr, 0); // Clobber the space we're about to drop.
__ Drop(1, kWRegSizeInBytes);
__ Drop(1, kWRegSize);
__ Peek(x6, 0);
__ Claim(1);
__ Peek(w7, 10);
@ -8163,23 +8163,23 @@ TEST(push_pop_jssp_simple_32) {
INIT_V8();
for (int claim = 0; claim <= 8; claim++) {
for (int count = 0; count <= 8; count++) {
PushPopJsspSimpleHelper(count, claim, kWRegSize,
PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
PushPopByFour, PushPopByFour);
PushPopJsspSimpleHelper(count, claim, kWRegSize,
PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
PushPopByFour, PushPopRegList);
PushPopJsspSimpleHelper(count, claim, kWRegSize,
PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
PushPopRegList, PushPopByFour);
PushPopJsspSimpleHelper(count, claim, kWRegSize,
PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
PushPopRegList, PushPopRegList);
}
// Test with the maximum number of registers.
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
PushPopByFour, PushPopByFour);
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
PushPopByFour, PushPopRegList);
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
PushPopRegList, PushPopByFour);
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
PushPopRegList, PushPopRegList);
}
}
@ -8189,23 +8189,23 @@ TEST(push_pop_jssp_simple_64) {
INIT_V8();
for (int claim = 0; claim <= 8; claim++) {
for (int count = 0; count <= 8; count++) {
PushPopJsspSimpleHelper(count, claim, kXRegSize,
PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
PushPopByFour, PushPopByFour);
PushPopJsspSimpleHelper(count, claim, kXRegSize,
PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
PushPopByFour, PushPopRegList);
PushPopJsspSimpleHelper(count, claim, kXRegSize,
PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
PushPopRegList, PushPopByFour);
PushPopJsspSimpleHelper(count, claim, kXRegSize,
PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
PushPopRegList, PushPopRegList);
}
// Test with the maximum number of registers.
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
PushPopByFour, PushPopByFour);
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
PushPopByFour, PushPopRegList);
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
PushPopRegList, PushPopByFour);
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
PushPopRegList, PushPopRegList);
}
}
@ -8346,23 +8346,23 @@ TEST(push_pop_fp_jssp_simple_32) {
INIT_V8();
for (int claim = 0; claim <= 8; claim++) {
for (int count = 0; count <= 8; count++) {
PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
PushPopByFour, PushPopByFour);
PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
PushPopByFour, PushPopRegList);
PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
PushPopRegList, PushPopByFour);
PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
PushPopRegList, PushPopRegList);
}
// Test with the maximum number of registers.
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
PushPopByFour, PushPopByFour);
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
PushPopByFour, PushPopRegList);
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
PushPopRegList, PushPopByFour);
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
PushPopRegList, PushPopRegList);
}
}
@ -8372,23 +8372,23 @@ TEST(push_pop_fp_jssp_simple_64) {
INIT_V8();
for (int claim = 0; claim <= 8; claim++) {
for (int count = 0; count <= 8; count++) {
PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
PushPopByFour, PushPopByFour);
PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
PushPopByFour, PushPopRegList);
PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
PushPopRegList, PushPopByFour);
PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
PushPopRegList, PushPopRegList);
}
// Test with the maximum number of registers.
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
PushPopByFour, PushPopByFour);
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
PushPopByFour, PushPopRegList);
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
PushPopRegList, PushPopByFour);
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
PushPopRegList, PushPopRegList);
}
}
@ -8486,7 +8486,7 @@ static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
TEST(push_pop_jssp_mixed_methods_64) {
INIT_V8();
for (int claim = 0; claim <= 8; claim++) {
PushPopJsspMixedMethodsHelper(claim, kXRegSize);
PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
}
}
@ -8494,7 +8494,7 @@ TEST(push_pop_jssp_mixed_methods_64) {
TEST(push_pop_jssp_mixed_methods_32) {
INIT_V8();
for (int claim = 0; claim <= 8; claim++) {
PushPopJsspMixedMethodsHelper(claim, kWRegSize);
PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
}
}
@ -8633,7 +8633,7 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
// Because we were pushing several registers at a time, we probably pushed
// more than we needed to.
if (active_w_slots > requested_w_slots) {
__ Drop(active_w_slots - requested_w_slots, kWRegSizeInBytes);
__ Drop(active_w_slots - requested_w_slots, kWRegSize);
// Bump the number of active W-sized slots back to where it should be,
// and fill the empty space with a dummy value.
do {
@ -8848,8 +8848,8 @@ TEST(push_queued) {
// Actually push them.
queue.PushQueued();
Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSize, 0, 6));
Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSize, 0, 2));
Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
// Pop them conventionally.
__ Pop(s2);
@ -8926,8 +8926,8 @@ TEST(pop_queued) {
queue.Queue(x1);
queue.Queue(x0);
Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSize, 0, 6));
Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSize, 0, 2));
Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
// Actually pop them.
queue.PopQueued();
@ -9419,10 +9419,10 @@ TEST(cpureglist_utils_empty) {
// Test an empty list.
// Empty lists can have type and size properties. Check that we can create
// them, and that they are empty.
CPURegList reg32(CPURegister::kRegister, kWRegSize, 0);
CPURegList reg64(CPURegister::kRegister, kXRegSize, 0);
CPURegList fpreg32(CPURegister::kFPRegister, kSRegSize, 0);
CPURegList fpreg64(CPURegister::kFPRegister, kDRegSize, 0);
CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0);
CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0);
CHECK(reg32.IsEmpty());
CHECK(reg64.IsEmpty());

View File

@ -221,10 +221,10 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
r[i] = Register::Create(n, reg_size);
}
if (x) {
x[i] = Register::Create(n, kXRegSize);
x[i] = Register::Create(n, kXRegSizeInBits);
}
if (w) {
w[i] = Register::Create(n, kWRegSize);
w[i] = Register::Create(n, kWRegSizeInBits);
}
list |= (1UL << n);
i++;
@ -248,10 +248,10 @@ RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
v[i] = FPRegister::Create(n, reg_size);
}
if (d) {
d[i] = FPRegister::Create(n, kDRegSize);
d[i] = FPRegister::Create(n, kDRegSizeInBits);
}
if (s) {
s[i] = FPRegister::Create(n, kSRegSize);
s[i] = FPRegister::Create(n, kSRegSizeInBits);
}
list |= (1UL << n);
i++;
@ -268,7 +268,7 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
Register first = NoReg;
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
if (reg_list & (1UL << i)) {
Register xn = Register::Create(i, kXRegSize);
Register xn = Register::Create(i, kXRegSizeInBits);
// We should never write into csp here.
ASSERT(!xn.Is(csp));
if (!xn.IsZero()) {
@ -291,7 +291,7 @@ void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
FPRegister first = NoFPReg;
for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
if (reg_list & (1UL << i)) {
FPRegister dn = FPRegister::Create(i, kDRegSize);
FPRegister dn = FPRegister::Create(i, kDRegSizeInBits);
if (!first.IsValid()) {
// This is the first register we've hit, so construct the literal.
__ Fmov(dn, value);
@ -354,37 +354,37 @@ void RegisterDump::Dump(MacroAssembler* masm) {
// The stack pointer cannot be stored directly; it needs to be moved into
// another register first. Also, we pushed four X registers, so we need to
// compensate here.
__ Add(tmp, csp, 4 * kXRegSizeInBytes);
__ Add(tmp, csp, 4 * kXRegSize);
__ Str(tmp, MemOperand(dump_base, sp_offset));
__ Add(tmp_w, wcsp, 4 * kXRegSizeInBytes);
__ Add(tmp_w, wcsp, 4 * kXRegSize);
__ Str(tmp_w, MemOperand(dump_base, wsp_offset));
// Dump X registers.
__ Add(dump, dump_base, x_offset);
for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
__ Stp(Register::XRegFromCode(i), Register::XRegFromCode(i + 1),
MemOperand(dump, i * kXRegSizeInBytes));
MemOperand(dump, i * kXRegSize));
}
// Dump W registers.
__ Add(dump, dump_base, w_offset);
for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
__ Stp(Register::WRegFromCode(i), Register::WRegFromCode(i + 1),
MemOperand(dump, i * kWRegSizeInBytes));
MemOperand(dump, i * kWRegSize));
}
// Dump D registers.
__ Add(dump, dump_base, d_offset);
for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
__ Stp(FPRegister::DRegFromCode(i), FPRegister::DRegFromCode(i + 1),
MemOperand(dump, i * kDRegSizeInBytes));
MemOperand(dump, i * kDRegSize));
}
// Dump S registers.
__ Add(dump, dump_base, s_offset);
for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
__ Stp(FPRegister::SRegFromCode(i), FPRegister::SRegFromCode(i + 1),
MemOperand(dump, i * kSRegSizeInBytes));
MemOperand(dump, i * kSRegSize));
}
// Dump the flags.
@ -404,18 +404,18 @@ void RegisterDump::Dump(MacroAssembler* masm) {
__ Pop(tmp, dump, dump_base, xzr);
__ Add(dump2, dump2_base, w_offset);
__ Str(dump_base_w, MemOperand(dump2, dump_base.code() * kWRegSizeInBytes));
__ Str(dump_w, MemOperand(dump2, dump.code() * kWRegSizeInBytes));
__ Str(tmp_w, MemOperand(dump2, tmp.code() * kWRegSizeInBytes));
__ Str(dump_base_w, MemOperand(dump2, dump_base.code() * kWRegSize));
__ Str(dump_w, MemOperand(dump2, dump.code() * kWRegSize));
__ Str(tmp_w, MemOperand(dump2, tmp.code() * kWRegSize));
__ Add(dump2, dump2_base, x_offset);
__ Str(dump_base, MemOperand(dump2, dump_base.code() * kXRegSizeInBytes));
__ Str(dump, MemOperand(dump2, dump.code() * kXRegSizeInBytes));
__ Str(tmp, MemOperand(dump2, tmp.code() * kXRegSizeInBytes));
__ Str(dump_base, MemOperand(dump2, dump_base.code() * kXRegSize));
__ Str(dump, MemOperand(dump2, dump.code() * kXRegSize));
__ Str(tmp, MemOperand(dump2, tmp.code() * kXRegSize));
// Finally, restore dump2_base and dump2.
__ Ldr(dump2_base, MemOperand(dump2, dump2_base.code() * kXRegSizeInBytes));
__ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSizeInBytes));
__ Ldr(dump2_base, MemOperand(dump2, dump2_base.code() * kXRegSize));
__ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSize));
// Restore the MacroAssembler's scratch registers.
masm->TmpList()->set_list(old_tmp_list);

View File

@ -162,12 +162,12 @@ class RegisterDump {
} dump_;
static dump_t for_sizeof();
STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kDRegSizeInBytes);
STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kSRegSizeInBytes);
STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kXRegSizeInBytes);
STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kWRegSizeInBytes);
STATIC_ASSERT(sizeof(for_sizeof().x_[0]) == kXRegSizeInBytes);
STATIC_ASSERT(sizeof(for_sizeof().w_[0]) == kWRegSizeInBytes);
STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kDRegSize);
STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kSRegSize);
STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kXRegSize);
STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kWRegSize);
STATIC_ASSERT(sizeof(for_sizeof().x_[0]) == kXRegSize);
STATIC_ASSERT(sizeof(for_sizeof().w_[0]) == kWRegSize);
};
// Some of these methods don't use the RegisterDump argument, but they have to