ARM64: Clean up support for explicit literal load.
This is the first patch to improve literal pool handling in arm64. It cleans
up assembler and macro-assembler access to literal pools.

BUG=
R=rmcilroy@chromium.org

Review URL: https://codereview.chromium.org/318773009

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21723 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b06b63bbff
commit 7ff3e3c86b
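In outline: relocation information moves off Operand into a new Immediate
wrapper, and the ad-hoc literal-load entry points (LoadRelocated, LoadLiteral)
are replaced by two explicit assembler methods. A minimal sketch of the
resulting call sites, assembled from the hunks below (temp and ip0 stand in
for whatever registers a caller would use):

    // Old:
    //   __ LoadRelocated(temp, operand);
    //   __ LoadLiteral(ip0, 2 * kInstructionSize);
    // New: the Immediate carries (value, rmode); offsets are scaled to words.
    __ Ldr(temp, operand.immediate());
    __ ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);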
@@ -261,29 +261,23 @@ inline FPRegister CPURegister::D() const {
 }
 
 
-// Operand.
-template<typename T>
-Operand::Operand(Handle<T> value) : reg_(NoReg) {
-  initialize_handle(value);
-}
-
-
+// Immediate.
 // Default initializer is for int types
-template<typename int_t>
-struct OperandInitializer {
+template<typename T>
+struct ImmediateInitializer {
   static const bool kIsIntType = true;
-  static inline RelocInfo::Mode rmode_for(int_t) {
-    return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
+  static inline RelocInfo::Mode rmode_for(T) {
+    return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
   }
-  static inline int64_t immediate_for(int_t t) {
-    STATIC_ASSERT(sizeof(int_t) <= 8);
+  static inline int64_t immediate_for(T t) {
+    STATIC_ASSERT(sizeof(T) <= 8);
     return t;
   }
 };
 
 
 template<>
-struct OperandInitializer<Smi*> {
+struct ImmediateInitializer<Smi*> {
   static const bool kIsIntType = false;
   static inline RelocInfo::Mode rmode_for(Smi* t) {
     return RelocInfo::NONE64;
@@ -295,7 +289,7 @@ struct OperandInitializer<Smi*> {
 
 
 template<>
-struct OperandInitializer<ExternalReference> {
+struct ImmediateInitializer<ExternalReference> {
   static const bool kIsIntType = false;
   static inline RelocInfo::Mode rmode_for(ExternalReference t) {
     return RelocInfo::EXTERNAL_REFERENCE;
@@ -307,27 +301,46 @@ struct OperandInitializer<ExternalReference> {
 
 
 template<typename T>
-Operand::Operand(T t)
-    : immediate_(OperandInitializer<T>::immediate_for(t)),
-      reg_(NoReg),
-      rmode_(OperandInitializer<T>::rmode_for(t)) {}
+Immediate::Immediate(Handle<T> value) {
+  InitializeHandle(value);
+}
+
+
+template<typename T>
+Immediate::Immediate(T t)
+    : value_(ImmediateInitializer<T>::immediate_for(t)),
+      rmode_(ImmediateInitializer<T>::rmode_for(t)) {}
+
+
+template<typename T>
+Immediate::Immediate(T t, RelocInfo::Mode rmode)
+    : value_(ImmediateInitializer<T>::immediate_for(t)),
+      rmode_(rmode) {
+  STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
+}
+
+
+// Operand.
+template<typename T>
+Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
+
+
+template<typename T>
+Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}
 
 
 template<typename T>
 Operand::Operand(T t, RelocInfo::Mode rmode)
-    : immediate_(OperandInitializer<T>::immediate_for(t)),
-      reg_(NoReg),
-      rmode_(rmode) {
-  STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
-}
+    : immediate_(t, rmode),
+      reg_(NoReg) {}
 
 
 Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
-    : reg_(reg),
+    : immediate_(0),
+      reg_(reg),
       shift_(shift),
       extend_(NO_EXTEND),
-      shift_amount_(shift_amount),
-      rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+      shift_amount_(shift_amount) {
   ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
   ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
   ASSERT(!reg.IsSP());
@@ -335,11 +348,11 @@ Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
 
 
 Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
-    : reg_(reg),
+    : immediate_(0),
+      reg_(reg),
       shift_(NO_SHIFT),
       extend_(extend),
-      shift_amount_(shift_amount),
-      rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+      shift_amount_(shift_amount) {
   ASSERT(reg.IsValid());
   ASSERT(shift_amount <= 4);
   ASSERT(!reg.IsSP());
@@ -366,7 +379,7 @@ bool Operand::IsExtendedRegister() const {
 
 bool Operand::IsZero() const {
   if (IsImmediate()) {
-    return immediate() == 0;
+    return ImmediateValue() == 0;
   } else {
     return reg().IsZero();
   }
@@ -380,12 +393,18 @@ Operand Operand::ToExtendedRegister() const {
 }
 
 
-int64_t Operand::immediate() const {
+Immediate Operand::immediate() const {
   ASSERT(IsImmediate());
   return immediate_;
 }
 
 
+int64_t Operand::ImmediateValue() const {
+  ASSERT(IsImmediate());
+  return immediate_.value();
+}
+
+
 Register Operand::reg() const {
   ASSERT(IsShiftedRegister() || IsExtendedRegister());
   return reg_;
@@ -473,7 +492,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
   ASSERT(base.Is64Bits() && !base.IsZero());
 
   if (offset.IsImmediate()) {
-    offset_ = offset.immediate();
+    offset_ = offset.ImmediateValue();
 
     regoffset_ = NoReg;
   } else if (offset.IsShiftedRegister()) {
@@ -944,6 +963,16 @@ LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
 }
 
 
+LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
+  if (rt.IsRegister()) {
+    return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
+  } else {
+    ASSERT(rt.IsFPRegister());
+    return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
+  }
+}
+
+
 int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
   ASSERT(kStartOfLabelLinkChain == 0);
   int offset = LinkAndGetByteOffsetTo(label);
@@ -1200,11 +1229,6 @@ const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
 }
 
 
-void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
-  LoadRelocatedValue(rt, operand, LDR_x_lit);
-}
-
-
 inline void Assembler::CheckBufferSpace() {
   ASSERT(pc_ < (buffer_ + buffer_size_));
   if (buffer_space() < kGap) {
@@ -268,29 +268,31 @@ bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
 }
 
 
-void Operand::initialize_handle(Handle<Object> handle) {
+void Immediate::InitializeHandle(Handle<Object> handle) {
   AllowDeferredHandleDereference using_raw_address;
 
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
   if (obj->IsHeapObject()) {
     ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
-    immediate_ = reinterpret_cast<intptr_t>(handle.location());
+    value_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
   } else {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
-    immediate_ = reinterpret_cast<intptr_t>(obj);
+    value_ = reinterpret_cast<intptr_t>(obj);
     rmode_ = RelocInfo::NONE64;
   }
 }
 
 
 bool Operand::NeedsRelocation(const Assembler* assembler) const {
-  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+  RelocInfo::Mode rmode = immediate_.rmode();
+
+  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
     return assembler->serializer_enabled();
   }
 
-  return !RelocInfo::IsNone(rmode_);
+  return !RelocInfo::IsNone(rmode);
 }
 
 
@@ -1473,27 +1475,23 @@ void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
 }
 
 
-void Assembler::ldr(const Register& rt, uint64_t imm) {
-  // TODO(all): Constant pool may be garbage collected. Hence we cannot store
-  // arbitrary values in them. Manually move it for now. Fix
-  // MacroAssembler::Fmov when this is implemented.
-  UNIMPLEMENTED();
+void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
+  // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
+  // constant pool. It should not be emitted.
+  ASSERT(!rt.IsZero());
+  Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
 }
 
 
-void Assembler::ldr(const FPRegister& ft, double imm) {
-  // TODO(all): Constant pool may be garbage collected. Hence we cannot store
-  // arbitrary values in them. Manually move it for now. Fix
-  // MacroAssembler::Fmov when this is implemented.
-  UNIMPLEMENTED();
-}
+void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
+  // Currently we only support 64-bit literals.
+  ASSERT(rt.Is64Bits());
 
-
-void Assembler::ldr(const FPRegister& ft, float imm) {
-  // TODO(all): Constant pool may be garbage collected. Hence we cannot store
-  // arbitrary values in them. Manually move it for now. Fix
-  // MacroAssembler::Fmov when this is implemented.
-  UNIMPLEMENTED();
+  RecordRelocInfo(imm.rmode(), imm.value());
+  BlockConstPoolFor(1);
+  // The load will be patched when the constpool is emitted, patching code
+  // expect a load literal with offset 0.
+  ldr_pcrel(rt, 0);
 }
 
 
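The new Assembler::ldr above never encodes a real offset itself: it records
the relocation entry, blocks the constant pool for one instruction so the
pool cannot be emitted in the middle of the sequence, and then emits a
pc-relative load with offset 0 that is patched when the pool is emitted.
A sketch of the equivalent emission sequence (names taken from the hunk):

    RecordRelocInfo(imm.rmode(), imm.value());  // value lands in the pool
    BlockConstPoolFor(1);                       // keep the pool out of the way
    ldr_pcrel(rt, 0);                           // placeholder, patched at pool emission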
@@ -1919,7 +1917,7 @@ void Assembler::AddSub(const Register& rd,
   ASSERT(rd.SizeInBits() == rn.SizeInBits());
   ASSERT(!operand.NeedsRelocation(this));
   if (operand.IsImmediate()) {
-    int64_t immediate = operand.immediate();
+    int64_t immediate = operand.ImmediateValue();
     ASSERT(IsImmAddSub(immediate));
     Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
     Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
@@ -2015,7 +2013,7 @@ void Assembler::Logical(const Register& rd,
   ASSERT(rd.SizeInBits() == rn.SizeInBits());
   ASSERT(!operand.NeedsRelocation(this));
   if (operand.IsImmediate()) {
-    int64_t immediate = operand.immediate();
+    int64_t immediate = operand.ImmediateValue();
     unsigned reg_size = rd.SizeInBits();
 
     ASSERT(immediate != 0);
@@ -2067,7 +2065,7 @@ void Assembler::ConditionalCompare(const Register& rn,
   Instr ccmpop;
   ASSERT(!operand.NeedsRelocation(this));
   if (operand.IsImmediate()) {
-    int64_t immediate = operand.immediate();
+    int64_t immediate = operand.ImmediateValue();
     ASSERT(IsImmConditionalCompare(immediate));
     ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
   } else {
@@ -2269,28 +2267,6 @@ bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
 }
 
 
-void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
-  ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
-  // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
-  // constant pool. It should not be emitted.
-  ASSERT(!rt.Is(xzr));
-  Emit(LDR_x_lit |
-       ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
-       Rt(rt));
-}
-
-
-void Assembler::LoadRelocatedValue(const CPURegister& rt,
-                                   const Operand& operand,
-                                   LoadLiteralOp op) {
-  int64_t imm = operand.immediate();
-  ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
-  RecordRelocInfo(operand.rmode(), imm);
-  BlockConstPoolFor(1);
-  Emit(op | ImmLLiteral(0) | Rt(rt));
-}
-
-
 // Test if a given value can be encoded in the immediate field of a logical
 // instruction.
 // If it can be encoded, the function returns true, and values pointed to by n,
@@ -599,6 +599,31 @@ class CPURegList {
 #define kCallerSaved CPURegList::GetCallerSaved()
 #define kCallerSavedFP CPURegList::GetCallerSavedFP()
 
+// -----------------------------------------------------------------------------
+// Immediates.
+class Immediate {
+ public:
+  template<typename T>
+  inline explicit Immediate(Handle<T> handle);
+
+  // This is allowed to be an implicit constructor because Immediate is
+  // a wrapper class that doesn't normally perform any type conversion.
+  template<typename T>
+  inline Immediate(T value);  // NOLINT(runtime/explicit)
+
+  template<typename T>
+  inline Immediate(T value, RelocInfo::Mode rmode);
+
+  int64_t value() const { return value_; }
+  RelocInfo::Mode rmode() const { return rmode_; }
+
+ private:
+  void InitializeHandle(Handle<Object> value);
+
+  int64_t value_;
+  RelocInfo::Mode rmode_;
+};
+
+
 // -----------------------------------------------------------------------------
 // Operands.
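For orientation, a minimal sketch of the three Immediate constructors declared
above (the values are invented for illustration):

    Immediate a(42);                     // int: rmode_ defaults to NONE32
    Immediate b(INT64_C(42));            // 8-byte int type: rmode_ is NONE64
    Immediate c(42, RelocInfo::NONE64);  // explicit mode; int types only
    int64_t v = b.value();               // raw 64-bit payload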
@@ -634,8 +659,8 @@ class Operand {
   inline Operand(T t);  // NOLINT(runtime/explicit)
 
   // Implicit constructor for int types.
-  template<typename int_t>
-  inline Operand(int_t t, RelocInfo::Mode rmode);
+  template<typename T>
+  inline Operand(T t, RelocInfo::Mode rmode);
 
   inline bool IsImmediate() const;
   inline bool IsShiftedRegister() const;
@@ -646,15 +671,14 @@ class Operand {
   // which helps in the encoding of instructions that use the stack pointer.
   inline Operand ToExtendedRegister() const;
 
-  inline int64_t immediate() const;
+  inline Immediate immediate() const;
+  inline int64_t ImmediateValue() const;
   inline Register reg() const;
   inline Shift shift() const;
   inline Extend extend() const;
   inline unsigned shift_amount() const;
 
   // Relocation information.
-  RelocInfo::Mode rmode() const { return rmode_; }
-  void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
   bool NeedsRelocation(const Assembler* assembler) const;
 
   // Helpers
@@ -662,13 +686,11 @@ class Operand {
   inline static Operand UntagSmiAndScale(Register smi, int scale);
 
  private:
-  void initialize_handle(Handle<Object> value);
-
-  int64_t immediate_;
+  Immediate immediate_;
   Register reg_;
   Shift shift_;
   Extend extend_;
   unsigned shift_amount_;
-  RelocInfo::Mode rmode_;
 };
 
 
@@ -1369,9 +1391,6 @@ class Assembler : public AssemblerBase {
 
   // Memory instructions.
 
-  // Load literal from pc + offset_from_pc.
-  void LoadLiteral(const CPURegister& rt, int offset_from_pc);
-
   // Load integer or FP register.
   void ldr(const CPURegister& rt, const MemOperand& src);
 
@@ -1418,12 +1437,11 @@ class Assembler : public AssemblerBase {
   void stnp(const CPURegister& rt, const CPURegister& rt2,
             const MemOperand& dst);
 
-  // Load literal to register.
-  void ldr(const Register& rt, uint64_t imm);
+  // Load literal to register from a pc relative address.
+  void ldr_pcrel(const CPURegister& rt, int imm19);
 
-  // Load literal to FP register.
-  void ldr(const FPRegister& ft, double imm);
-  void ldr(const FPRegister& ft, float imm);
+  // Load literal to register.
+  void ldr(const CPURegister& rt, const Immediate& imm);
 
   // Move instructions. The default shift of -1 indicates that the move
   // instruction will calculate an appropriate 16-bit immediate and left shift
@@ -1841,7 +1859,6 @@ class Assembler : public AssemblerBase {
   void CheckVeneerPool(bool force_emit, bool require_jump,
                        int margin = kVeneerDistanceMargin);
 
-
   class BlockPoolsScope {
    public:
    explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
@@ -1857,10 +1874,6 @@ class Assembler : public AssemblerBase {
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
   };
 
-  // Available for constrained code generation scopes. Prefer
-  // MacroAssembler::Mov() when possible.
-  inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
-
  protected:
   inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
 
@@ -1927,6 +1940,7 @@ class Assembler : public AssemblerBase {
       const CPURegister& rt, const CPURegister& rt2);
   static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
       const CPURegister& rt, const CPURegister& rt2);
+  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
 
   // Remove the specified branch from the unbound label link chain.
   // If available, a veneer for this label can be used for other branches in the
@@ -1959,11 +1973,6 @@ class Assembler : public AssemblerBase {
                 const CPURegister& rt2,
                 const MemOperand& addr,
                 LoadStorePairNonTemporalOp op);
-  // Register the relocation information for the operand and load its value
-  // into rt.
-  void LoadRelocatedValue(const CPURegister& rt,
-                          const Operand& operand,
-                          LoadLiteralOp op);
   void ConditionalSelect(const Register& rd,
                          const Register& rn,
                          const Register& rm,
@@ -25,8 +25,7 @@ namespace internal {
 
 const unsigned kInstructionSize = 4;
 const unsigned kInstructionSizeLog2 = 2;
-const unsigned kLiteralEntrySize = 4;
-const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kLoadLiteralScaleLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MB;
 
 const unsigned kNumberOfRegisters = 32;
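The renamed constant makes explicit that the imm19 field of a literal load
counts 4-byte words, not bytes, so every patch site below scales its byte
offset before calling ldr_pcrel. For example:

    // 3 instructions ahead of pc: 12 bytes >> 2 == 3 words.
    patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);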
@@ -46,7 +46,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
 
   // The first instruction of a patched return sequence must be a load literal
   // loading the address of the debug break return code.
-  patcher.LoadLiteral(ip0, 3 * kInstructionSize);
+  patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
   // TODO(all): check the following is correct.
   // The debug break return code will push a frame and call statically compiled
   // code. By using blr, even though control will not return after the branch,
@@ -105,7 +105,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
 
   // The first instruction of a patched debug break slot must be a load literal
   // loading the address of the debug break slot code.
-  patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+  patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
   // TODO(all): check the following is correct.
   // The debug break slot code will push a frame and call statically compiled
   // code. By using blr, event hough control will not return after the branch,
@@ -48,7 +48,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
     Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
 
     PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
-    patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+    patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
    patcher.blr(ip0);
    patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
 
@@ -1384,7 +1384,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
       switch (format[2]) {
         case 'L': {  // ILLiteral - Immediate Load Literal.
           AppendToOutput("pc%+" PRId64,
-                         instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+                         instr->ImmLLiteral() << kLoadLiteralScaleLog2);
           return 9;
         }
         case 'S': {  // ILS - Immediate Load/Store.
@@ -459,7 +459,7 @@ void FullCodeGenerator::EmitReturnSequence() {
       // TODO(all): This implementation is overkill as it supports 2**31+1
      // arguments, consider how to improve it without creating a security
      // hole.
-      __ LoadLiteral(ip0, 3 * kInstructionSize);
+      __ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
      __ add(current_sp, current_sp, ip0);
      __ ret();
      __ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
@@ -280,7 +280,7 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
 
 void Instruction::SetImmLLiteral(Instruction* source) {
   ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
-  ptrdiff_t offset = DistanceTo(source) >> kLiteralEntrySizeLog2;
+  ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
   Instr imm = Assembler::ImmLLiteral(offset);
   Instr mask = ImmLLiteral_mask;
 
@@ -353,7 +353,7 @@ class Instruction {
   void SetImmLLiteral(Instruction* source);
 
   uint8_t* LiteralAddress() {
-    int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+    int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
     return reinterpret_cast<uint8_t*>(this) + offset;
   }
 
@@ -3103,7 +3103,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   __ bind(&map_check);
   // Will be patched with the cached map.
   Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
-  __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
+  __ ldr(scratch, Immediate(Handle<Object>(cell)));
   __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
   __ cmp(map, scratch);
   __ b(&cache_miss, ne);
@@ -3111,7 +3111,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
     // above, so check the size of the code generated.
     ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
     // Will be patched with the cached result.
-    __ LoadRelocated(result, Operand(factory()->the_hole_value()));
+    __ ldr(result, Immediate(factory()->the_hole_value()));
   }
   __ B(&done);
 
@@ -126,8 +126,8 @@ void MacroAssembler::Ccmp(const Register& rn,
                           StatusFlags nzcv,
                           Condition cond) {
   ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.immediate() < 0)) {
-    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+    ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN);
   } else {
     ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
   }
@@ -139,8 +139,8 @@ void MacroAssembler::Ccmn(const Register& rn,
                           StatusFlags nzcv,
                           Condition cond) {
   ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.immediate() < 0)) {
-    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+    ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
   } else {
     ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
   }
@@ -151,8 +151,8 @@ void MacroAssembler::Add(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
   ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.immediate() < 0)) {
-    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+    AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB);
   } else {
     AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
   }
@@ -162,8 +162,8 @@ void MacroAssembler::Adds(const Register& rd,
                           const Register& rn,
                           const Operand& operand) {
   ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.immediate() < 0)) {
-    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+    AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB);
   } else {
     AddSubMacro(rd, rn, operand, SetFlags, ADD);
   }
@@ -174,8 +174,8 @@ void MacroAssembler::Sub(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
   ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.immediate() < 0)) {
-    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+    AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD);
   } else {
     AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
   }
@@ -186,8 +186,8 @@ void MacroAssembler::Subs(const Register& rd,
                           const Register& rn,
                           const Operand& operand) {
   ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.immediate() < 0)) {
-    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+    AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD);
   } else {
     AddSubMacro(rd, rn, operand, SetFlags, SUB);
   }
@@ -211,7 +211,7 @@ void MacroAssembler::Neg(const Register& rd,
   ASSERT(allow_macro_instructions_);
   ASSERT(!rd.IsZero());
   if (operand.IsImmediate()) {
-    Mov(rd, -operand.immediate());
+    Mov(rd, -operand.ImmediateValue());
   } else {
     Sub(rd, AppropriateZeroRegFor(rd), operand);
   }
@@ -717,11 +717,7 @@ void MacroAssembler::Fmov(FPRegister fd, double imm) {
   } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
     fmov(fd, xzr);
   } else {
-    UseScratchRegisterScope temps(this);
-    Register tmp = temps.AcquireX();
-    // TODO(all): Use Assembler::ldr(const FPRegister& ft, double imm).
-    Mov(tmp, double_to_rawbits(imm));
-    Fmov(fd, tmp);
+    Ldr(fd, imm);
   }
 }
 
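With an arbitrary double, Fmov now loads the bit pattern from the constant
pool instead of materializing it in a scratch register; a sketch of the
resulting path for an example value:

    // Fmov(d0, 1.5) now expands (via Ldr, defined in the next hunk) to roughly:
    //   ldr(d0, Immediate(double_to_rawbits(1.5)));  // 64-bit pool entry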
@@ -880,16 +876,16 @@ void MacroAssembler::Ldpsw(const Register& rt,
 }
 
 
-void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
+void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
   ASSERT(allow_macro_instructions_);
-  ldr(ft, imm);
+  ldr(rt, imm);
 }
 
 
-void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
+void MacroAssembler::Ldr(const CPURegister& rt, double imm) {
   ASSERT(allow_macro_instructions_);
-  ASSERT(!rt.IsZero());
-  ldr(rt, imm);
+  ASSERT(rt.Is64Bits());
+  ldr(rt, Immediate(double_to_rawbits(imm)));
 }
 
 
@@ -1264,7 +1260,7 @@ void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
   InstructionAccurateScope scope(this);
   ASSERT(space.IsImmediate());
   // Align to 16 bytes.
-  uint64_t imm = RoundUp(space.immediate(), 0x10);
+  uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
   ASSERT(is_uint24(imm));
 
   Register source = StackPointer();
@@ -1633,7 +1629,7 @@ void MacroAssembler::CompareAndBranch(const Register& lhs,
                                       const Operand& rhs,
                                       Condition cond,
                                       Label* label) {
-  if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
+  if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
       ((cond == eq) || (cond == ne))) {
     if (cond == eq) {
       Cbz(lhs, label);
@@ -58,11 +58,11 @@ void MacroAssembler::LogicalMacro(const Register& rd,
 
   if (operand.NeedsRelocation(this)) {
     Register temp = temps.AcquireX();
-    LoadRelocated(temp, operand);
+    Ldr(temp, operand.immediate());
     Logical(rd, rn, temp, op);
 
   } else if (operand.IsImmediate()) {
-    int64_t immediate = operand.immediate();
+    int64_t immediate = operand.ImmediateValue();
     unsigned reg_size = rd.SizeInBits();
     ASSERT(rd.Is64Bits() || is_uint32(immediate));
 
@@ -250,11 +250,11 @@ void MacroAssembler::Mov(const Register& rd,
   Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
 
   if (operand.NeedsRelocation(this)) {
-    LoadRelocated(dst, operand);
+    Ldr(dst, operand.immediate());
 
   } else if (operand.IsImmediate()) {
     // Call the macro assembler for generic immediates.
-    Mov(dst, operand.immediate());
+    Mov(dst, operand.ImmediateValue());
 
   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
     // Emit a shift instruction if moving a shifted register. This operation
@@ -298,12 +298,12 @@ void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
   ASSERT(allow_macro_instructions_);
 
   if (operand.NeedsRelocation(this)) {
-    LoadRelocated(rd, operand);
+    Ldr(rd, operand.immediate());
     mvn(rd, rd);
 
   } else if (operand.IsImmediate()) {
     // Call the macro assembler for generic immediates.
-    Mov(rd, ~operand.immediate());
+    Mov(rd, ~operand.ImmediateValue());
 
   } else if (operand.IsExtendedRegister()) {
     // Emit two instructions for the extend case. This differs from Mov, as
@@ -355,11 +355,12 @@ void MacroAssembler::ConditionalCompareMacro(const Register& rn,
   if (operand.NeedsRelocation(this)) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireX();
-    LoadRelocated(temp, operand);
+    Ldr(temp, operand.immediate());
     ConditionalCompareMacro(rn, temp, nzcv, cond, op);
 
   } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
-      (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+             (operand.IsImmediate() &&
+              IsImmConditionalCompare(operand.ImmediateValue()))) {
     // The immediate can be encoded in the instruction, or the operand is an
     // unshifted register: call the assembler.
     ConditionalCompare(rn, operand, nzcv, cond, op);
@@ -385,7 +386,7 @@ void MacroAssembler::Csel(const Register& rd,
   if (operand.IsImmediate()) {
     // Immediate argument. Handle special cases of 0, 1 and -1 using zero
     // register.
-    int64_t imm = operand.immediate();
+    int64_t imm = operand.ImmediateValue();
     Register zr = AppropriateZeroRegFor(rn);
     if (imm == 0) {
       csel(rd, rn, zr, cond);
@@ -396,7 +397,7 @@ void MacroAssembler::Csel(const Register& rd,
     } else {
       UseScratchRegisterScope temps(this);
       Register temp = temps.AcquireSameSizeAs(rn);
-      Mov(temp, operand.immediate());
+      Mov(temp, imm);
       csel(rd, rn, temp, cond);
     }
   } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
@@ -426,10 +427,11 @@ void MacroAssembler::AddSubMacro(const Register& rd,
   if (operand.NeedsRelocation(this)) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireX();
-    LoadRelocated(temp, operand);
+    Ldr(temp, operand.immediate());
     AddSubMacro(rd, rn, temp, S, op);
-  } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
-      (rn.IsZero() && !operand.IsShiftedRegister()) ||
+  } else if ((operand.IsImmediate() &&
+              !IsImmAddSub(operand.ImmediateValue())) ||
+             (rn.IsZero() && !operand.IsShiftedRegister()) ||
       (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireSameSizeAs(rn);
@@ -451,7 +453,7 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
 
   if (operand.NeedsRelocation(this)) {
     Register temp = temps.AcquireX();
-    LoadRelocated(temp, operand);
+    Ldr(temp, operand.immediate());
     AddSubWithCarryMacro(rd, rn, temp, S, op);
 
   } else if (operand.IsImmediate() ||
@@ -1071,7 +1073,7 @@ void MacroAssembler::PushPreamble(Operand total_size) {
   // on entry and the total size of the specified registers must also be a
   // multiple of 16 bytes.
   if (total_size.IsImmediate()) {
-    ASSERT((total_size.immediate() % 16) == 0);
+    ASSERT((total_size.ImmediateValue() % 16) == 0);
   }
 
   // Don't check access size for non-immediate sizes. It's difficult to do
@@ -1091,7 +1093,7 @@ void MacroAssembler::PopPostamble(Operand total_size) {
   // on entry and the total size of the specified registers must also be a
   // multiple of 16 bytes.
   if (total_size.IsImmediate()) {
-    ASSERT((total_size.immediate() % 16) == 0);
+    ASSERT((total_size.ImmediateValue() % 16) == 0);
   }
 
   // Don't check access size for non-immediate sizes. It's difficult to do
@@ -1107,7 +1109,7 @@ void MacroAssembler::PopPostamble(Operand total_size) {
 
 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
   if (offset.IsImmediate()) {
-    ASSERT(offset.immediate() >= 0);
+    ASSERT(offset.ImmediateValue() >= 0);
   } else if (emit_debug_code()) {
     Cmp(xzr, offset);
     Check(le, kStackAccessBelowStackPointer);
@@ -1119,7 +1121,7 @@ void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
 
 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
   if (offset.IsImmediate()) {
-    ASSERT(offset.immediate() >= 0);
+    ASSERT(offset.ImmediateValue() >= 0);
   } else if (emit_debug_code()) {
     Cmp(xzr, offset);
     Check(le, kStackAccessBelowStackPointer);
@@ -2069,7 +2071,7 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
     movk(temp, (imm >> 16) & 0xffff, 16);
     movk(temp, (imm >> 32) & 0xffff, 32);
   } else {
-    LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
+    Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
   }
   Blr(temp);
 #ifdef DEBUG
@@ -5173,7 +5175,7 @@ void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
   //
   // A branch (br) is used rather than a call (blr) because this code replaces
   // the frame setup code that would normally preserve lr.
-  __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
   __ adr(x0, &start);
   __ br(ip0);
   // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
@@ -394,13 +394,10 @@ class MacroAssembler : public Assembler {
   inline void Ldpsw(const Register& rt,
                     const Register& rt2,
                     const MemOperand& src);
-  // Provide both double and float interfaces for FP immediate loads, rather
-  // than relying on implicit C++ casts. This allows signalling NaNs to be
-  // preserved when the immediate matches the format of fd. Most systems convert
-  // signalling NaNs to quiet NaNs when converting between float and double.
-  inline void Ldr(const FPRegister& ft, double imm);
-  inline void Ldr(const FPRegister& ft, float imm);
-  inline void Ldr(const Register& rt, uint64_t imm);
+  // Load a literal from the inline constant pool.
+  inline void Ldr(const CPURegister& rt, const Immediate& imm);
+  // Helper function for double immediate.
+  inline void Ldr(const CPURegister& rt, double imm);
   inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
   inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
   inline void Lsr(const Register& rd, const Register& rn, unsigned shift);