ARM64: optimize call immediate
BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/209923002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20207 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
commit 1da7cb61ca (parent 0e0624c0b6)
@@ -589,20 +589,19 @@ Address Assembler::return_address_from_call_start(Address pc) {
   // sequences:
   //
   // Without relocation:
-  //  movz ip0, #(target & 0x000000000000ffff)
-  //  movk ip0, #(target & 0x00000000ffff0000)
-  //  movk ip0, #(target & 0x0000ffff00000000)
-  //  movk ip0, #(target & 0xffff000000000000)
-  //  blr ip0
+  //  movz temp, #(target & 0x000000000000ffff)
+  //  movk temp, #(target & 0x00000000ffff0000)
+  //  movk temp, #(target & 0x0000ffff00000000)
+  //  blr temp
   //
   // With relocation:
-  //  ldr ip0, =target
-  //  blr ip0
+  //  ldr temp, =target
+  //  blr temp
   //
   // The return address is immediately after the blr instruction in both cases,
   // so it can be found by adding the call size to the address at the start of
   // the call sequence.
-  STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
+  STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize);
   STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
 
   Instruction* instr = reinterpret_cast<Instruction*>(pc);
@@ -610,8 +609,7 @@ Address Assembler::return_address_from_call_start(Address pc) {
     // Verify the instruction sequence.
     ASSERT(instr->following(1)->IsMovk());
     ASSERT(instr->following(2)->IsMovk());
-    ASSERT(instr->following(3)->IsMovk());
-    ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
+    ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
     return pc + Assembler::kCallSizeWithoutRelocation;
   } else {
     // Verify the instruction sequence.
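For reference, a minimal standalone sketch of the return-address arithmetic described in the comment above. The constants mirror the patched sizes (every ARM64 instruction is 4 bytes); the function name ReturnAddressFromCallStart and the sample addresses are stand-ins chosen for this illustration, not the V8 declarations.

// Standalone sketch: return address = call-sequence start + call size.
#include <cassert>
#include <cstdint>

const int kInstructionSize = 4;                               // ARM64 instructions are 4 bytes
const int kCallSizeWithoutRelocation = 4 * kInstructionSize;  // movz + movk + movk + blr
const int kCallSizeWithRelocation = 2 * kInstructionSize;     // ldr (literal) + blr

// The return address sits right after the blr, so it is the start of the
// call sequence plus the size of whichever sequence was emitted.
std::uintptr_t ReturnAddressFromCallStart(std::uintptr_t call_start, bool relocated) {
  return call_start + (relocated ? kCallSizeWithRelocation
                                 : kCallSizeWithoutRelocation);
}

int main() {
  assert(ReturnAddressFromCallStart(0x1000, false) == 0x1010);  // 16-byte immediate call
  assert(ReturnAddressFromCallStart(0x1000, true) == 0x1008);   // 8-byte relocatable call
  return 0;
}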
@@ -818,16 +818,15 @@ class Assembler : public AssemblerBase {
   // as it will choose the correct value for a given relocation mode.
   //
   // Without relocation:
-  //  movz ip0, #(target & 0x000000000000ffff)
-  //  movk ip0, #(target & 0x00000000ffff0000)
-  //  movk ip0, #(target & 0x0000ffff00000000)
-  //  movk ip0, #(target & 0xffff000000000000)
-  //  blr ip0
+  //  movz temp, #(target & 0x000000000000ffff)
+  //  movk temp, #(target & 0x00000000ffff0000)
+  //  movk temp, #(target & 0x0000ffff00000000)
+  //  blr temp
   //
   // With relocation:
-  //  ldr ip0, =target
-  //  blr ip0
-  static const int kCallSizeWithoutRelocation = 5 * kInstructionSize;
+  //  ldr temp, =target
+  //  blr temp
+  static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
   static const int kCallSizeWithRelocation = 2 * kInstructionSize;
 
   // Size of the generated code in bytes
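A small compile-time check, under the same 48-bit-address assumption, that the three masks kept in the comment above already cover the whole target, which is why the "movk ..., #(target & 0xffff000000000000)" slice and the fifth instruction can be dropped. kSampleTarget is a made-up value, not taken from V8.

// Standalone check: three 16-bit slices reproduce any 48-bit target.
#include <cstdint>

constexpr std::uint64_t kSampleTarget = 0x0000123456789abcULL;  // hypothetical 48-bit address
static_assert((kSampleTarget >> 48) == 0, "sample stays within 48 bits");
static_assert(((kSampleTarget & 0x000000000000ffffULL) |
               (kSampleTarget & 0x00000000ffff0000ULL) |
               (kSampleTarget & 0x0000ffff00000000ULL)) == kSampleTarget,
              "movz plus two movk re-create the full 48-bit target");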
@@ -2024,11 +2024,13 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
   Register temp = temps.AcquireX();
 
   if (rmode == RelocInfo::NONE64) {
+    // Addresses are 48 bits so we never need to load the upper 16 bits.
     uint64_t imm = reinterpret_cast<uint64_t>(target);
+    // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
+    ASSERT(((imm >> 48) & 0xffff) == 0);
     movz(temp, (imm >> 0) & 0xffff, 0);
     movk(temp, (imm >> 16) & 0xffff, 16);
     movk(temp, (imm >> 32) & 0xffff, 32);
-    movk(temp, (imm >> 48) & 0xffff, 48);
   } else {
     LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
   }
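A host-side sketch of the immediate split performed above: it models only the arithmetic of the movz/movk sequence, not instruction emission, and SplitAndRebuild plus the sample address are hypothetical names chosen for this illustration.

// Standalone sketch: split a 48-bit address the way movz/movk would, then
// rebuild it, showing three 16-bit chunks suffice once the top bits are zero.
#include <cassert>
#include <cstdint>

std::uint64_t SplitAndRebuild(std::uint64_t imm) {
  assert(((imm >> 48) & 0xffff) == 0);  // mirrors the new ASSERT in Call()
  std::uint64_t chunk0 = (imm >> 0) & 0xffff;   // movz temp, chunk0, lsl #0
  std::uint64_t chunk1 = (imm >> 16) & 0xffff;  // movk temp, chunk1, lsl #16
  std::uint64_t chunk2 = (imm >> 32) & 0xffff;  // movk temp, chunk2, lsl #32
  return (chunk0 << 0) | (chunk1 << 16) | (chunk2 << 32);
}

int main() {
  const std::uint64_t target = 0x00007f32dead0040ULL;  // made-up user-space address
  assert(SplitAndRebuild(target) == target);
  return 0;
}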