diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h index c509e05a5b..b56e3ed2a1 100644 --- a/src/arm64/assembler-arm64-inl.h +++ b/src/arm64/assembler-arm64-inl.h @@ -589,20 +589,19 @@ Address Assembler::return_address_from_call_start(Address pc) { // sequences: // // Without relocation: - // movz ip0, #(target & 0x000000000000ffff) - // movk ip0, #(target & 0x00000000ffff0000) - // movk ip0, #(target & 0x0000ffff00000000) - // movk ip0, #(target & 0xffff000000000000) - // blr ip0 + // movz temp, #(target & 0x000000000000ffff) + // movk temp, #(target & 0x00000000ffff0000) + // movk temp, #(target & 0x0000ffff00000000) + // blr temp // // With relocation: - // ldr ip0, =target - // blr ip0 + // ldr temp, =target + // blr temp // // The return address is immediately after the blr instruction in both cases, // so it can be found by adding the call size to the address at the start of // the call sequence. - STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize); + STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize); STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize); Instruction* instr = reinterpret_cast<Instruction*>(pc); @@ -610,8 +609,7 @@ Address Assembler::return_address_from_call_start(Address pc) { // Verify the instruction sequence. ASSERT(instr->following(1)->IsMovk()); ASSERT(instr->following(2)->IsMovk()); - ASSERT(instr->following(3)->IsMovk()); - ASSERT(instr->following(4)->IsBranchAndLinkToRegister()); + ASSERT(instr->following(3)->IsBranchAndLinkToRegister()); return pc + Assembler::kCallSizeWithoutRelocation; } else { // Verify the instruction sequence. diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h index 79f957b91c..e3e0277294 100644 --- a/src/arm64/assembler-arm64.h +++ b/src/arm64/assembler-arm64.h @@ -818,16 +818,15 @@ class Assembler : public AssemblerBase { // as it will choose the correct value for a given relocation mode. 
// // Without relocation: - // movz ip0, #(target & 0x000000000000ffff) - // movk ip0, #(target & 0x00000000ffff0000) - // movk ip0, #(target & 0x0000ffff00000000) - // movk ip0, #(target & 0xffff000000000000) - // blr ip0 + // movz temp, #(target & 0x000000000000ffff) + // movk temp, #(target & 0x00000000ffff0000) + // movk temp, #(target & 0x0000ffff00000000) + // blr temp // // With relocation: - // ldr ip0, =target - // blr ip0 - static const int kCallSizeWithoutRelocation = 5 * kInstructionSize; + // ldr temp, =target + // blr temp + static const int kCallSizeWithoutRelocation = 4 * kInstructionSize; static const int kCallSizeWithRelocation = 2 * kInstructionSize; // Size of the generated code in bytes diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc index d7d0ab7502..a4108410bd 100644 --- a/src/arm64/macro-assembler-arm64.cc +++ b/src/arm64/macro-assembler-arm64.cc @@ -2024,11 +2024,13 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { Register temp = temps.AcquireX(); if (rmode == RelocInfo::NONE64) { + // Addresses are 48 bits so we never need to load the upper 16 bits. uint64_t imm = reinterpret_cast<uint64_t>(target); + // If we don't use ARM tagged addresses, the upper 16 bits must be 0. + ASSERT(((imm >> 48) & 0xffff) == 0); movz(temp, (imm >> 0) & 0xffff, 0); movk(temp, (imm >> 16) & 0xffff, 16); movk(temp, (imm >> 32) & 0xffff, 32); - movk(temp, (imm >> 48) & 0xffff, 48); } else { LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode)); }