MIPS[64] Make BranchLong PIC

In order to enable PIC code in builtins, we need BranchLong to be
position independent.

Change-Id: I374134ff540b515f3cf385a8b936487b47c55762
Reviewed-on: https://chromium-review.googlesource.com/1152810
Reviewed-by: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Reviewed-by: Sreten Kovacevic <skovacevic@wavecomp.com>
Commit-Queue: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Cr-Commit-Position: refs/heads/master@{#54901}
Author: Predrag Rudic, 2018-07-31 10:49:32 +02:00 (committed by Commit Bot)
Parent: 3656b4656e
Commit: 5ba6f2b00c
6 changed files with 302 additions and 199 deletions
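
For orientation, the following sketch (not part of the commit, read off the BranchLong and branch_long_offset changes below) lays out the address arithmetic of the new PC-relative long-branch sequence; "pos" is just a label for the offset of the first emitted instruction:

// pos + 0:  or_  t8, ra, zero_reg    // save ra in t8
// pos + 4:  bal  find_pc             // ra <- (pos + 4) + 8 = pos + 12
// pos + 8:  lui  t9, hi(imm32)       // branch delay slot: high half of offset
// pos + 12: ori  t9, t9, lo(imm32)   // find_pc: ra points at this instruction
// pos + 16: addu t9, ra, t9          // t9 = (pos + 12) + imm32 = target
// pos + 20: jr   t9
// pos + 24: or_  ra, t8, zero_reg    // delay slot: restore ra
//
// branch_long_offset() returns imm32 = target_pos - (pos + 3 * kInstrSize),
// i.e. relative to the value bal leaves in ra, which is why
// kLongBranchPCOffset becomes 3 * kInstrSize.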

File: assembler-mips.cc

@@ -554,6 +554,12 @@ bool Assembler::IsBc(Instr instr) {
return opcode == BC || opcode == BALC;
}
bool Assembler::IsBal(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
return opcode == REGIMM && rt_field == BGEZAL && rs_field == 0;
}
bool Assembler::IsBzc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -850,29 +856,58 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
// Check we have a branch or jump instruction.
DCHECK(IsBranch(instr) || IsLui(instr));
DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
if (IsBranch(instr)) {
return AddBranchOffset(pos, instr);
} else {
Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
int32_t imm;
if (IsJicOrJialc(instr2)) {
imm = CreateTargetAddress(instr1, instr2);
} else {
imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
}
if (imm == kEndOfJumpChain) {
} else if (IsMov(instr, t8, ra)) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
if (imm32 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
}
return pos + Assembler::kLongBranchPCOffset + imm32;
} else {
DCHECK(IsLui(instr));
if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
if (imm32 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
}
return pos + Assembler::kLongBranchPCOffset + imm32;
} else {
uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
int32_t delta = instr_address - imm;
DCHECK(pos > delta);
return pos - delta;
Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
int32_t imm;
if (IsJicOrJialc(instr2)) {
imm = CreateTargetAddress(instr1, instr2);
} else {
imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
}
if (imm == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
} else {
uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
int32_t delta = instr_address - imm;
DCHECK(pos > delta);
return pos - delta;
}
}
}
return 0;
@@ -916,8 +951,8 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr = SetBranchOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
} else if (IsMov(instr, t8, ra)) {
Instr instr_lui = instr_at(pos + 4 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 5 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
@@ -938,31 +973,49 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_at_put(pos + 4 * Assembler::kInstrSize,
instr_lui | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 5 * Assembler::kInstrSize,
instr_at_put(pos + 2 * Assembler::kInstrSize,
instr_lui | ((imm >> kLuiShift) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
}
} else {
Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
DCHECK_EQ(imm & 3, 0);
DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
DCHECK(IsLui(instr));
if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
DCHECK_EQ(imm & 3, 0);
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
} else {
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr1 | ((imm & kHiMask) >> kLuiShift));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr2 | (imm & kImm16Mask));
instr_lui | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 2 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
} else {
Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
DCHECK_EQ(imm & 3, 0);
DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
} else {
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr1 | ((imm & kHiMask) >> kLuiShift));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr2 | (imm & kImm16Mask));
}
}
}
}
@@ -1421,6 +1474,28 @@ uint32_t Assembler::jump_address(Label* L) {
return imm;
}
uint32_t Assembler::branch_long_offset(Label* L) {
int32_t target_pos;
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
L->link_to(pc_offset());
} else {
L->link_to(pc_offset());
return kEndOfJumpChain;
}
}
DCHECK(is_int32(static_cast<int64_t>(target_pos) -
static_cast<int64_t>(pc_offset() + kLongBranchPCOffset)));
int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
DCHECK_EQ(offset & 3, 0);
return offset;
}
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t target_pos;
@@ -2228,7 +2303,7 @@ void Assembler::sc(Register rd, const MemOperand& rs) {
}
void Assembler::lui(Register rd, int32_t j) {
DCHECK(is_uint16(j));
DCHECK(is_uint16(j) || is_int16(j));
GenInstrImmediate(LUI, zero_reg, rd, j);
}
@@ -3832,10 +3907,6 @@ void Assembler::CheckTrampolinePool() {
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
{
// printf("Generate trampoline %d\n", i);
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and
// available to be patched.
if (IsMipsArchVariant(kMips32r6)) {
bc(&after_pool);
nop();
@@ -3843,20 +3914,15 @@ void Assembler::CheckTrampolinePool() {
Label find_pc;
or_(t8, ra, zero_reg);
bal(&find_pc);
or_(t9, ra, zero_reg);
lui(t9, 0);
bind(&find_pc);
or_(ra, t8, zero_reg);
lui(t8, 0);
ori(t8, t8, 0);
addu(t9, t9, t8);
// Instruction jr will take or_ from the next trampoline.
// in its branch delay slot. This is the expected behavior
// in order to decrease size of trampoline pool.
ori(t9, t9, 0);
addu(t9, ra, t9);
jr(t9);
or_(ra, t8, zero_reg); // Branch delay slot.
}
}
}
nop();
bind(&after_pool);
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
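
As a cross-check (assuming the interleaved old/new lines above are read as the new sequence), the trampoline slot emitted by CheckTrampolinePool on pre-R6 variants now looks like this; the lui/ori immediates are emitted as 0 and patched later by target_at_put relative to kLongBranchPCOffset:

// slot + 0:  or_  t8, ra, zero_reg   // save ra
// slot + 4:  bal  find_pc            // ra <- slot + 12
// slot + 8:  lui  t9, 0              // delay slot; patched with hi16 of offset
// slot + 12: ori  t9, t9, 0          // find_pc; patched with lo16 of offset
// slot + 16: addu t9, ra, t9         // t9 = slot + 12 + offset = target
// slot + 20: jr   t9
// slot + 24: or_  ra, t8, zero_reg   // delay slot: restore ra
//
// Seven instructions per slot, matching the new kTrampolineSlotsSize of
// 7 * kInstrSize; each slot is now self-contained instead of borrowing its
// jr delay slot from the next trampoline.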

File: assembler-mips.h

@@ -556,6 +556,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return branch_offset26(L) >> 2;
}
uint32_t jump_address(Label* L);
uint32_t branch_long_offset(Label* L);
// Puts a labels target address at the given position.
// The high 8 bits are set to zero.
@@ -609,12 +610,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and target address offset.
static constexpr int kBranchPCOffset = 4;
static constexpr int kBranchPCOffset = kInstrSize;
// Difference between address of current opcode and target address offset,
// when we are generating a sequence of instructions for long relative PC
// branches.
static constexpr int kLongBranchPCOffset = 12;
static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
@@ -649,7 +650,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static constexpr int kTrampolineSlotsSize =
IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 8 * kInstrSize;
IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 7 * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
@@ -1758,6 +1759,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsBranch(Instr instr);
static bool IsMsaBranch(Instr instr);
static bool IsBc(Instr instr);
static bool IsBal(Instr instr);
static bool IsBzc(Instr instr);
static bool IsBeq(Instr instr);
static bool IsBne(Instr instr);

File: macro-assembler-mips.cc

@@ -3914,41 +3914,23 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
(!L->is_bound() || is_near_r6(L))) {
BranchShortHelperR6(0, L);
} else {
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
uint32_t lui_offset, jic_offset;
UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
lui(scratch, lui_offset);
jic(scratch, jic_offset);
}
CheckBuffer();
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(scratch, (imm32 & kHiMask) >> kLuiShift);
ori(scratch, scratch, (imm32 & kImm16Mask));
}
CheckBuffer();
jr(scratch);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
Label find_pc;
int32_t imm32;
imm32 = branch_long_offset(L);
or_(t8, ra, zero_reg);
bal(&find_pc);
lui(t9, (imm32 & kHiMask) >> kLuiShift);
bind(&find_pc);
ori(t9, t9, (imm32 & kImm16Mask));
addu(t9, ra, t9);
if (bdslot == USE_DELAY_SLOT) {
or_(ra, t8, zero_reg);
}
jr(t9);
// Emit a or_ in the branch delay slot if it's protected.
if (bdslot == PROTECT) or_(ra, t8, zero_reg);
}
}
@@ -3957,41 +3939,19 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
(!L->is_bound() || is_near_r6(L))) {
BranchAndLinkShortHelperR6(0, L);
} else {
// Generate position independent long branch and link.
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
uint32_t lui_offset, jialc_offset;
UnpackTargetAddressUnsigned(imm32, lui_offset, jialc_offset);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
lui(scratch, lui_offset);
jialc(scratch, jialc_offset);
}
CheckBuffer();
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(scratch, (imm32 & kHiMask) >> kLuiShift);
ori(scratch, scratch, (imm32 & kImm16Mask));
}
CheckBuffer();
jalr(scratch);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
}
Label find_pc;
int32_t imm32;
imm32 = branch_long_offset(L);
lui(t8, (imm32 & kHiMask) >> kLuiShift);
bal(&find_pc);
ori(t8, t8, (imm32 & kImm16Mask));
bind(&find_pc);
addu(t8, ra, t8);
jalr(t8);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
}
}
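
The BranchAndLinkLong path above uses the same bal-based trick but needs no save/restore of ra, since jalr overwrites ra with the return address anyway; a sketch of its layout under the same conventions as before:

// pos + 0:  lui  t8, hi(imm32)       // build the offset directly in t8
// pos + 4:  bal  find_pc             // ra <- pos + 12
// pos + 8:  ori  t8, t8, lo(imm32)   // branch delay slot
// pos + 12: addu t8, ra, t8          // find_pc: t8 = (pos + 12) + imm32
// pos + 16: jalr t8                  // call target; ra <- return address
// pos + 20: nop                      // delay slot when bdslot == PROTECT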

File: assembler-mips64.cc

@@ -533,6 +533,12 @@ bool Assembler::IsBc(Instr instr) {
return opcode == BC || opcode == BALC;
}
bool Assembler::IsBal(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
return opcode == REGIMM && rt_field == BGEZAL && rs_field == 0;
}
bool Assembler::IsBzc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -781,34 +787,63 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
// Check we have a branch or jump instruction.
DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr) ||
IsMov(instr, t8, ra));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
return AddBranchOffset(pos, instr);
} else if (IsLui(instr)) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
} else if (IsMov(instr, t8, ra)) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
// TODO(plind) create named constants for shift values.
int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
// Sign extend address;
imm >>= 16;
if (imm == kEndOfJumpChain) {
imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
if (imm32 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
}
return pos + Assembler::kLongBranchPCOffset + imm32;
} else if (IsLui(instr)) {
if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
if (imm32 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
}
return pos + Assembler::kLongBranchPCOffset + imm32;
} else {
uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
DCHECK(instr_address - imm < INT_MAX);
int delta = static_cast<int>(instr_address - imm);
DCHECK(pos > delta);
return pos - delta;
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
// TODO(plind) create named constants for shift values.
int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
// Sign extend address;
imm >>= 16;
if (imm == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
} else {
uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
DCHECK(instr_address - imm < INT_MAX);
int delta = static_cast<int>(instr_address - imm);
DCHECK(pos > delta);
return pos - delta;
}
}
} else {
DCHECK(IsJ(instr) || IsJal(instr));
@@ -859,28 +894,45 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr = SetBranchOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
} else if (IsLui(instr)) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
DCHECK_EQ(imm & 3, 0);
uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
DCHECK_EQ(imm & 3, 0);
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_ori2 &= ~kImm16Mask;
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr_lui | ((imm >> kLuiShift) & kImm16Mask));
instr_at_put(pos + 2 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
} else {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr_lui | ((imm >> 32) & kImm16Mask));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori2 | (imm & kImm16Mask));
uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
DCHECK_EQ(imm & 3, 0);
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_ori2 &= ~kImm16Mask;
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr_lui | ((imm >> 32) & kImm16Mask));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori2 | (imm & kImm16Mask));
}
} else if (IsMov(instr, t8, ra)) {
Instr instr_lui = instr_at(pos + 4 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 5 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
@@ -901,9 +953,9 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_at_put(pos + 4 * Assembler::kInstrSize,
instr_lui | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 5 * Assembler::kInstrSize,
instr_at_put(pos + 2 * Assembler::kInstrSize,
instr_lui | ((imm >> kLuiShift) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
}
} else if (IsJ(instr) || IsJal(instr)) {
@@ -989,7 +1041,7 @@ void Assembler::bind_to(Label* L, int pos) {
target_at_put(fixup_pos, pos, false);
} else {
DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
IsEmittedConstant(instr));
IsEmittedConstant(instr) || IsMov(instr, t8, ra));
target_at_put(fixup_pos, pos, false);
}
}
@@ -1405,6 +1457,25 @@ uint64_t Assembler::jump_offset(Label* L) {
return static_cast<uint64_t>(imm);
}
uint64_t Assembler::branch_long_offset(Label* L) {
int64_t target_pos;
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
L->link_to(pc_offset());
} else {
L->link_to(pc_offset());
return kEndOfJumpChain;
}
}
int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
DCHECK_EQ(offset & 3, 0);
return static_cast<uint64_t>(offset);
}
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t target_pos;
@@ -2404,7 +2475,7 @@ void Assembler::scd(Register rd, const MemOperand& rs) {
}
void Assembler::lui(Register rd, int32_t j) {
DCHECK(is_uint16(j));
DCHECK(is_uint16(j) || is_int16(j));
GenInstrImmediate(LUI, zero_reg, rd, j);
}
@@ -4184,9 +4255,7 @@ void Assembler::CheckTrampolinePool() {
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
{ // Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and available
// to be patched.
{
if (kArchVariant == kMips64r6) {
bc(&after_pool);
nop();
@@ -4194,20 +4263,15 @@ void Assembler::CheckTrampolinePool() {
Label find_pc;
or_(t8, ra, zero_reg);
bal(&find_pc);
or_(t9, ra, zero_reg);
lui(t9, 0);
bind(&find_pc);
or_(ra, t8, zero_reg);
lui(t8, 0);
ori(t8, t8, 0);
daddu(t9, t9, t8);
// Instruction jr will take or_ from the next trampoline.
// in its branch delay slot. This is the expected behavior
// in order to decrease size of trampoline pool.
ori(t9, t9, 0);
daddu(t9, ra, t9);
jr(t9);
or_(ra, t8, zero_reg); // Branch delay slot.
}
}
}
nop();
bind(&after_pool);
trampoline_ = Trampoline(pool_start, unbound_labels_count_);

File: assembler-mips64.h

@@ -564,6 +564,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
uint64_t jump_address(Label* L);
uint64_t jump_offset(Label* L);
uint64_t branch_long_offset(Label* L);
// Puts a labels target address at the given position.
// The high 8 bits are set to zero.
@@ -619,12 +620,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and target address offset.
static constexpr int kBranchPCOffset = 4;
static constexpr int kBranchPCOffset = kInstrSize;
// Difference between address of current opcode and target address offset,
// when we are generating a sequence of instructions for long relative PC
// branches.
static constexpr int kLongBranchPCOffset = 12;
static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
@@ -660,7 +661,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static constexpr int kTrampolineSlotsSize =
kArchVariant == kMips64r6 ? 2 * kInstrSize : 8 * kInstrSize;
kArchVariant == kMips64r6 ? 2 * kInstrSize : 7 * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
@@ -1836,6 +1837,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsBranch(Instr instr);
static bool IsMsaBranch(Instr instr);
static bool IsBc(Instr instr);
static bool IsBal(Instr instr);
static bool IsBzc(Instr instr);
static bool IsBeq(Instr instr);

File: macro-assembler-mips64.cc

@@ -4291,17 +4291,24 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
(!L->is_bound() || is_near_r6(L))) {
BranchShortHelperR6(0, L);
} else {
EmitForbiddenSlotInstruction();
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
j(L);
Label find_pc;
int64_t imm64;
imm64 = branch_long_offset(L);
DCHECK(is_int32(imm64));
or_(t8, ra, zero_reg);
bal(&find_pc);
lui(t9, (imm64 & kHiMaskOf32) >> kLuiShift);
bind(&find_pc);
ori(t9, t9, (imm64 & kImm16Mask));
daddu(t9, ra, t9);
if (bdslot == USE_DELAY_SLOT) {
or_(ra, t8, zero_reg);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
jr(t9);
// Emit a or_ in the branch delay slot if it's protected.
if (bdslot == PROTECT) or_(ra, t8, zero_reg);
}
}
@@ -4310,15 +4317,17 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
(!L->is_bound() || is_near_r6(L))) {
BranchAndLinkShortHelperR6(0, L);
} else {
EmitForbiddenSlotInstruction();
BlockTrampolinePoolScope block_trampoline_pool(this);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
jal(L);
}
Label find_pc;
int64_t imm64;
imm64 = branch_long_offset(L);
DCHECK(is_int32(imm64));
lui(t8, (imm64 & kHiMaskOf32) >> kLuiShift);
bal(&find_pc);
ori(t8, t8, (imm64 & kImm16Mask));
bind(&find_pc);
daddu(t8, ra, t8);
jalr(t8);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
}