MIPS: Use JIC/JIALC offset when possible

Until now JIC and JIALC compact branches were emitted without using their
offset field. Here we optimize their use by folding the offset of a
preceding addition and/or load-immediate operation into the branch.

The CL also fixes a problem with deserialization that occurs when a code
object ends with an optimized LUI/AUI and JIC/JIALC instruction pair.
The deserializer processed these instruction pairs by moving to the
location immediately after them, but when that location is the end of the
object it would finish with the current object before doing relocation.
This is fixed by moving the deserializer to one instruction before the
end of the instruction pair.

BUG=

Review-Url: https://codereview.chromium.org/2542403002
Cr-Commit-Position: refs/heads/master@{#44841}
This commit is contained in:
Miran.Karic 2017-04-25 04:32:13 -07:00 committed by Commit bot
parent 9372dd95d9
commit fe916cd224
10 changed files with 262 additions and 97 deletions

View File

@ -396,8 +396,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
__ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Jump(at, a2, Code::kHeaderSize - kHeapObjectTag);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@ -421,8 +420,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
__ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
@ -1191,8 +1189,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
__ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
@ -1572,8 +1569,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ PushStandardFrame(a1);
// Jump to point after the code-age stub.
__ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
__ Jump(a0);
__ Jump(a0, kNoCodeAgeSequenceLength);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
@ -2492,8 +2488,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
masm->isolate())));
__ lw(at, MemOperand(at));
__ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Jump(at, Code::kHeaderSize - kHeapObjectTag);
}
// static
@ -2717,8 +2712,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
__ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
}
// static
@ -2808,8 +2802,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
__ lw(at, MemOperand(at));
__ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Jump(at, Code::kHeaderSize - kHeapObjectTag);
}
// static
@ -3081,8 +3074,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ MultiPop(gp_regs);
}
// Now jump to the instructions of the returned code object.
__ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
#undef __

View File

@ -622,8 +622,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
} else {
__ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
__ Call(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@ -640,8 +639,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
} else {
__ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
__ Jump(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();

View File

@ -375,9 +375,7 @@ bool LCodeGen::GenerateJumpTable() {
}
// Add the base address to the offset previously loaded in entry_offset.
__ Addu(entry_offset, entry_offset,
Operand(ExternalReference::ForDeoptEntry(base)));
__ Jump(entry_offset);
__ Jump(entry_offset, Operand(ExternalReference::ForDeoptEntry(base)));
}
__ RecordComment("]");
@ -3538,8 +3536,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
__ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(target);
__ Jump(target, Code::kHeaderSize - kHeapObjectTag);
}
} else {
LPointerMap* pointers = instr->pointer_map();
@ -3554,8 +3551,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(target);
__ Call(target, Code::kHeaderSize - kHeapObjectTag);
}
generator.AfterCall();
}

View File

@ -53,9 +53,17 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
// to a call to the debug break slot code.
// li t9, address (lui t9 / ori t9 instruction pair)
// call t9 (jalr t9 / nop instruction pair)
// Add a label for checking the size of the code used for returning.
Label check_codesize;
patcher.masm()->bind(&check_codesize);
patcher.masm()->li(v8::internal::t9,
Operand(reinterpret_cast<int32_t>(code->entry())));
patcher.masm()->Call(v8::internal::t9);
// Check that the size of the code generated is as expected.
DCHECK_EQ(Assembler::kDebugBreakSlotLength,
patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
}
bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {

View File

@ -121,8 +121,17 @@ Address RelocInfo::target_address_address() {
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
return reinterpret_cast<Address>(
pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
if (IsMipsArchVariant(kMips32r6)) {
// On R6 we don't move to the end of the instructions to be patched, but one
// instruction before, because if these instructions are at the end of the
// code object it can cause errors in the deserializer.
return reinterpret_cast<Address>(
pc_ +
(Assembler::kInstructionsFor32BitConstant - 1) * Assembler::kInstrSize);
} else {
return reinterpret_cast<Address>(
pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
}
}

View File

@ -2077,6 +2077,7 @@ void Assembler::lui(Register rd, int32_t j) {
void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses same opcode as 'lui'. The difference in encoding is
// 'lui' has zero reg. for rs field.
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
DCHECK(is_uint16(j));
GenInstrImmediate(LUI, rs, rt, j);
@ -3746,11 +3747,17 @@ void Assembler::CheckTrampolinePool() {
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
// Interpret 2 instructions generated by li: lui/ori
if (IsLui(instr1) && IsOri(instr2)) {
// Assemble the 32 bit value.
return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
GetImmediate16(instr2));
// Interpret 2 instructions generated by li (lui/ori) or optimized pairs
// lui/jic, aui/jic or lui/jialc.
if (IsLui(instr1)) {
if (IsOri(instr2)) {
// Assemble the 32 bit value.
return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
GetImmediate16(instr2));
} else if (IsJicOrJialc(instr2)) {
// Assemble the 32 bit value.
return reinterpret_cast<Address>(CreateTargetAddress(instr1, instr2));
}
}
// We should never get here, force a bad address if we do.

View File

@ -439,6 +439,8 @@ class Operand BASE_EMBEDDED {
Register rm() const { return rm_; }
RelocInfo::Mode rmode() const { return rmode_; }
private:
Register rm_;
int32_t imm32_; // Valid if rm_ == no_reg.
@ -594,10 +596,19 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code,
Address target) {
set_target_address_at(
isolate,
instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
target);
if (IsMipsArchVariant(kMips32r6)) {
// On R6 the address location is shifted by one instruction
set_target_address_at(
isolate,
instruction_payload -
(kInstructionsFor32BitConstant - 1) * kInstrSize,
code, target);
} else {
set_target_address_at(
isolate,
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
code, target);
}
}
// This sets the internal reference at the pc.
@ -628,7 +639,7 @@ class Assembler : public AssemblerBase {
// Distance between the instruction referring to the address of the call
// target and the return address.
#ifdef _MIPS_ARCH_MIPS32R6
static constexpr int kCallTargetAddressOffset = 3 * kInstrSize;
static constexpr int kCallTargetAddressOffset = 2 * kInstrSize;
#else
static constexpr int kCallTargetAddressOffset = 4 * kInstrSize;
#endif

View File

@ -1102,9 +1102,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ lw(a1, MemOperand(a1));
__ li(a2, Operand(pending_handler_offset_address));
__ lw(a2, MemOperand(a2));
__ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Addu(t9, a1, a2);
__ Jump(t9);
__ Jump(t9, Code::kHeaderSize - kHeapObjectTag);
}
@ -1237,8 +1236,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ lw(t9, MemOperand(t0)); // Deref address.
// Call JSEntryTrampoline.
__ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
__ Call(t9);
__ Call(t9, Code::kHeaderSize - kHeapObjectTag);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@ -1421,8 +1419,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
__ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
__ bind(&non_function);
__ mov(a3, a1);

View File

@ -3600,22 +3600,87 @@ bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return false;
}
void MacroAssembler::Jump(Register target,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
void MacroAssembler::Jump(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_int16(offset));
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
jic(target, 0);
jic(target, offset);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jic(target, 0);
jic(target, offset);
}
} else {
if (offset != 0) {
Addu(target, target, offset);
}
if (cond == cc_always) {
jr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jr(target);
}
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
}
void MacroAssembler::Jump(Register target, Register base, int16_t offset,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(is_int16(offset));
BlockTrampolinePoolScope block_trampoline_pool(this);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
jic(base, offset);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jic(base, offset);
}
} else {
if (offset != 0) {
Addu(target, base, offset);
} else { // Call through target
if (!target.is(base)) mov(target, base);
}
if (cond == cc_always) {
jr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jr(target);
}
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
}
void MacroAssembler::Jump(Register target, const Operand& offset,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT &&
!is_int16(offset.immediate())) {
uint32_t aui_offset, jic_offset;
Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset,
jic_offset);
RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate());
aui(target, target, aui_offset);
if (cond == cc_always) {
jic(target, jic_offset);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jic(target, jic_offset);
}
} else {
if (offset.immediate() != 0) {
Addu(target, target, offset);
}
if (cond == cc_always) {
jr(target);
} else {
@ -3635,14 +3700,24 @@ void MacroAssembler::Jump(intptr_t target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label skip;
if (cond != cc_always) {
Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
}
// The first instruction of 'li' may be placed in the delay slot.
// This is not an issue, t9 is expected to be clobbered anyway.
li(t9, Operand(target, rmode));
Jump(t9, al, zero_reg, Operand(zero_reg), bd);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
uint32_t lui_offset, jic_offset;
UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
DCHECK(MustUseReg(rmode));
RecordRelocInfo(rmode, target);
lui(t9, lui_offset);
Jump(t9, jic_offset, al, zero_reg, Operand(zero_reg), bd);
} else {
li(t9, Operand(target, rmode));
Jump(t9, 0, al, zero_reg, Operand(zero_reg), bd);
}
bind(&skip);
}
@ -3669,11 +3744,8 @@ void MacroAssembler::Jump(Handle<Code> code,
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
int MacroAssembler::CallSize(Register target,
Condition cond,
Register rs,
const Operand& rt,
int MacroAssembler::CallSize(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = 0;
@ -3685,16 +3757,18 @@ int MacroAssembler::CallSize(Register target,
if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
if (!IsMipsArchVariant(kMips32r6) && offset != 0) {
size += 1;
}
return size * kInstrSize;
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
void MacroAssembler::Call(Register target,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
void MacroAssembler::Call(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
DCHECK(is_int16(offset));
#ifdef DEBUG
int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
#endif
@ -3704,13 +3778,16 @@ void MacroAssembler::Call(Register target,
bind(&start);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
jialc(target, 0);
jialc(target, offset);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jialc(target, 0);
jialc(target, offset);
}
} else {
if (offset != 0) {
Addu(target, target, offset);
}
if (cond == cc_always) {
jalr(target);
} else {
@ -3723,7 +3800,50 @@ void MacroAssembler::Call(Register target,
}
#ifdef DEBUG
CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
CHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
#endif
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
void MacroAssembler::Call(Register target, Register base, int16_t offset,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(is_uint16(offset));
#ifdef DEBUG
int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
#endif
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
jialc(base, offset);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jialc(base, offset);
}
} else {
if (offset != 0) {
Addu(target, base, offset);
} else { // Call through target
if (!target.is(base)) mov(target, base);
}
if (cond == cc_always) {
jalr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jalr(target);
}
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
#ifdef DEBUG
CHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
#endif
}
@ -3735,8 +3855,11 @@ int MacroAssembler::CallSize(Address target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
int size = CallSize(t9, cond, rs, rt, bd);
return size + 2 * kInstrSize;
int size = CallSize(t9, 0, cond, rs, rt, bd);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always)
return size + 1 * kInstrSize;
else
return size + 2 * kInstrSize;
}
@ -3746,12 +3869,23 @@ void MacroAssembler::Call(Address target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
CheckBuffer();
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
int32_t target_int = reinterpret_cast<int32_t>(target);
li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, cond, rs, rt, bd);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
uint32_t lui_offset, jialc_offset;
UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset);
if (MustUseReg(rmode)) {
RecordRelocInfo(rmode, target_int);
}
lui(t9, lui_offset);
Call(t9, jialc_offset, cond, rs, rt, bd);
} else {
li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, 0, cond, rs, rt, bd);
}
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
@ -3796,7 +3930,7 @@ void MacroAssembler::Ret(Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, cond, rs, rt, bd);
Jump(ra, 0, cond, rs, rt, bd);
}
@ -3825,9 +3959,8 @@ void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
// references
// until associated instructions are emitted and available to be
// patched.
// references until associated instructions are emitted and
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
@ -3850,8 +3983,8 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
uint32_t imm32;
imm32 = jump_address(L);
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
uint32_t lui_offset, jic_offset;
UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
uint32_t lui_offset, jialc_offset;
UnpackTargetAddressUnsigned(imm32, lui_offset, jialc_offset);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
@ -3859,16 +3992,15 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, lui_offset);
jialc(at, jic_offset);
jialc(at, jialc_offset);
}
CheckBuffer();
} else {
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
// references
// until associated instructions are emitted and available to be
// patched.
// references until associated instructions are emitted and
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
@ -6047,15 +6179,27 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
li(t8, Operand(function));
CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
if (IsMipsArchVariant(kMips32r6)) {
uint32_t lui_offset, jialc_offset;
UnpackTargetAddressUnsigned(Operand(function).immediate(), lui_offset,
jialc_offset);
if (MustUseReg(Operand(function).rmode())) {
RecordRelocInfo(Operand(function).rmode(), Operand(function).immediate());
}
lui(t9, lui_offset);
CallCFunctionHelper(t9, jialc_offset, num_reg_arguments,
num_double_arguments);
} else {
li(t9, Operand(function));
CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
}
}
void MacroAssembler::CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments);
}
@ -6070,8 +6214,8 @@ void MacroAssembler::CallCFunction(Register function,
CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
void MacroAssembler::CallCFunctionHelper(Register function_base,
int16_t function_offset,
int num_reg_arguments,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
@ -6103,12 +6247,12 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
if (!function.is(t9)) {
mov(t9, function);
function = t9;
if (!function_base.is(t9)) {
mov(t9, function_base);
function_base = t9;
}
Call(function);
Call(function_base, function_offset);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@ -6443,6 +6587,7 @@ CodePatcher::~CodePatcher() {
}
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

View File

@ -180,12 +180,15 @@ class MacroAssembler: public Assembler {
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void Jump(Register target, COND_ARGS);
void Jump(Register target, int16_t offset = 0, COND_ARGS);
void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
void Jump(Register target, const Operand& offset, COND_ARGS);
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
static int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
static int CallSize(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
@ -1664,9 +1667,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments);
void CallCFunctionHelper(Register function_base, int16_t function_offset,
int num_reg_arguments, int num_double_arguments);
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);