Remove dead TurboAssembler::CallSize methods

R=mstarzinger@chromium.org

Bug: v8:7754
Change-Id: I470813e241ace22b2e39b7bb9ff26dd824b50426
Reviewed-on: https://chromium-review.googlesource.com/1142162
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54555}
This commit is contained in:
Clemens Hammacher 2018-07-19 11:51:25 +02:00 committed by Commit Bot
parent 0f2d22dd22
commit af0451d96b
18 changed files with 12 additions and 390 deletions

View File

@@ -227,7 +227,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(
tasm, tasm->CallStubSize() + 2 * Assembler::kInstrSize);
tasm, TurboAssembler::kCallStubSize + 2 * Assembler::kInstrSize);
tasm->push(lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(lr);
@@ -239,7 +239,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(
masm, masm->CallStubSize() + 2 * Assembler::kInstrSize);
masm, TurboAssembler::kCallStubSize + 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);

View File

@@ -219,29 +219,10 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}
int TurboAssembler::CallSize(Register target, Condition cond) {
return kInstrSize;
}
void TurboAssembler::Call(Register target, Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
blx(target, cond);
DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
Instr mov_instr = cond | MOV | LeaveCC;
Operand mov_operand = Operand(target, rmode);
return kInstrSize +
mov_operand.InstructionsRequired(this, mov_instr) * kInstrSize;
}
int TurboAssembler::CallStubSize() {
return CallSize(Handle<Code>(), RelocInfo::CODE_TARGET, al);
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
@@ -251,20 +232,12 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
if (check_constant_pool) MaybeCheckConstPool();
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
bool old_predictable_code_size = predictable_code_size();
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(true);
}
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallSize(target, rmode, cond);
#endif
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
@@ -282,17 +255,11 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
mov(ip, Operand(target, rmode));
blx(ip, cond);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
return CallSize(code.address(), rmode, cond);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, TargetAddressStorageMode mode,
bool check_constant_pool) {
@@ -1698,13 +1665,10 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallStubSize();
Label start;
bind(&start);
#endif
// Call sequence on V7 or later may be :
@@ -1721,7 +1685,7 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
mov(ip, Operand::EmbeddedCode(stub));
blx(ip, al);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
DCHECK_EQ(kCallStubSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {

View File

@@ -324,16 +324,7 @@ class TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call.
static int CallSize(Register target, Condition cond = al);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
int CallStubSize();
static constexpr int kCallStubSize = 2 * kInstrSize;
void CallStubDelayed(CodeStub* stub);
// Call a runtime routine. This expects {centry} to contain a fitting CEntry

View File

@@ -1008,8 +1008,6 @@ class Assembler : public AssemblerBase {
static constexpr int kSpecialTargetSize = 0;
// The sizes of the call sequences emitted by MacroAssembler::Call.
// Wherever possible, use MacroAssembler::CallSize instead of these constants,
// as it will choose the correct value for a given relocation mode.
//
// A "near" call is encoded in a BL immediate instruction:
// bl target
@@ -1034,16 +1032,6 @@
return pc_offset() - label->pos();
}
// Check the size of the code generated since the given label. This function
// is used primarily to work around comparisons between signed and unsigned
// quantities, since V8 uses both.
// TODO(jbramley): Work out what sign to use for these things and if possible,
// change things to be consistent.
void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
DCHECK_GE(size, 0);
DCHECK_EQ(static_cast<uint64_t>(size), SizeOfCodeGeneratedSince(label));
}
// Return the number of instructions generated from label to the
// current position.
uint64_t InstructionsGeneratedSince(const Label* label) {

View File

@@ -1717,14 +1717,12 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
Label start;
Bind(&start);
#endif
Operand operand = Operand::EmbeddedCode(stub);
near_call(operand.heap_object_request());
#ifdef DEBUG
AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize);
#endif
DCHECK_EQ(kNearCallSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallStub(CodeStub* stub) {
@@ -1970,26 +1968,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Register target) {
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
#endif
Blr(target);
#ifdef DEBUG
AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
#endif
}
// TurboAssembler::CallSize is sensitive to changes in this function, as it
// requires to know how many instructions are used to branch to the target.
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
#endif
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
@@ -1998,17 +1981,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
} else {
IndirectCall(target, rmode);
}
#ifdef DEBUG
AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
#endif
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
#endif
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
@@ -2045,11 +2021,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
} else {
IndirectCall(code.address(), rmode);
}
#ifdef DEBUG
// Check the size of the code generated.
AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode));
#endif
}
void TurboAssembler::Call(ExternalReference target) {
@@ -2078,8 +2049,8 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
Label start;
Bind(&start);
#endif
// The deoptimizer requires the deoptimization id to be in x16.
UseScratchRegisterScope temps(this);
@@ -2096,24 +2067,7 @@
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
#ifdef DEBUG
AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize + kInstructionSize);
#endif
}
int TurboAssembler::CallSize(Register target) {
USE(target);
return kInstructionSize;
}
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
USE(target);
return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize;
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
USE(code);
return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize;
DCHECK_EQ(kNearCallSize + kInstructionSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,

View File

@@ -900,13 +900,6 @@ class TurboAssembler : public TurboAssemblerBase {
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode);
// For every Call variant, there is a matching CallSize function that returns
// the size (in bytes) of the call sequence.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
// Calls a C function.
// The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the

View File

@@ -1606,22 +1606,12 @@ void Assembler::wasm_call(Address entry, RelocInfo::Mode rmode) {
emit(entry, rmode);
}
int Assembler::CallSize(Operand adr) {
// Call size is 1 (opcode) + adr.len_ (operand).
return 1 + adr.len_;
}
void Assembler::call(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(edx, adr);
}
int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
}
void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));

View File

@@ -850,10 +850,8 @@ class Assembler : public AssemblerBase {
// Calls
void call(Label* L);
void call(Address entry, RelocInfo::Mode rmode);
int CallSize(Operand adr);
void call(Register reg) { call(Operand(reg)); }
void call(Operand adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code, RelocInfo::Mode rmode);
void call(CodeStub* stub);
void wasm_call(Address address, RelocInfo::Mode rmode);

View File

@@ -3793,27 +3793,6 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
int TurboAssembler::CallSize(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = 0;
if (cond == cc_always) {
size += 1;
} else {
size += 3;
}
if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
if (!IsMipsArchVariant(kMips32r6) && offset != 0) {
size += 1;
}
return size * kInstrSize;
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
@@ -3823,8 +3802,6 @@ void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
#endif
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
jialc(target, offset);
@@ -3847,11 +3824,6 @@ void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
#ifdef DEBUG
DCHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
#endif
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
@@ -3864,8 +3836,6 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
#endif
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
jialc(base, offset);
@@ -3890,29 +3860,12 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
#ifdef DEBUG
DCHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
#endif
}
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = CallSize(t9, 0, cond, rs, rt, bd);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always)
return size + 1 * kInstrSize;
else
return size + 2 * kInstrSize;
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
CheckBuffer();
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
int32_t target_int = static_cast<int32_t>(target);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
uint32_t lui_offset, jialc_offset;
@@ -3926,15 +3879,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, 0, cond, rs, rt, bd);
}
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
AllowDeferredHandleDereference using_raw_address;
return CallSize(code.address(), rmode, cond, rs, rt, bd);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
@@ -3960,13 +3904,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
}
}
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
AllowDeferredHandleDereference embedding_raw_address;
Call(code.address(), rmode, cond, rs, rt, bd);
DCHECK_EQ(CallSize(code, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,

View File

@@ -264,14 +264,9 @@ class TurboAssembler : public TurboAssemblerBase {
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
static int CallSize(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);

View File

@@ -4218,22 +4218,6 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
int TurboAssembler::CallSize(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
int size = 0;
if (cond == cc_always) {
size += 1;
} else {
size += 3;
}
if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
return size * kInstrSize;
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
@@ -4242,8 +4226,6 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
#endif
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
if (kArchVariant == kMips64r6 && bd == PROTECT) {
if (cond == cc_always) {
jialc(target, 0);
@@ -4263,35 +4245,13 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
#ifdef DEBUG
DCHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
#endif
}
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = CallSize(t9, cond, rs, rt, bd);
return size + 4 * kInstrSize;
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
li(t9, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
Call(t9, cond, rs, rt, bd);
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
return CallSize(code.address(), rmode, cond, rs, rt, bd);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
@@ -4318,12 +4278,8 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
}
}
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
Call(code.address(), rmode, cond, rs, rt, bd);
DCHECK_EQ(CallSize(code, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,

View File

@@ -282,13 +282,8 @@ class TurboAssembler : public TurboAssemblerBase {
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
static int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);

View File

@@ -212,18 +212,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}
int TurboAssembler::CallSize(Register target) { return 2 * kInstrSize; }
void TurboAssembler::Call(Register target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
// branch via link register and set LK bit for return point
mtctr(target);
bctrl();
DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallJSEntry(Register target) {
@@ -231,12 +224,6 @@ void MacroAssembler::CallJSEntry(Register target) {
Call(target);
}
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
Operand mov_operand = Operand(target, rmode);
return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
}
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
@@ -248,13 +235,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(cond == al);
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallSize(target, rmode, cond);
Label start;
bind(&start);
#endif
// This can likely be optimized to make use of bc() with 24bit relative
//
// RecordRelocInfo(x.rmode_, x.immediate);
@@ -264,13 +244,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
mov(ip, Operand(target, rmode));
mtctr(ip);
bctrl();
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
return CallSize(code.address(), rmode, cond);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,

View File

@@ -445,12 +445,6 @@ class TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
@@ -461,9 +455,6 @@ class TurboAssembler : public TurboAssemblerBase {
CRegister cr = cr7);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Call(Label* target);

View File

@@ -204,16 +204,9 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
jump(code, rmode, cond);
}
int TurboAssembler::CallSize(Register target) { return 2; } // BASR
void TurboAssembler::Call(Register target) {
Label start;
bind(&start);
// Branch to target via indirect branch
basr(r14, target);
DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallJSEntry(Register target) {
@@ -221,18 +214,6 @@ void MacroAssembler::CallJSEntry(Register target) {
Call(target);
}
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
// S390 Assembler::move sequence is IILF / IIHF
int size;
#if V8_TARGET_ARCH_S390X
size = 14; // IILF + IIHF + BASR
#else
size = 8; // IILF + BASR
#endif
return size;
}
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
@@ -250,23 +231,8 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(cond == al);
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallSize(target, rmode, cond);
Label start;
bind(&start);
#endif
mov(ip, Operand(target, rmode));
basr(r14, ip);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
return 6; // BRASL
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,

View File

@@ -185,10 +185,6 @@ class TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
// Returns the size of a call in instructions.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
@@ -203,9 +199,6 @@ class TurboAssembler : public TurboAssemblerBase {
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Ret() { b(r14); }

View File

@@ -186,27 +186,6 @@ Operand TurboAssembler::ExternalOperand(ExternalReference target,
return Operand(scratch, 0);
}
int TurboAssembler::LoadAddressSize(ExternalReference source) {
if (root_array_available_ && options().enable_root_array_delta_access) {
// This calculation depends on the internals of LoadAddress.
// It's correctness is ensured by the asserts in the Call
// instruction below.
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
// Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
if (!is_int8(static_cast<int32_t>(delta))) {
size += 3; // Need full four-byte displacement in lea.
}
return size;
}
}
// Size of movp(destination, src);
return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}
void MacroAssembler::PushAddress(ExternalReference source) {
LoadAddress(kScratchRegister, source);
Push(kScratchRegister);
@@ -1521,19 +1500,9 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
jmp(kOffHeapTrampolineRegister);
}
int TurboAssembler::CallSize(ExternalReference ext) {
// Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
return LoadAddressSize(ext) +
Assembler::kCallScratchRegisterInstructionLength;
}
void TurboAssembler::Call(ExternalReference ext) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(ext);
#endif
LoadAddress(kScratchRegister, ext);
call(kScratchRegister);
DCHECK_EQ(end_position, pc_offset());
}
void TurboAssembler::Call(Operand op) {
@@ -1546,12 +1515,8 @@ void TurboAssembler::Call(Operand op) {
}
void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(destination);
#endif
Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
DCHECK_EQ(pc_offset(), end_position);
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
@@ -1581,12 +1546,8 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
}
}
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code_object, rmode);
DCHECK_EQ(end_position, pc_offset());
}
void TurboAssembler::RetpolineCall(Register reg) {
@@ -1610,14 +1571,8 @@ void TurboAssembler::RetpolineCall(Register reg) {
}
void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
// TODO(titzer): CallSize() is wrong for RetpolineCalls
// int end_position = pc_offset() + CallSize(destination);
#endif
Move(kScratchRegister, destination, rmode);
RetpolineCall(kScratchRegister);
// TODO(titzer): CallSize() is wrong for RetpolineCalls
// DCHECK_EQ(pc_offset(), end_position);
}
void TurboAssembler::RetpolineJump(Register reg) {

View File

@@ -409,26 +409,6 @@ class TurboAssembler : public TurboAssemblerBase {
call(target, rmode);
}
// The size of the code generated for different call instructions.
int CallSize(ExternalReference ext);
int CallSize(Address destination) { return kCallSequenceLength; }
int CallSize(Handle<Code> code_object) {
// Code calls use 32-bit relative addressing.
return kShortCallInstructionLength;
}
int CallSize(Register target) {
// Opcode: REX_opt FF /2 m64
return (target.high_bit() != 0) ? 3 : 2;
}
int CallSize(Operand target) {
// Opcode: REX_opt FF /2 m64
return (target.requires_rex() ? 2 : 1) + target.operand_size();
}
// Returns the size of the code generated by LoadAddress.
// Used by CallSize(ExternalReference) to find the size of a call.
int LoadAddressSize(ExternalReference source);
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);