PPC/s390: [nojit] Skip the on-heap trampoline for builtin calls

Port ccc068d5fd

Original Commit Message:

    This CL does two things:

    1. It introduces Call/JumpCodeObject as the bottleneck for all calls
       to non-heap-constant Code objects; and
    2. it dispatches directly to the off-heap entry point for all embedded
       code.

    Codegen at runtime remains unchanged to preserve the shorter,
    branch-less calling sequence.

R=jgruber@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: I282a5711fdd481a1fde3569e72f0a6141ebcdf2a
Reviewed-on: https://chromium-review.googlesource.com/c/1396501
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58596}
Authored by Junliang Yan on 2019-01-07 10:43:22 -05:00, committed by Commit Bot
parent 398ee1ce57
commit fccd095552
13 changed files with 261 additions and 158 deletions
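
For orientation before the diffs: the new TurboAssembler::LoadCodeObjectEntry helpers added below (for both PPC and s390) pick a call target roughly as in the following C++ sketch. This is an illustrative sketch only; CodeSketch, the builtin_entry_table parameter, and the isolate_independent_code flag are placeholder names standing in for V8's Code object, the IsolateData builtin entry table, and the assembler option of the same name, not real V8 API.

#include <cstdint>
#include <vector>

// Illustrative sketch only: placeholder types, not V8 code. The real logic is
// emitted as PPC/s390 machine code by TurboAssembler::LoadCodeObjectEntry in
// the diffs below.
constexpr int kNoBuiltinId = -1;

struct CodeSketch {
  int builtin_index;            // kNoBuiltinId for regular, non-builtin code
  uintptr_t instruction_start;  // on-heap entry: object + Code::kHeaderSize - kHeapObjectTag
};

uintptr_t LoadCodeObjectEntry(const CodeSketch& code,
                              const std::vector<uintptr_t>& builtin_entry_table,
                              bool isolate_independent_code) {
  if (isolate_independent_code) {
    // Builtin-generating mode (must work without a JIT): never dispatch
    // through the on-heap trampoline of a builtin Code object.
    if (code.builtin_index != kNoBuiltinId) {
      // Builtin target: use its off-heap entry from the builtin entry table.
      return builtin_entry_table[code.builtin_index];
    }
    // Non-builtin Code object: its entry point is the instruction start.
    return code.instruction_start;
  }
  // Runtime codegen keeps the shorter, branchless sequence and always uses
  // the on-heap entry point.
  return code.instruction_start;
}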

View File

@@ -86,8 +86,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(r3);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(r5);
__ JumpCodeObject(r5);
}
namespace {
@@ -492,8 +491,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mr(r4, r7);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(r5);
__ JumpCodeObject(r5);
}
__ bind(&prepare_step_in_if_stepping);
@@ -944,8 +942,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ addi(r5, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(r5, optimized_code_entry);
__ Jump(r5);
// Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -1465,8 +1462,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// which has be reset to the compile lazy builtin.
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(r5);
__ JumpCodeObject(r5);
}
namespace {
@@ -2449,8 +2445,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r6 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ CallJSEntry(r5);
__ CallCodeObject(r5);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2465,8 +2460,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(r5);
__ JumpCodeObject(r5);
__ bind(&stack_overflow);
{

View File

@@ -86,8 +86,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(r2);
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(r4);
__ JumpCodeObject(r4);
}
namespace {
@@ -492,8 +491,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadRR(r3, r6);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(r4);
__ JumpCodeObject(r4);
}
__ bind(&prepare_step_in_if_stepping);
@@ -984,8 +982,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ AddP(r4, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(r4, optimized_code_entry);
__ Jump(r4);
// Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -1502,8 +1499,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// which has be reset to the compile lazy builtin.
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(r4);
__ JumpCodeObject(r4);
}
namespace {
@@ -2488,8 +2484,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r5 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ CallJSEntry(r4);
__ CallCodeObject(r4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2504,8 +2499,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(r4);
__ JumpCodeObject(r4);
__ bind(&stack_overflow);
{

View File

@@ -861,8 +861,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(reg);
__ CallCodeObject(reg);
} else {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
@@ -910,8 +909,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(reg);
__ JumpCodeObject(reg);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
@@ -966,8 +964,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
__ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(r5);
__ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();

View File

@@ -1368,8 +1368,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(reg);
__ CallCodeObject(reg);
} else {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
@@ -1415,8 +1414,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(reg);
__ JumpCodeObject(reg);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
@@ -1467,8 +1465,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(r4);
__ CallCodeObject(r4);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;

View File

@@ -14857,7 +14857,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_IA32)
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32)
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. They are later
// rewritten as pc-relative jumps to the off-heap instruction stream and are

View File

@@ -178,35 +178,38 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ppc code, never THUMB code
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
Register scratch = ip;
IndirectLoadConstant(scratch, code);
addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
Label skip;
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(scratch);
bind(&skip);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Label skip;
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(ip);
bind(&skip);
return;
}
}
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
Register scratch = ip;
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
LoadP(scratch, MemOperand(kRootRegister, offset), r0);
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(scratch);
bind(&skip);
return;
} else if (options().inline_offheap_trampolines &&
target_is_isolate_independent_builtin) {
// Inline the trampoline.
Label skip;
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(ip);
bind(&skip);
return;
}
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}
@@ -249,37 +252,39 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
Builtins::IsIsolateIndependentBuiltin(*code));
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
IndirectLoadConstant(ip, code);
addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
Label skip;
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
bind(&skip);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
DCHECK(Builtins::IsBuiltinId(builtin_index));
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Label skip;
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
bind(&skip);
return;
}
}
int builtin_index = Builtins::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
LoadP(ip, MemOperand(kRootRegister, offset));
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
bind(&skip);
return;
} else if (options().inline_offheap_trampolines &&
target_is_isolate_independent_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Label skip;
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
bind(&skip);
return;
}
Call(code.address(), rmode, cond);
}
@@ -1351,12 +1356,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
CallJSEntry(code);
CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
JumpToJSEntry(code);
JumpCodeObject(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1645,8 +1649,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
mov(r3, Operand(f->nargs));
Move(r4, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, r3, r4));
addi(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(centry);
CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -3039,6 +3042,65 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
Call(builtin_pointer);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
// * Builtin code runs in --jitless mode and thus must not call into on-heap
// Code targets. Instead, we dispatch through the builtins entry table.
// * Codegen at runtime does not have this restriction and we can use the
// shorter, branchless instruction sequence. The assumption here is that
// targets are usually generated code and not builtin Code objects.
if (options().isolate_independent_code) {
DCHECK(root_array_available());
Label if_code_is_builtin, out;
Register scratch = r11;
DCHECK(!AreAliased(destination, scratch));
DCHECK(!AreAliased(code_object, scratch));
// Check whether the Code object is a builtin. If so, call its (off-heap)
// entry point directly without going through the (on-heap) trampoline.
// Otherwise, just call the Code object as always.
LoadWordArith(scratch,
FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
cmpi(scratch, Operand(Builtins::kNoBuiltinId));
bne(&if_code_is_builtin);
// A non-builtin Code object, the entry point is at
// Code::raw_instruction_start().
addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
b(&out);
// A builtin Code object, the entry point is loaded from the builtin entry
// table.
// The builtin index is loaded in scratch.
bind(&if_code_is_builtin);
ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
add(destination, destination, kRootRegister);
LoadP(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()), r0);
bind(&out);
} else {
addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
}
}
void TurboAssembler::CallCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.

View File

@@ -412,6 +412,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Condition cond = al);
void Call(Label* target);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
void CallBuiltinPointer(Register builtin_pointer) override;

View File

@@ -92,7 +92,8 @@ Address RelocInfo::target_internal_reference_address() {
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -145,7 +146,8 @@ HeapObject RelocInfo::target_object() {
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));

View File

@@ -678,7 +678,7 @@ void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
EnsureSpace ensure_space(this);
RecordRelocInfo(rmode);

View File

@@ -29,8 +29,7 @@
namespace v8 {
namespace internal {
// TODO(sigurds): Change this value once we use relative jumps.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
// Number of registers
const int kNumRegisters = 16;

View File

@@ -175,30 +175,31 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
Register scratch = r1;
IndirectLoadConstant(scratch, code);
la(scratch, MemOperand(scratch, Code::kHeaderSize - kHeapObjectTag));
b(cond, scratch);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(ip, cond);
return;
}
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
if (options().inline_offheap_trampolines &&
target_is_isolate_independent_builtin) {
Label skip;
if (cond != al) {
b(NegateCondition(cond), &skip, Label::kNear);
}
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
b(ip);
bind(&skip);
return;
}
jump(code, rmode, cond);
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
}
void TurboAssembler::Call(Register target) {
@@ -236,30 +237,23 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
IndirectLoadConstant(ip, code);
la(ip, MemOperand(ip, Code::kHeaderSize - kHeapObjectTag));
Call(ip);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
DCHECK(Builtins::IsBuiltinId(builtin_index));
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(ip);
return;
}
}
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
if (options().inline_offheap_trampolines &&
target_is_isolate_independent_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(ip);
return;
}
call(code, rmode);
}
@@ -1415,12 +1409,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
AddP(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
CallJSEntry(code);
CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
JumpToJSEntry(code);
JumpCodeObject(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1595,8 +1588,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
mov(r2, Operand(f->nargs));
Move(r3, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, r2, r3));
la(centry, MemOperand(centry, Code::kHeaderSize - kHeapObjectTag));
Call(centry);
CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -4408,6 +4400,64 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
Call(builtin_pointer);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
// * Builtin code runs in --jitless mode and thus must not call into on-heap
// Code targets. Instead, we dispatch through the builtins entry table.
// * Codegen at runtime does not have this restriction and we can use the
// shorter, branchless instruction sequence. The assumption here is that
// targets are usually generated code and not builtin Code objects.
if (options().isolate_independent_code) {
DCHECK(root_array_available());
Label if_code_is_builtin, out;
Register scratch = r1;
DCHECK(!AreAliased(destination, scratch));
DCHECK(!AreAliased(code_object, scratch));
// Check whether the Code object is a builtin. If so, call its (off-heap)
// entry point directly without going through the (on-heap) trampoline.
// Otherwise, just call the Code object as always.
LoadW(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
CmpP(scratch, Operand(Builtins::kNoBuiltinId));
bne(&if_code_is_builtin);
// A non-builtin Code object, the entry point is at
// Code::raw_instruction_start().
AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
b(&out);
// A builtin Code object, the entry point is loaded from the builtin entry
// table.
// The builtin index is loaded in scratch.
bind(&if_code_is_builtin);
ShiftLeftP(destination, scratch, Operand(kSystemPointerSizeLog2));
AddP(destination, destination, kRootRegister);
LoadP(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
}
}
void TurboAssembler::CallCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.

View File

@@ -177,6 +177,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
void CallBuiltinPointer(Register builtin_pointer) override;
// Register move. May do nothing if the registers are identical.

View File

@@ -137,7 +137,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_IA32)
defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390)
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. This fixes up the
// relative jumps to the right offsets in the snapshot.