PPC/s390: [nojit] Skip the on-heap trampoline for builtin calls
Port ccc068d5fd
Original Commit Message:
This CL does two things:
1. It introduces Call/JumpCodeObject as the bottleneck for all calls
to non-heap-constant Code objects; and
2. it dispatches directly to the off-heap entry point for all embedded
code.
Codegen at runtime remains unchanged to preserve the shorter,
branch-less calling sequence.
R=jgruber@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N
Change-Id: I282a5711fdd481a1fde3569e72f0a6141ebcdf2a
Reviewed-on: https://chromium-review.googlesource.com/c/1396501
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58596}
Parent: 398ee1ce57
Commit: fccd095552
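The core of the port is the TurboAssembler::LoadCodeObjectEntry helper added for both PPC and s390 in the diff below: when generating isolate-independent (embedded builtin) code, the entry point of a builtin Code target is read from the off-heap builtin entry table instead of being computed from the on-heap Code object's header. The following self-contained C++ sketch models that decision; the struct, table, and constant values are simplified stand-ins chosen for illustration, not real V8 declarations.

#include <cstdint>

// Hypothetical model of the dispatch performed by LoadCodeObjectEntry.
// kNoBuiltinId / kHeaderSize / kHeapObjectTag mirror names used in the diff;
// their values and the surrounding types are illustrative placeholders.
constexpr int kNoBuiltinId = -1;
constexpr uintptr_t kHeaderSize = 64;    // stand-in for Code::kHeaderSize
constexpr uintptr_t kHeapObjectTag = 1;  // heap pointers carry a tag bit

struct CodeObject {
  uintptr_t tagged_address;  // tagged pointer to the on-heap Code object
  int builtin_index;         // kNoBuiltinId for regular generated code
};

// builtin_entry_table: off-heap entry points indexed by builtin id; at
// runtime it is reachable at a fixed offset from the root register.
uintptr_t CodeEntry(const CodeObject& code,
                    const uintptr_t* builtin_entry_table,
                    bool isolate_independent_code) {
  if (isolate_independent_code && code.builtin_index != kNoBuiltinId) {
    // Builtin target: skip the on-heap trampoline and use the off-heap
    // entry point from the builtin entry table.
    return builtin_entry_table[code.builtin_index];
  }
  // Regular Code object: the entry point sits right after the Code header.
  return code.tagged_address - kHeapObjectTag + kHeaderSize;
}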
@@ -86,8 +86,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ SmiUntag(r3);
   }
   static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
-  __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ JumpToJSEntry(r5);
+  __ JumpCodeObject(r5);
 }

 namespace {
@@ -492,8 +491,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ mr(r4, r7);
     static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
     __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
-    __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ JumpToJSEntry(r5);
+    __ JumpCodeObject(r5);
   }

   __ bind(&prepare_step_in_if_stepping);
@@ -944,8 +942,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
     static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
-    __ addi(r5, optimized_code_entry,
-            Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ LoadCodeObjectEntry(r5, optimized_code_entry);
     __ Jump(r5);

     // Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -1465,8 +1462,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   // which has be reset to the compile lazy builtin.
   static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
   __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
-  __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ JumpToJSEntry(r5);
+  __ JumpCodeObject(r5);
 }

 namespace {
@@ -2449,8 +2445,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     // r6 : new target (passed through to callee)
     static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
     __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
-    __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ CallJSEntry(r5);
+    __ CallCodeObject(r5);

     // Store offset of return address for deoptimizer.
     masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2465,8 +2460,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   __ bind(&dont_adapt_arguments);
   static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
   __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
-  __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ JumpToJSEntry(r5);
+  __ JumpCodeObject(r5);

   __ bind(&stack_overflow);
   {
@@ -86,8 +86,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ SmiUntag(r2);
   }
   static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
-  __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ JumpToJSEntry(r4);
+  __ JumpCodeObject(r4);
 }

 namespace {
@@ -492,8 +491,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ LoadRR(r3, r6);
     static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
     __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
-    __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ JumpToJSEntry(r4);
+    __ JumpCodeObject(r4);
   }

   __ bind(&prepare_step_in_if_stepping);
@@ -984,8 +982,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
     static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
-    __ AddP(r4, optimized_code_entry,
-            Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ LoadCodeObjectEntry(r4, optimized_code_entry);
     __ Jump(r4);

     // Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -1502,8 +1499,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   // which has be reset to the compile lazy builtin.
   static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
-  __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ JumpToJSEntry(r4);
+  __ JumpCodeObject(r4);
 }

 namespace {
@@ -2488,8 +2484,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     // r5 : new target (passed through to callee)
     static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
     __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
-    __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ CallJSEntry(r4);
+    __ CallCodeObject(r4);

     // Store offset of return address for deoptimizer.
     masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2504,8 +2499,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   __ bind(&dont_adapt_arguments);
   static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
-  __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ JumpToJSEntry(r4);
+  __ JumpCodeObject(r4);

   __ bind(&stack_overflow);
   {
@@ -861,8 +861,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         DCHECK_IMPLIES(
             HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
-        __ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-        __ Call(reg);
+        __ CallCodeObject(reg);
       } else {
         __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
       }
@@ -910,8 +909,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         DCHECK_IMPLIES(
             HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
-        __ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-        __ Jump(reg);
+        __ JumpCodeObject(reg);
       } else {
         // We cannot use the constant pool to load the target since
         // we've already restored the caller's frame.
@@ -966,8 +964,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
      __ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
-      __ Call(r5);
+      __ CallCodeObject(r5);
       RecordCallPosition(instr);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       frame_access_state()->ClearSPDelta();
@@ -1368,8 +1368,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         DCHECK_IMPLIES(
             HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
-        __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-        __ Call(reg);
+        __ CallCodeObject(reg);
       } else {
         __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
       }
@@ -1415,8 +1414,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         DCHECK_IMPLIES(
             HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
-        __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-        __ Jump(reg);
+        __ JumpCodeObject(reg);
       } else {
         // We cannot use the constant pool to load the target since
         // we've already restored the caller's frame.
@@ -1467,8 +1465,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
      static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
      __ LoadP(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-      __ Call(r4);
+      __ CallCodeObject(r4);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -14857,7 +14857,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
   for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
 #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
     defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) ||  \
-    defined(V8_TARGET_ARCH_IA32)
+    defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32)
     // On these platforms we emit relative builtin-to-builtin
     // jumps for isolate independent builtins in the snapshot. They are later
     // rewritten as pc-relative jumps to the off-heap instruction stream and are
@@ -178,35 +178,38 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, CRegister cr) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
-  // 'code' is always generated ppc code, never THUMB code
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      Register scratch = ip;
-      IndirectLoadConstant(scratch, code);
-      addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
-      Label skip;
-      if (cond != al) b(NegateCondition(cond), &skip, cr);
-      Jump(scratch);
-      bind(&skip);
-      return;
-    } else if (options().inline_offheap_trampolines) {
-      int builtin_index = Builtins::kNoBuiltinId;
-      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-          Builtins::IsIsolateIndependent(builtin_index)) {
-        // Inline the trampoline.
-        RecordCommentForOffHeapTrampoline(builtin_index);
-        EmbeddedData d = EmbeddedData::FromBlob();
-        Address entry = d.InstructionStartOfBuiltin(builtin_index);
-        // Use ip directly instead of using UseScratchRegisterScope, as we do
-        // not preserve scratch registers across calls.
-        mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-        Label skip;
-        if (cond != al) b(NegateCondition(cond), &skip, cr);
-        Jump(ip);
-        bind(&skip);
-        return;
-      }
-    }
+  DCHECK_IMPLIES(options().isolate_independent_code,
+                 Builtins::IsIsolateIndependentBuiltin(*code));
+
+  int builtin_index = Builtins::kNoBuiltinId;
+  bool target_is_isolate_independent_builtin =
+      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+      Builtins::IsIsolateIndependent(builtin_index);
+
+  if (root_array_available_ && options().isolate_independent_code) {
+    Label skip;
+    Register scratch = ip;
+    int offset = code->builtin_index() * kSystemPointerSize +
+                 IsolateData::builtin_entry_table_offset();
+    LoadP(scratch, MemOperand(kRootRegister, offset), r0);
+    if (cond != al) b(NegateCondition(cond), &skip, cr);
+    Jump(scratch);
+    bind(&skip);
+    return;
+  } else if (options().inline_offheap_trampolines &&
+             target_is_isolate_independent_builtin) {
+    // Inline the trampoline.
+    Label skip;
+    RecordCommentForOffHeapTrampoline(builtin_index);
+    EmbeddedData d = EmbeddedData::FromBlob();
+    Address entry = d.InstructionStartOfBuiltin(builtin_index);
+    // Use ip directly instead of using UseScratchRegisterScope, as we do
+    // not preserve scratch registers across calls.
+    mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+    if (cond != al) b(NegateCondition(cond), &skip, cr);
+    Jump(ip);
+    bind(&skip);
+    return;
   }
   Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
 }
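In the isolate-independent branch above, the jump target is not the on-heap Code object but a slot in the builtin entry table, addressed at a fixed offset from kRootRegister. A minimal sketch of that offset arithmetic, with placeholder constants standing in for the real IsolateData layout:

#include <cstdint>

// Placeholder values; the real ones come from IsolateData and the target
// word size. Only the arithmetic mirrors the LoadP offset computed above.
constexpr int kSystemPointerSize = 8;             // assumes a 64-bit target
constexpr int kBuiltinEntryTableOffset = 0x1000;  // hypothetical table offset

// Mirrors: builtin_index * kSystemPointerSize +
//          IsolateData::builtin_entry_table_offset()
constexpr int BuiltinEntrySlotOffset(int builtin_index) {
  return builtin_index * kSystemPointerSize + kBuiltinEntryTableOffset;
}

static_assert(BuiltinEntrySlotOffset(0) == 0x1000, "slot 0 is the table base");
static_assert(BuiltinEntrySlotOffset(3) == 0x1018, "slots are pointer-sized");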
@@ -249,37 +252,39 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   DCHECK(RelocInfo::IsCodeTarget(rmode));
+  DCHECK_IMPLIES(options().isolate_independent_code,
+                 Builtins::IsIsolateIndependentBuiltin(*code));
+  DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
+                 Builtins::IsIsolateIndependentBuiltin(*code));

-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      // Use ip directly instead of using UseScratchRegisterScope, as we do not
-      // preserve scratch registers across calls.
-      IndirectLoadConstant(ip, code);
-      addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-      Label skip;
-      if (cond != al) b(NegateCondition(cond), &skip);
-      Call(ip);
-      bind(&skip);
-      return;
-    } else if (options().inline_offheap_trampolines) {
-      int builtin_index = Builtins::kNoBuiltinId;
-      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-          Builtins::IsIsolateIndependent(builtin_index)) {
-        // Inline the trampoline.
-        RecordCommentForOffHeapTrampoline(builtin_index);
-        DCHECK(Builtins::IsBuiltinId(builtin_index));
-        EmbeddedData d = EmbeddedData::FromBlob();
-        Address entry = d.InstructionStartOfBuiltin(builtin_index);
-        // Use ip directly instead of using UseScratchRegisterScope, as we do
-        // not preserve scratch registers across calls.
-        mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-        Label skip;
-        if (cond != al) b(NegateCondition(cond), &skip);
-        Call(ip);
-        bind(&skip);
-        return;
-      }
-    }
+  int builtin_index = Builtins::kNoBuiltinId;
+  bool target_is_isolate_independent_builtin =
+      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+      Builtins::IsIsolateIndependent(builtin_index);
+
+  if (root_array_available_ && options().isolate_independent_code) {
+    Label skip;
+    int offset = code->builtin_index() * kSystemPointerSize +
+                 IsolateData::builtin_entry_table_offset();
+    LoadP(ip, MemOperand(kRootRegister, offset));
+    if (cond != al) b(NegateCondition(cond), &skip);
+    Call(ip);
+    bind(&skip);
+    return;
+  } else if (options().inline_offheap_trampolines &&
+             target_is_isolate_independent_builtin) {
+    // Inline the trampoline.
+    RecordCommentForOffHeapTrampoline(builtin_index);
+    EmbeddedData d = EmbeddedData::FromBlob();
+    Address entry = d.InstructionStartOfBuiltin(builtin_index);
+    // Use ip directly instead of using UseScratchRegisterScope, as we do
+    // not preserve scratch registers across calls.
+    mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+    Label skip;
+    if (cond != al) b(NegateCondition(cond), &skip);
+    Call(ip);
+    bind(&skip);
+    return;
   }
   Call(code.address(), rmode, cond);
 }
@@ -1351,12 +1356,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
     // call sites.
     Register code = kJavaScriptCallCodeStartRegister;
     LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
-    addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
     if (flag == CALL_FUNCTION) {
-      CallJSEntry(code);
+      CallCodeObject(code);
     } else {
       DCHECK(flag == JUMP_FUNCTION);
-      JumpToJSEntry(code);
+      JumpCodeObject(code);
     }

     // Continue here if InvokePrologue does handle the invocation due to
@@ -1645,8 +1649,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
   mov(r3, Operand(f->nargs));
   Move(r4, ExternalReference::Create(f));
   DCHECK(!AreAliased(centry, r3, r4));
-  addi(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  Call(centry);
+  CallCodeObject(centry);
 }

 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -3039,6 +3042,65 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
   Call(builtin_pointer);
 }

+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+                                         Register code_object) {
+  // Code objects are called differently depending on whether we are generating
+  // builtin code (which will later be embedded into the binary) or compiling
+  // user JS code at runtime.
+  // * Builtin code runs in --jitless mode and thus must not call into on-heap
+  //   Code targets. Instead, we dispatch through the builtins entry table.
+  // * Codegen at runtime does not have this restriction and we can use the
+  //   shorter, branchless instruction sequence. The assumption here is that
+  //   targets are usually generated code and not builtin Code objects.
+
+  if (options().isolate_independent_code) {
+    DCHECK(root_array_available());
+    Label if_code_is_builtin, out;
+
+    Register scratch = r11;
+
+    DCHECK(!AreAliased(destination, scratch));
+    DCHECK(!AreAliased(code_object, scratch));
+
+    // Check whether the Code object is a builtin. If so, call its (off-heap)
+    // entry point directly without going through the (on-heap) trampoline.
+    // Otherwise, just call the Code object as always.
+
+    LoadWordArith(scratch,
+                  FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+    cmpi(scratch, Operand(Builtins::kNoBuiltinId));
+    bne(&if_code_is_builtin);
+
+    // A non-builtin Code object, the entry point is at
+    // Code::raw_instruction_start().
+    addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+    b(&out);
+
+    // A builtin Code object, the entry point is loaded from the builtin entry
+    // table.
+    // The builtin index is loaded in scratch.
+    bind(&if_code_is_builtin);
+    ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
+    add(destination, destination, kRootRegister);
+    LoadP(destination,
+          MemOperand(destination, IsolateData::builtin_entry_table_offset()), r0);
+
+    bind(&out);
+  } else {
+    addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+  }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+  LoadCodeObjectEntry(code_object, code_object);
+  Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+  LoadCodeObjectEntry(code_object, code_object);
+  Jump(code_object);
+}
+
 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
   // This generates the final instruction sequence for calls to C functions
   // once an exit frame has been constructed.
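With LoadCodeObjectEntry in place, the call-site hunks earlier in the diff reduce to a one-line change. The block below is an illustrative, comment-only summary of a typical site (drawn from the builtins-ppc.cc hunks above), not new generated code:

// Before this change, each call site computed the entry point by hand:
//   __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
//   __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
//   __ CallJSEntry(r5);
//
// After this change, the same site goes through the shared bottleneck:
//   __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
//   __ CallCodeObject(r5);
//
// At runtime codegen, CallCodeObject expands to the same addi + Call pair;
// when generating embedded builtins it instead loads the off-heap entry
// point from the builtin entry table, as LoadCodeObjectEntry above shows.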
@@ -412,6 +412,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
             Condition cond = al);
   void Call(Label* target);

+  void LoadCodeObjectEntry(Register destination, Register code_object) override;
+  void CallCodeObject(Register code_object) override;
+  void JumpCodeObject(Register code_object) override;
+
   void CallBuiltinPointer(Register builtin_pointer) override;


@@ -92,7 +92,8 @@ Address RelocInfo::target_internal_reference_address() {
 }

 Address RelocInfo::target_address() {
-  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+  DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
+         IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
   return Assembler::target_address_at(pc_, constant_pool_);
 }

@@ -145,7 +146,8 @@ HeapObject RelocInfo::target_object() {
 }

 Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
-  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
+         rmode_ == EMBEDDED_OBJECT);
   if (rmode_ == EMBEDDED_OBJECT) {
     return Handle<HeapObject>(reinterpret_cast<Address*>(
         Assembler::target_address_at(pc_, constant_pool_)));
@@ -678,7 +678,7 @@ void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {

 void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
                      Condition cond) {
-  DCHECK(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
   EnsureSpace ensure_space(this);

   RecordRelocInfo(rmode);
|
|||||||
namespace v8 {
|
namespace v8 {
|
||||||
namespace internal {
|
namespace internal {
|
||||||
|
|
||||||
// TODO(sigurds): Change this value once we use relative jumps.
|
constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
|
||||||
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
|
|
||||||
|
|
||||||
// Number of registers
|
// Number of registers
|
||||||
const int kNumRegisters = 16;
|
const int kNumRegisters = 16;
|
||||||
|
@ -175,30 +175,31 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
|
|||||||
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
|
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
|
||||||
Condition cond) {
|
Condition cond) {
|
||||||
DCHECK(RelocInfo::IsCodeTarget(rmode));
|
DCHECK(RelocInfo::IsCodeTarget(rmode));
|
||||||
if (FLAG_embedded_builtins) {
|
DCHECK_IMPLIES(options().isolate_independent_code,
|
||||||
if (root_array_available_ && options().isolate_independent_code) {
|
Builtins::IsIsolateIndependentBuiltin(*code));
|
||||||
Register scratch = r1;
|
|
||||||
IndirectLoadConstant(scratch, code);
|
int builtin_index = Builtins::kNoBuiltinId;
|
||||||
la(scratch, MemOperand(scratch, Code::kHeaderSize - kHeapObjectTag));
|
bool target_is_isolate_independent_builtin =
|
||||||
b(cond, scratch);
|
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
|
||||||
return;
|
Builtins::IsIsolateIndependent(builtin_index);
|
||||||
} else if (options().inline_offheap_trampolines) {
|
|
||||||
int builtin_index = Builtins::kNoBuiltinId;
|
if (options().inline_offheap_trampolines &&
|
||||||
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
|
target_is_isolate_independent_builtin) {
|
||||||
Builtins::IsIsolateIndependent(builtin_index)) {
|
Label skip;
|
||||||
// Inline the trampoline.
|
if (cond != al) {
|
||||||
RecordCommentForOffHeapTrampoline(builtin_index);
|
b(NegateCondition(cond), &skip, Label::kNear);
|
||||||
EmbeddedData d = EmbeddedData::FromBlob();
|
|
||||||
Address entry = d.InstructionStartOfBuiltin(builtin_index);
|
|
||||||
// Use ip directly instead of using UseScratchRegisterScope, as we do
|
|
||||||
// not preserve scratch registers across calls.
|
|
||||||
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
|
|
||||||
Jump(ip, cond);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
// Inline the trampoline.
|
||||||
|
RecordCommentForOffHeapTrampoline(builtin_index);
|
||||||
|
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
|
||||||
|
EmbeddedData d = EmbeddedData::FromBlob();
|
||||||
|
Address entry = d.InstructionStartOfBuiltin(builtin_index);
|
||||||
|
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
|
||||||
|
b(ip);
|
||||||
|
bind(&skip);
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
jump(code, rmode, cond);
|
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::Call(Register target) {
|
void TurboAssembler::Call(Register target) {
|
||||||
@@ -236,30 +237,23 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond) {
   DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);

-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      // Use ip directly instead of using UseScratchRegisterScope, as we do not
-      // preserve scratch registers across calls.
-      IndirectLoadConstant(ip, code);
-      la(ip, MemOperand(ip, Code::kHeaderSize - kHeapObjectTag));
-      Call(ip);
-      return;
-    } else if (options().inline_offheap_trampolines) {
-      int builtin_index = Builtins::kNoBuiltinId;
-      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-          Builtins::IsIsolateIndependent(builtin_index)) {
-        // Inline the trampoline.
-        RecordCommentForOffHeapTrampoline(builtin_index);
-        DCHECK(Builtins::IsBuiltinId(builtin_index));
-        EmbeddedData d = EmbeddedData::FromBlob();
-        Address entry = d.InstructionStartOfBuiltin(builtin_index);
-        // Use ip directly instead of using UseScratchRegisterScope, as we do
-        // not preserve scratch registers across calls.
-        mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-        Call(ip);
-        return;
-      }
-    }
+  DCHECK_IMPLIES(options().isolate_independent_code,
+                 Builtins::IsIsolateIndependentBuiltin(*code));
+  int builtin_index = Builtins::kNoBuiltinId;
+  bool target_is_isolate_independent_builtin =
+      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+      Builtins::IsIsolateIndependent(builtin_index);
+
+  if (options().inline_offheap_trampolines &&
+      target_is_isolate_independent_builtin) {
+    // Inline the trampoline.
+    RecordCommentForOffHeapTrampoline(builtin_index);
+    CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+    EmbeddedData d = EmbeddedData::FromBlob();
+    Address entry = d.InstructionStartOfBuiltin(builtin_index);
+    mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+    Call(ip);
+    return;
   }
   call(code, rmode);
 }
@@ -1415,12 +1409,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
     // call sites.
     Register code = kJavaScriptCallCodeStartRegister;
     LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
-    AddP(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
     if (flag == CALL_FUNCTION) {
-      CallJSEntry(code);
+      CallCodeObject(code);
    } else {
       DCHECK(flag == JUMP_FUNCTION);
-      JumpToJSEntry(code);
+      JumpCodeObject(code);
     }

     // Continue here if InvokePrologue does handle the invocation due to
@@ -1595,8 +1588,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
   mov(r2, Operand(f->nargs));
   Move(r3, ExternalReference::Create(f));
   DCHECK(!AreAliased(centry, r2, r3));
-  la(centry, MemOperand(centry, Code::kHeaderSize - kHeapObjectTag));
-  Call(centry);
+  CallCodeObject(centry);
 }

 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -4408,6 +4400,64 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
   Call(builtin_pointer);
 }

+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+                                         Register code_object) {
+  // Code objects are called differently depending on whether we are generating
+  // builtin code (which will later be embedded into the binary) or compiling
+  // user JS code at runtime.
+  // * Builtin code runs in --jitless mode and thus must not call into on-heap
+  //   Code targets. Instead, we dispatch through the builtins entry table.
+  // * Codegen at runtime does not have this restriction and we can use the
+  //   shorter, branchless instruction sequence. The assumption here is that
+  //   targets are usually generated code and not builtin Code objects.
+
+  if (options().isolate_independent_code) {
+    DCHECK(root_array_available());
+    Label if_code_is_builtin, out;
+
+    Register scratch = r1;
+
+    DCHECK(!AreAliased(destination, scratch));
+    DCHECK(!AreAliased(code_object, scratch));
+
+    // Check whether the Code object is a builtin. If so, call its (off-heap)
+    // entry point directly without going through the (on-heap) trampoline.
+    // Otherwise, just call the Code object as always.
+
+    LoadW(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+    CmpP(scratch, Operand(Builtins::kNoBuiltinId));
+    bne(&if_code_is_builtin);
+
+    // A non-builtin Code object, the entry point is at
+    // Code::raw_instruction_start().
+    AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+    b(&out);
+
+    // A builtin Code object, the entry point is loaded from the builtin entry
+    // table.
+    // The builtin index is loaded in scratch.
+    bind(&if_code_is_builtin);
+    ShiftLeftP(destination, scratch, Operand(kSystemPointerSizeLog2));
+    AddP(destination, destination, kRootRegister);
+    LoadP(destination,
+          MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+    bind(&out);
+  } else {
+    AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+  }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+  LoadCodeObjectEntry(code_object, code_object);
+  Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+  LoadCodeObjectEntry(code_object, code_object);
+  Jump(code_object);
+}
+
 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
   // This generates the final instruction sequence for calls to C functions
   // once an exit frame has been constructed.
@@ -177,6 +177,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

   void Call(Label* target);

+  void LoadCodeObjectEntry(Register destination, Register code_object) override;
+  void CallCodeObject(Register code_object) override;
+  void JumpCodeObject(Register code_object) override;
+
   void CallBuiltinPointer(Register builtin_pointer) override;

   // Register move. May do nothing if the registers are identical.
|
|||||||
|
|
||||||
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
|
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
|
||||||
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
|
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
|
||||||
defined(V8_TARGET_ARCH_IA32)
|
defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390)
|
||||||
// On these platforms we emit relative builtin-to-builtin
|
// On these platforms we emit relative builtin-to-builtin
|
||||||
// jumps for isolate independent builtins in the snapshot. This fixes up the
|
// jumps for isolate independent builtins in the snapshot. This fixes up the
|
||||||
// relative jumps to the right offsets in the snapshot.
|
// relative jumps to the right offsets in the snapshot.
|
||||||
|