[loong64][mips64][codet] Remove the CodeT=Code implementation
Besides, remove obsolete CodeT dispatch functions.

Port commit 177b6be920
Port commit 651d4d9748
Bug: v8:13654
Change-Id: Ifc3ef61b6f525b7cf41dbaf6b84ee1275df76c54
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4170403
Auto-Submit: Liu Yu <liuyu@loongson.cn>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#85302}
parent 2d52e86fae
commit 6c55f09ed5
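The pattern ported in the hunks below: with the CodeT=Code implementation gone, CodeT is always a CodeDataContainer, so call/jump dispatch stops computing the entry point from the Code object header and instead loads the entry point cached on the container. A minimal sketch of the before/after sequence, assuming the TurboAssembler methods and field names shown in this diff (the wrapper function itself is hypothetical):

```cpp
// Hypothetical illustration only; not part of this commit.
void CallJSFunctionSketch(TurboAssembler* tasm, Register function,
                          Register code) {
  tasm->Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
  // Before (CodeT == Code): entry point derived from the Code object itself.
  //   tasm->Add_d(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
  //   tasm->Call(code);
  // After (CodeT == CodeDataContainer): the container caches its entry point,
  // so dispatch is one field load plus the call.
  tasm->CallCodeDataContainerObject(code);  // reads kCodeEntryPointOffset
}
```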
@@ -299,12 +299,12 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
   Generate_JSBuiltinsConstructStubHelper(masm);
 }
 
-static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
-                                 Register scratch) {
+static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
+                                  Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
-  __ Ld_d(scratch, FieldMemOperand(code, Code::kFlagsOffset));
-  __ DecodeField<Code::KindField>(scratch);
+  __ Ld_d(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
+  __ DecodeField<CodeT::KindField>(scratch);
   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
             Operand(static_cast<int>(CodeKind::BASELINE)));
 }
@@ -321,7 +321,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     Label not_baseline;
     __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
-    AssertCodeIsBaseline(masm, sfi_data, scratch1);
+    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
     __ Branch(is_baseline);
     __ bind(&not_baseline);
   } else {
@@ -433,7 +433,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Move(a1, a4);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
-    __ JumpCodeObject(a2);
+    __ JumpCodeDataContainerObject(a2);
   }
 
   __ bind(&prepare_step_in_if_stepping);
@@ -648,7 +648,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
 
-  Handle<Code> trampoline_code =
+  Handle<CodeT> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
 
@@ -754,9 +754,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     // s7 is cp. Do not init.
 
     // Invoke the code.
-    Handle<Code> builtin = is_construct
-                               ? BUILTIN_CODE(masm->isolate(), Construct)
-                               : masm->isolate()->builtins()->Call();
+    Handle<CodeT> builtin = is_construct
+                                ? BUILTIN_CODE(masm->isolate(), Construct)
+                                : masm->isolate()->builtins()->Call();
     __ Call(builtin, RelocInfo::CODE_TARGET);
 
     // Leave internal frame.
@@ -1301,7 +1301,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
     __ Move(a2, kInterpreterBytecodeArrayRegister);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ ReplaceClosureCodeWithOptimizedCode(a2, closure);
-    __ JumpCodeObject(a2);
+    __ JumpCodeDataContainerObject(a2);
 
     __ bind(&install_baseline_code);
     __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@@ -1471,7 +1471,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
 
   __ Ld_d(t0,
           FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
-  __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ LoadCodeDataContainerEntry(t0, t0);
   __ Branch(&trampoline_loaded);
 
   __ bind(&builtin_trampoline);
@@ -1722,6 +1722,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ LeaveFrame(StackFrame::STUB);
   }
 
+  __ LoadCodeDataContainerCodeNonBuiltin(a0, a0);
+
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
   __ Ld_d(a1, MemOperand(maybe_target_code,
@@ -2010,7 +2012,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 
 // static
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<Code> code) {
+                                               Handle<CodeT> code) {
   // ----------- S t a t e -------------
   //  -- a1 : target
   //  -- a0 : number of parameters on the stack
@@ -2081,7 +2083,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 // static
 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                       CallOrConstructMode mode,
-                                                      Handle<Code> code) {
+                                                      Handle<CodeT> code) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments
   //  -- a3 : the new.target (for [[Construct]] calls)
@@ -3601,9 +3603,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   }
 
   if (v8_flags.debug_code) {
-    AssertCodeIsBaseline(masm, code_obj, t2);
+    AssertCodeTIsBaseline(masm, code_obj, t2);
   }
 
+  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+
   // Replace BytecodeOffset with the feedback vector.
   Register feedback_vector = a2;
   __ Ld_d(feedback_vector,
@@ -299,12 +299,12 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
   Generate_JSBuiltinsConstructStubHelper(masm);
 }
 
-static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
-                                 Register scratch) {
+static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
+                                  Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
-  __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
-  __ DecodeField<Code::KindField>(scratch);
+  __ Ld(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
+  __ DecodeField<CodeT::KindField>(scratch);
   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
             Operand(static_cast<int>(CodeKind::BASELINE)));
 }
@@ -321,7 +321,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     Label not_baseline;
     __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
-    AssertCodeIsBaseline(masm, sfi_data, scratch1);
+    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
     __ Branch(is_baseline);
     __ bind(&not_baseline);
   } else {
@@ -431,8 +431,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Move(a1, a4);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
-    __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ Jump(a2);
+    __ JumpCodeDataContainerObject(a2);
   }
 
   __ bind(&prepare_step_in_if_stepping);
@@ -650,7 +649,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
 
-  Handle<Code> trampoline_code =
+  Handle<CodeT> trampoline_code =
      masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
 
@@ -756,9 +755,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     // s7 is cp. Do not init.
 
     // Invoke the code.
-    Handle<Code> builtin = is_construct
-                               ? BUILTIN_CODE(masm->isolate(), Construct)
-                               : masm->isolate()->builtins()->Call();
+    Handle<CodeT> builtin = is_construct
+                                ? BUILTIN_CODE(masm->isolate(), Construct)
+                                : masm->isolate()->builtins()->Call();
     __ Call(builtin, RelocInfo::CODE_TARGET);
 
     // Leave internal frame.
@@ -1295,7 +1294,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
     __ Move(a2, kInterpreterBytecodeArrayRegister);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ ReplaceClosureCodeWithOptimizedCode(a2, closure, t0, t1);
-    __ JumpCodeObject(a2);
+    __ JumpCodeDataContainerObject(a2);
 
     __ bind(&install_baseline_code);
     __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@@ -1462,7 +1461,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
               Operand(INTERPRETER_DATA_TYPE));
 
   __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
-  __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ LoadCodeDataContainerEntry(t0, t0);
   __ Branch(&trampoline_loaded);
 
   __ bind(&builtin_trampoline);
@@ -1713,6 +1712,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
+    // JavaScript frame. This is the case then OSR is triggered from bytecode.
     __ LeaveFrame(StackFrame::STUB);
   }
 
+  __ LoadCodeDataContainerCodeNonBuiltin(a0, a0);
+
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
   __ Ld(a1, MemOperand(maybe_target_code,
@@ -2003,7 +2005,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 
 // static
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<Code> code) {
+                                               Handle<CodeT> code) {
   // ----------- S t a t e -------------
   //  -- a1 : target
   //  -- a0 : number of parameters on the stack
@@ -2074,7 +2076,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 // static
 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                       CallOrConstructMode mode,
-                                                      Handle<Code> code) {
+                                                      Handle<CodeT> code) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments
   //  -- a3 : the new.target (for [[Construct]] calls)
@@ -3625,9 +3627,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   }
 
   if (v8_flags.debug_code) {
-    AssertCodeIsBaseline(masm, code_obj, t2);
+    AssertCodeTIsBaseline(masm, code_obj, t2);
   }
 
+  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+
   // Replace BytecodeOffset with the feedback vector.
   Register feedback_vector = a2;
   __ Ld(feedback_vector,
@@ -137,7 +137,7 @@ Address RelocInfo::target_internal_reference_address() {
   return pc_;
 }
 
-Handle<Code> Assembler::relative_code_target_object_handle_at(
+Handle<CodeT> Assembler::relative_code_target_object_handle_at(
     Address pc) const {
   Instr instr = Assembler::instr_at(pc);
   int32_t code_target_index = instr & kImm26Mask;
@@ -824,7 +824,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void CheckTrampolinePool();
 
   // Get the code target object for a pc-relative call or jump.
-  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+  V8_INLINE Handle<CodeT> relative_code_target_object_handle_at(
       Address pc_) const;
 
   inline int UnboundLabelsCount() { return unbound_labels_count_; }
@@ -2584,7 +2584,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
   Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
 }
 
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
                           Condition cond, Register rj, const Operand& rk) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -2659,7 +2659,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
   bind(&skip);
 }
 
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode,
                           Condition cond, Register rj, const Operand& rk) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   Builtin builtin = Builtin::kNoBuiltinId;
@@ -2669,7 +2669,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   }
 
   DCHECK(RelocInfo::IsCodeTarget(rmode));
-  DCHECK(code->IsExecutable());
   Call(code.address(), rmode, cond, rj, rk);
 }
 
@@ -3008,9 +3007,8 @@ void MacroAssembler::TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
                                                                Register scratch,
                                                                Condition cond,
                                                                Label* target) {
-  Ld_d(scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
   Ld_wu(scratch,
-        FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+        FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
   And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Branch(target, cond, scratch, Operand(zero_reg));
 }
@@ -3150,10 +3148,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   switch (type) {
     case InvokeType::kCall:
-      CallCodeObject(code);
+      CallCodeDataContainerObject(code);
      break;
    case InvokeType::kJump:
-      JumpCodeObject(code);
+      JumpCodeDataContainerObject(code);
      break;
   }
 
@@ -3375,7 +3373,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
   // smarter.
   PrepareCEntryArgs(num_arguments);
   PrepareCEntryFunction(ExternalReference::Create(f));
-  Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
+  Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
   Call(code, RelocInfo::CODE_TARGET);
 }
 
@@ -3392,7 +3390,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                              bool builtin_exit_frame) {
   PrepareCEntryFunction(builtin);
-  Handle<Code> code =
+  Handle<CodeT> code =
       CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
   Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
 }
@@ -4143,63 +4141,37 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                             : Deoptimizer::kEagerDeoptExitSize);
 }
 
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
-                                         Register code_object) {
+void TurboAssembler::LoadCodeDataContainerEntry(
+    Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  // Code objects are called differently depending on whether we are generating
-  // builtin code (which will later be embedded into the binary) or compiling
-  // user JS code at runtime.
-  // * Builtin code runs in --jitless mode and thus must not call into on-heap
-  //   Code targets. Instead, we dispatch through the builtins entry table.
-  // * Codegen at runtime does not have this restriction and we can use the
-  //   shorter, branchless instruction sequence. The assumption here is that
-  //   targets are usually generated code and not builtin Code objects.
-  if (options().isolate_independent_code) {
-    DCHECK(root_array_available());
-    Label if_code_is_off_heap, out;
-    Register scratch = t8;
-
-    DCHECK(!AreAliased(destination, scratch));
-    DCHECK(!AreAliased(code_object, scratch));
-
-    // Check whether the Code object is an off-heap trampoline. If so, call its
-    // (off-heap) entry point directly without going through the (on-heap)
-    // trampoline. Otherwise, just call the Code object as always.
-    Ld_w(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
-    And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
-    BranchShort(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
-    // Not an off-heap trampoline object, the entry point is at
-    // Code::raw_instruction_start().
-    Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
-    Branch(&out);
-
-    // An off-heap trampoline, the entry point is loaded from the builtin entry
-    // table.
-    bind(&if_code_is_off_heap);
-    Ld_w(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
-    // TODO(liuyu): don't use scratch_reg in Alsl_d;
-    Alsl_d(destination, scratch, kRootRegister, kSystemPointerSizeLog2,
-           zero_reg);
-    Ld_d(destination,
-         MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
-    bind(&out);
-  } else {
-    Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
-  }
+  Ld_d(destination, FieldMemOperand(code_data_container_object,
+                                    CodeDataContainer::kCodeEntryPointOffset));
 }
 
-void TurboAssembler::CallCodeObject(Register code_object) {
+void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+    Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  LoadCodeObjectEntry(code_object, code_object);
-  Call(code_object);
+  // Compute the Code object pointer from the code entry point.
+  Ld_d(destination, FieldMemOperand(code_data_container_object,
+                                    CodeDataContainer::kCodeEntryPointOffset));
+  Sub_d(destination, destination, Operand(Code::kHeaderSize - kHeapObjectTag));
 }
 
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void TurboAssembler::CallCodeDataContainerObject(
+    Register code_data_container_object) {
+  ASM_CODE_COMMENT(this);
+  LoadCodeDataContainerEntry(code_data_container_object,
+                             code_data_container_object);
+  Call(code_data_container_object);
+}
+
+void TurboAssembler::JumpCodeDataContainerObject(
+    Register code_data_container_object, JumpMode jump_mode) {
   ASM_CODE_COMMENT(this);
   DCHECK_EQ(JumpMode::kJump, jump_mode);
-  LoadCodeObjectEntry(code_object, code_object);
-  Jump(code_object);
+  LoadCodeDataContainerEntry(code_data_container_object,
+                             code_data_container_object);
  Jump(code_data_container_object);
 }
 
 namespace {
@@ -4233,7 +4205,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
 
   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-  __ LoadCodeObjectEntry(a2, optimized_code_entry);
+  __ LoadCodeDataContainerEntry(a2, optimized_code_entry);
   __ Jump(a2);
 
   // Optimized code slot contains deoptimized code or code is cleared and
@@ -4282,7 +4254,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
       kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
 
   CallRuntime(function_id, 1);
-  LoadCodeObjectEntry(a2, a0);
+  LoadCodeDataContainerEntry(a2, a0);
   // Restore target function, new target and actual argument count.
   Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
       kJavaScriptCallArgCountRegister);
@@ -188,11 +188,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // it to register use ld_d, it can be used in wasm jump table for concurrent
   // patching.
   void PatchAndJump(Address target);
-  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(const ExternalReference& reference);
   void Call(Register target, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+  void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
             COND_ARGS);
   void Call(Label* target);
 
@@ -206,11 +206,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void CallBuiltin(Builtin builtin);
   void TailCallBuiltin(Builtin builtin);
 
-  void LoadCodeObjectEntry(Register destination, Register code_object);
-  void CallCodeObject(Register code_object);
-
-  void JumpCodeObject(Register code_object,
-                      JumpMode jump_mode = JumpMode::kJump);
+  // Load the code entry point from the CodeDataContainer object.
+  void LoadCodeDataContainerEntry(Register destination,
+                                  Register code_data_container_object);
+  // Load code entry point from the CodeDataContainer object and compute
+  // Code object pointer out of it. Must not be used for CodeDataContainers
+  // corresponding to builtins, because their entry points values point to
+  // the embedded instruction stream in .text section.
+  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
+                                           Register code_data_container_object);
+  void CallCodeDataContainerObject(Register code_data_container_object);
+  void JumpCodeDataContainerObject(Register code_data_container_object,
+                                   JumpMode jump_mode = JumpMode::kJump);
 
   // Generates an instruction sequence s.t. the return address points to the
   // instruction following the call.
@@ -4326,7 +4326,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
   Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
 }
 
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt,
                           BranchDelaySlot bd) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4399,7 +4399,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
   Call(t9, cond, rs, rt, bd);
 }
 
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt,
                           BranchDelaySlot bd) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4409,7 +4409,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
     return;
   }
   DCHECK(RelocInfo::IsCodeTarget(rmode));
-  DCHECK(code->IsExecutable());
   Call(code.address(), rmode, cond, rs, rt, bd);
 }
 
@@ -4457,8 +4456,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
     case BuiltinCallJumpMode::kForMksnapshot: {
       Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
       IndirectLoadConstant(temp, code);
-      Daddu(temp, temp, Operand(Code::kHeaderSize - kHeapObjectTag));
-      Call(temp);
+      CallCodeDataContainerObject(temp);
      break;
    }
    case BuiltinCallJumpMode::kPCRelative:
@@ -4486,8 +4484,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
     case BuiltinCallJumpMode::kForMksnapshot: {
       Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
       IndirectLoadConstant(temp, code);
-      Daddu(temp, temp, Operand(Code::kHeaderSize - kHeapObjectTag));
-      Jump(temp);
+      JumpCodeDataContainerObject(temp);
      break;
    }
    case BuiltinCallJumpMode::kPCRelative:
@@ -4926,9 +4923,8 @@ void MacroAssembler::TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
                                                                Register scratch,
                                                                Condition cond,
                                                                Label* target) {
-  Ld(scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
   Lwu(scratch,
-      FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+      FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
   And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Branch(target, cond, scratch, Operand(zero_reg));
 }
@@ -5072,12 +5068,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   switch (type) {
     case InvokeType::kCall:
-      Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-      Call(code);
+      CallCodeDataContainerObject(code);
      break;
    case InvokeType::kJump:
-      Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-      Jump(code);
+      JumpCodeDataContainerObject(code);
      break;
   }
 
@@ -5283,7 +5277,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
   // smarter.
   PrepareCEntryArgs(num_arguments);
   PrepareCEntryFunction(ExternalReference::Create(f));
-  Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
+  Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
   Call(code, RelocInfo::CODE_TARGET);
 }
 
@@ -5301,7 +5295,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                              BranchDelaySlot bd,
                                              bool builtin_exit_frame) {
   PrepareCEntryFunction(builtin);
-  Handle<Code> code =
+  Handle<CodeT> code =
      CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
   Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
 }
@@ -6192,62 +6186,37 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                             : Deoptimizer::kEagerDeoptExitSize);
 }
 
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
-                                         Register code_object) {
+void TurboAssembler::LoadCodeDataContainerEntry(
+    Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  // Code objects are called differently depending on whether we are generating
-  // builtin code (which will later be embedded into the binary) or compiling
-  // user JS code at runtime.
-  // * Builtin code runs in --jitless mode and thus must not call into on-heap
-  //   Code targets. Instead, we dispatch through the builtins entry table.
-  // * Codegen at runtime does not have this restriction and we can use the
-  //   shorter, branchless instruction sequence. The assumption here is that
-  //   targets are usually generated code and not builtin Code objects.
-  if (options().isolate_independent_code) {
-    DCHECK(root_array_available());
-    Label if_code_is_off_heap, out;
-
-    Register scratch = kScratchReg;
-    DCHECK(!AreAliased(destination, scratch));
-    DCHECK(!AreAliased(code_object, scratch));
-
-    // Check whether the Code object is an off-heap trampoline. If so, call its
-    // (off-heap) entry point directly without going through the (on-heap)
-    // trampoline. Otherwise, just call the Code object as always.
-    Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
-    And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
-    Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
-
-    // Not an off-heap trampoline object, the entry point is at
-    // Code::raw_instruction_start().
-    Daddu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
-    Branch(&out);
-
-    // An off-heap trampoline, the entry point is loaded from the builtin entry
-    // table.
-    bind(&if_code_is_off_heap);
-    Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
-    Dlsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2);
-    Ld(destination,
-       MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
-    bind(&out);
-  } else {
-    Daddu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
-  }
+  Ld(destination, FieldMemOperand(code_data_container_object,
+                                  CodeDataContainer::kCodeEntryPointOffset));
 }
 
-void TurboAssembler::CallCodeObject(Register code_object) {
+void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+    Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  LoadCodeObjectEntry(code_object, code_object);
-  Call(code_object);
+  // Compute the Code object pointer from the code entry point.
+  Ld(destination, FieldMemOperand(code_data_container_object,
+                                  CodeDataContainer::kCodeEntryPointOffset));
  Dsubu(destination, destination, Operand(Code::kHeaderSize - kHeapObjectTag));
 }
 
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void TurboAssembler::CallCodeDataContainerObject(
+    Register code_data_container_object) {
+  ASM_CODE_COMMENT(this);
+  LoadCodeDataContainerEntry(code_data_container_object,
+                             code_data_container_object);
+  Call(code_data_container_object);
+}
+
+void TurboAssembler::JumpCodeDataContainerObject(
+    Register code_data_container_object, JumpMode jump_mode) {
   ASM_CODE_COMMENT(this);
   DCHECK_EQ(JumpMode::kJump, jump_mode);
-  LoadCodeObjectEntry(code_object, code_object);
-  Jump(code_object);
+  LoadCodeDataContainerEntry(code_data_container_object,
                             code_data_container_object);
+  Jump(code_data_container_object);
 }
 
 namespace {
@@ -6284,8 +6253,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                          scratch1, scratch2);
 
   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-  __ Daddu(a2, optimized_code_entry,
-           Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ LoadCodeDataContainerEntry(a2, optimized_code_entry);
   __ Jump(a2);
 
   // Optimized code slot contains deoptimized code or code is cleared and
@@ -6345,7 +6313,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
   }
 
   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-  Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  LoadCodeDataContainerEntry(a2, v0);
   Jump(a2);
 }
 
@@ -244,11 +244,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // it to register use ld, it can be used in wasm jump table for concurrent
   // patching.
   void PatchAndJump(Address target);
-  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(const ExternalReference& reference);
   void Call(Register target, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+  void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
             COND_ARGS);
   void Call(Label* target);
   void LoadAddress(Register dst, Label* target);
@@ -263,10 +263,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void CallBuiltin(Builtin builtin);
   void TailCallBuiltin(Builtin builtin);
 
-  void LoadCodeObjectEntry(Register destination, Register code_object);
-  void CallCodeObject(Register code_object);
-  void JumpCodeObject(Register code_object,
-                      JumpMode jump_mode = JumpMode::kJump);
+  // Load the code entry point from the CodeDataContainer object.
+  void LoadCodeDataContainerEntry(Register destination,
+                                  Register code_data_container_object);
+  // Load code entry point from the CodeDataContainer object and compute
+  // Code object pointer out of it. Must not be used for CodeDataContainers
+  // corresponding to builtins, because their entry points values point to
+  // the embedded instruction stream in .text section.
+  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
+                                           Register code_data_container_object);
+  void CallCodeDataContainerObject(Register code_data_container_object);
+  void JumpCodeDataContainerObject(Register code_data_container_object,
+                                   JumpMode jump_mode = JumpMode::kJump);
 
   // Generates an instruction sequence s.t. the return address points to the
   // instruction following the call.
@@ -560,7 +560,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_IMPLIES(
           instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
-      __ CallCodeObject(reg);
+      __ CallCodeDataContainerObject(reg);
     }
     RecordCallPosition(instr);
     frame_access_state()->ClearSPDelta();
@@ -608,7 +608,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_IMPLIES(
           instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
-      __ JumpCodeObject(reg);
+      __ JumpCodeDataContainerObject(reg);
     }
     frame_access_state()->ClearSPDelta();
     frame_access_state()->SetFrameAccessToDefault();
@@ -636,7 +636,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
       __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ CallCodeObject(a2);
+      __ CallCodeDataContainerObject(a2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -575,8 +575,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_IMPLIES(
           instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
-      __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
-      __ Call(reg);
+      __ CallCodeDataContainerObject(reg);
     }
     RecordCallPosition(instr);
     frame_access_state()->ClearSPDelta();
@@ -626,8 +625,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_IMPLIES(
          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
          reg == kJavaScriptCallCodeStartRegister);
-      __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
-      __ Jump(reg);
+      __ JumpCodeDataContainerObject(reg);
     }
     frame_access_state()->ClearSPDelta();
     frame_access_state()->SetFrameAccessToDefault();
@@ -654,8 +652,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
       __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
-      __ Call(a2);
+      __ CallCodeDataContainerObject(a2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
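A side note on the TestCodeTIsMarkedForDeoptimizationAndJump hunks above: since the kind-specific flags now live directly on the CodeT (CodeDataContainer) object, the extra load through Code::kCodeDataContainerOffset disappears. A sketch under that assumption, using the MIPS64 mnemonics from this diff (the wrapper name is hypothetical):

```cpp
// Hypothetical illustration only; mirrors the reworked deopt check.
void JumpIfMarkedForDeoptSketch(MacroAssembler* masm, Register codet,
                                Register scratch, Label* target) {
  // One load instead of two: the flags word is a field of CodeT itself.
  masm->Lwu(scratch, FieldMemOperand(
                         codet, CodeDataContainer::kKindSpecificFlagsOffset));
  masm->And(scratch, scratch,
            Operand(1 << Code::kMarkedForDeoptimizationBit));
  masm->Branch(target, ne, scratch, Operand(zero_reg));
}
```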