[loong64][mips64] Rename CodeDataContainer to Code

Port commit c53c026e6e

Bug: v8:13654
Change-Id: If925923040fca38f8e8a224efdcf050112559702
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4173356
Auto-Submit: Liu Yu <liuyu@loongson.cn>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#85529}
Liu Yu authored on 2023-01-18 15:48:37 +08:00; committed by V8 LUCI CQ
parent d55d51a242
commit 2949bb9e5c
12 changed files with 148 additions and 168 deletions
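
This port applies the same mechanical rename as the parent change to the loong64 and mips64 backends; no behavior changes. For reference, the identifier mapping applied throughout the hunks below:

    CodeDataContainer                                     -> Code
    CODE_DATA_CONTAINER_TYPE                              -> CODE_TYPE
    InstructionStream::kCodeDataContainerOffset           -> InstructionStream::kCodeOffset
    AssertCodeDataContainerIsBaseline                     -> AssertCodeIsBaseline
    TestCodeDataContainerIsMarkedForDeoptimizationAndJump -> TestCodeIsMarkedForDeoptimizationAndJump
    LoadCodeDataContainerEntry                            -> LoadCodeEntry
    LoadCodeDataContainerInstructionStreamNonBuiltin      -> LoadCodeInstructionStreamNonBuiltin
    CallCodeDataContainerObject                           -> CallCodeObject
    JumpCodeDataContainerObject                           -> JumpCodeObject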

@@ -357,8 +357,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   {
     ScratchRegisterScope temps(this);
     Register scratch = temps.AcquireScratch();
-    __ TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
-        scratch_and_result, scratch, eq, on_result);
+    __ TestCodeIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, eq,
+                                                on_result);
     __ li(scratch, __ ClearedValue());
     StoreTaggedFieldNoWriteBarrier(
         feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),

@@ -367,8 +367,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   {
     ScratchRegisterScope temps(this);
     Register scratch = temps.AcquireScratch();
-    __ TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
-        scratch_and_result, scratch, eq, on_result);
+    __ TestCodeIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, eq,
+                                                on_result);
     __ li(scratch, __ ClearedValue());
     StoreTaggedFieldNoWriteBarrier(
         feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),

@@ -299,12 +299,12 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
   Generate_JSBuiltinsConstructStubHelper(masm);
 }
 
-static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
-                                              Register code, Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+                                 Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
-  __ Ld_d(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
-  __ DecodeField<CodeDataContainer::KindField>(scratch);
+  __ Ld_d(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+  __ DecodeField<Code::KindField>(scratch);
   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
             Operand(static_cast<int>(CodeKind::BASELINE)));
 }
@@ -320,12 +320,12 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   __ GetObjectType(sfi_data, scratch1, scratch1);
   if (v8_flags.debug_code) {
     Label not_baseline;
-    __ Branch(&not_baseline, ne, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
-    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+    __ Branch(&not_baseline, ne, scratch1, Operand(CODE_TYPE));
+    AssertCodeIsBaseline(masm, sfi_data, scratch1);
     __ Branch(is_baseline);
     __ bind(&not_baseline);
   } else {
-    __ Branch(is_baseline, eq, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
+    __ Branch(is_baseline, eq, scratch1, Operand(CODE_TYPE));
   }
   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
   __ Ld_d(sfi_data,
@@ -433,7 +433,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Move(a1, a4);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
-    __ JumpCodeDataContainerObject(a2);
+    __ JumpCodeObject(a2);
   }
 
   __ bind(&prepare_step_in_if_stepping);
@@ -648,7 +648,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
-  Handle<CodeDataContainer> trampoline_code =
+  Handle<Code> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
 
@@ -754,9 +754,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     // s7 is cp. Do not init.
 
     // Invoke the code.
-    Handle<CodeDataContainer> builtin =
-        is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
-                     : masm->isolate()->builtins()->Call();
+    Handle<Code> builtin = is_construct
+                               ? BUILTIN_CODE(masm->isolate(), Construct)
+                               : masm->isolate()->builtins()->Call();
     __ Call(builtin, RelocInfo::CODE_TARGET);
 
     // Leave internal frame.
@@ -1301,7 +1301,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
     __ Move(a2, kInterpreterBytecodeArrayRegister);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ ReplaceClosureCodeWithOptimizedCode(a2, closure);
-    __ JumpCodeDataContainerObject(a2);
+    __ JumpCodeObject(a2);
 
     __ bind(&install_baseline_code);
     __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@@ -1471,7 +1471,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
 
   __ Ld_d(t0,
           FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
-  __ LoadCodeDataContainerEntry(t0, t0);
+  __ LoadCodeEntry(t0, t0);
   __ Branch(&trampoline_loaded);
 
   __ bind(&builtin_trampoline);
@@ -1722,7 +1722,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ LeaveFrame(StackFrame::STUB);
   }
 
-  __ LoadCodeDataContainerInstructionStreamNonBuiltin(a0, a0);
+  __ LoadCodeInstructionStreamNonBuiltin(a0, a0);
 
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
@@ -2014,7 +2014,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 
 // static
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeDataContainer> code) {
+                                               Handle<Code> code) {
   // ----------- S t a t e -------------
   //  -- a1 : target
   //  -- a0 : number of parameters on the stack
@@ -2083,9 +2083,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }
 
 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(
-    MacroAssembler* masm, CallOrConstructMode mode,
-    Handle<CodeDataContainer> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+                                                      CallOrConstructMode mode,
+                                                      Handle<Code> code) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments
   //  -- a3 : the new.target (for [[Construct]] calls)
@@ -3588,7 +3588,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (!is_osr) {
     Label start_with_baseline;
     __ GetObjectType(code_obj, t2, t2);
-    __ Branch(&start_with_baseline, eq, t2, Operand(CODE_DATA_CONTAINER_TYPE));
+    __ Branch(&start_with_baseline, eq, t2, Operand(CODE_TYPE));
 
     // Start with bytecode as there is no baseline code.
     Builtin builtin_id = next_bytecode
@@ -3601,15 +3601,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
     __ GetObjectType(code_obj, t2, t2);
-    __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
-              Operand(CODE_DATA_CONTAINER_TYPE));
+    __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODE_TYPE));
   }
 
   if (v8_flags.debug_code) {
-    AssertCodeDataContainerIsBaseline(masm, code_obj, t2);
+    AssertCodeIsBaseline(masm, code_obj, t2);
   }
 
-  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
+  __ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
 
   // Replace BytecodeOffset with the feedback vector.
   Register feedback_vector = a2;

@@ -299,12 +299,12 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
   Generate_JSBuiltinsConstructStubHelper(masm);
 }
 
-static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
-                                              Register code, Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+                                 Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
-  __ Ld(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
-  __ DecodeField<CodeDataContainer::KindField>(scratch);
+  __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+  __ DecodeField<Code::KindField>(scratch);
   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
             Operand(static_cast<int>(CodeKind::BASELINE)));
 }
@@ -320,12 +320,12 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   __ GetObjectType(sfi_data, scratch1, scratch1);
   if (v8_flags.debug_code) {
     Label not_baseline;
-    __ Branch(&not_baseline, ne, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
-    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+    __ Branch(&not_baseline, ne, scratch1, Operand(CODE_TYPE));
+    AssertCodeIsBaseline(masm, sfi_data, scratch1);
     __ Branch(is_baseline);
     __ bind(&not_baseline);
   } else {
-    __ Branch(is_baseline, eq, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
+    __ Branch(is_baseline, eq, scratch1, Operand(CODE_TYPE));
   }
   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
   __ Ld(sfi_data,
@@ -431,7 +431,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Move(a1, a4);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
-    __ JumpCodeDataContainerObject(a2);
+    __ JumpCodeObject(a2);
   }
 
   __ bind(&prepare_step_in_if_stepping);
@@ -649,7 +649,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
-  Handle<CodeDataContainer> trampoline_code =
+  Handle<Code> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
 
@@ -755,9 +755,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     // s7 is cp. Do not init.
 
     // Invoke the code.
-    Handle<CodeDataContainer> builtin =
-        is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
-                     : masm->isolate()->builtins()->Call();
+    Handle<Code> builtin = is_construct
+                               ? BUILTIN_CODE(masm->isolate(), Construct)
+                               : masm->isolate()->builtins()->Call();
     __ Call(builtin, RelocInfo::CODE_TARGET);
 
     // Leave internal frame.
@@ -1294,7 +1294,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
     __ Move(a2, kInterpreterBytecodeArrayRegister);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ ReplaceClosureCodeWithOptimizedCode(a2, closure, t0, t1);
-    __ JumpCodeDataContainerObject(a2);
+    __ JumpCodeObject(a2);
 
     __ bind(&install_baseline_code);
     __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@@ -1461,7 +1461,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
             Operand(INTERPRETER_DATA_TYPE));
 
   __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
-  __ LoadCodeDataContainerEntry(t0, t0);
+  __ LoadCodeEntry(t0, t0);
   __ Branch(&trampoline_loaded);
 
   __ bind(&builtin_trampoline);
@@ -1713,7 +1713,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ LeaveFrame(StackFrame::STUB);
   }
 
-  __ LoadCodeDataContainerInstructionStreamNonBuiltin(a0, a0);
+  __ LoadCodeInstructionStreamNonBuiltin(a0, a0);
 
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
@@ -2006,7 +2006,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 
 // static
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeDataContainer> code) {
+                                               Handle<Code> code) {
   // ----------- S t a t e -------------
   //  -- a1 : target
   //  -- a0 : number of parameters on the stack
@@ -2075,9 +2075,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }
 
 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(
-    MacroAssembler* masm, CallOrConstructMode mode,
-    Handle<CodeDataContainer> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                      CallOrConstructMode mode,
+                                                      Handle<Code> code) {
  // ----------- S t a t e -------------
   //  -- a0 : the number of arguments
   //  -- a3 : the new.target (for [[Construct]] calls)
@@ -3611,7 +3611,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (!is_osr) {
     Label start_with_baseline;
     __ GetObjectType(code_obj, t2, t2);
-    __ Branch(&start_with_baseline, eq, t2, Operand(CODE_DATA_CONTAINER_TYPE));
+    __ Branch(&start_with_baseline, eq, t2, Operand(CODE_TYPE));
 
     // Start with bytecode as there is no baseline code.
     Builtin builtin_id = next_bytecode
@@ -3624,15 +3624,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
     __ GetObjectType(code_obj, t2, t2);
-    __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
-              Operand(CODE_DATA_CONTAINER_TYPE));
+    __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODE_TYPE));
   }
 
   if (v8_flags.debug_code) {
-    AssertCodeDataContainerIsBaseline(masm, code_obj, t2);
+    AssertCodeIsBaseline(masm, code_obj, t2);
   }
 
-  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
+  __ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
 
   // Replace BytecodeOffset with the feedback vector.
   Register feedback_vector = a2;

@@ -137,7 +137,7 @@ Address RelocInfo::target_internal_reference_address() {
   return pc_;
 }
 
-Handle<CodeDataContainer> Assembler::relative_code_target_object_handle_at(
+Handle<Code> Assembler::relative_code_target_object_handle_at(
     Address pc) const {
   Instr instr = Assembler::instr_at(pc);
   int32_t code_target_index = instr & kImm26Mask;

@@ -824,7 +824,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void CheckTrampolinePool();
 
   // Get the code target object for a pc-relative call or jump.
-  V8_INLINE Handle<CodeDataContainer> relative_code_target_object_handle_at(
+  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
       Address pc_) const;
 
   inline int UnboundLabelsCount() { return unbound_labels_count_; }

@@ -2584,7 +2584,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
   Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
 }
 
-void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rj, const Operand& rk) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -2659,7 +2659,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
   bind(&skip);
 }
 
-void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rj, const Operand& rk) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   Builtin builtin = Builtin::kNoBuiltinId;
@@ -2718,8 +2718,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
     }
    case BuiltinCallJumpMode::kForMksnapshot: {
      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
-        Handle<CodeDataContainer> code =
-            isolate()->builtins()->code_handle(builtin);
+        Handle<Code> code = isolate()->builtins()->code_handle(builtin);
        int32_t code_target_index = AddCodeTarget(code);
        RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
        bl(code_target_index);
@@ -2755,8 +2754,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
     }
    case BuiltinCallJumpMode::kForMksnapshot: {
      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
-        Handle<CodeDataContainer> code =
-            isolate()->builtins()->code_handle(builtin);
+        Handle<Code> code = isolate()->builtins()->code_handle(builtin);
        int32_t code_target_index = AddCodeTarget(code);
        RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
        b(code_target_index);
@@ -3005,11 +3003,11 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
   Branch(stack_overflow, le, scratch1, Operand(scratch2));
 }
 
-void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
+void MacroAssembler::TestCodeIsMarkedForDeoptimizationAndJump(
     Register code_data_container, Register scratch, Condition cond,
     Label* target) {
-  Ld_wu(scratch, FieldMemOperand(code_data_container,
-                                 CodeDataContainer::kKindSpecificFlagsOffset));
+  Ld_wu(scratch,
+        FieldMemOperand(code_data_container, Code::kKindSpecificFlagsOffset));
   And(scratch, scratch,
       Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
   Branch(target, cond, scratch, Operand(zero_reg));
@@ -3150,10 +3148,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
     Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
     switch (type) {
       case InvokeType::kCall:
-        CallCodeDataContainerObject(code);
+        CallCodeObject(code);
         break;
       case InvokeType::kJump:
-        JumpCodeDataContainerObject(code);
+        JumpCodeObject(code);
         break;
     }
@@ -3375,8 +3373,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
   // smarter.
   PrepareCEntryArgs(num_arguments);
   PrepareCEntryFunction(ExternalReference::Create(f));
-  Handle<CodeDataContainer> code =
-      CodeFactory::CEntry(isolate(), f->result_size);
+  Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
   Call(code, RelocInfo::CODE_TARGET);
 }
@@ -3393,7 +3390,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                              bool builtin_exit_frame) {
   PrepareCEntryFunction(builtin);
-  Handle<CodeDataContainer> code =
+  Handle<Code> code =
       CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
   Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
 }
@@ -4144,37 +4141,34 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                            : Deoptimizer::kEagerDeoptExitSize);
 }
 
-void TurboAssembler::LoadCodeDataContainerEntry(
-    Register destination, Register code_data_container_object) {
+void TurboAssembler::LoadCodeEntry(Register destination,
+                                   Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
   Ld_d(destination, FieldMemOperand(code_data_container_object,
-                                    CodeDataContainer::kCodeEntryPointOffset));
+                                    Code::kCodeEntryPointOffset));
 }
 
-void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
+void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(
     Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
   // Compute the InstructionStream object pointer from the code entry point.
   Ld_d(destination, FieldMemOperand(code_data_container_object,
-                                    CodeDataContainer::kCodeEntryPointOffset));
+                                    Code::kCodeEntryPointOffset));
   Sub_d(destination, destination,
         Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
 }
 
-void TurboAssembler::CallCodeDataContainerObject(
-    Register code_data_container_object) {
+void TurboAssembler::CallCodeObject(Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  LoadCodeDataContainerEntry(code_data_container_object,
-                             code_data_container_object);
+  LoadCodeEntry(code_data_container_object, code_data_container_object);
   Call(code_data_container_object);
 }
 
-void TurboAssembler::JumpCodeDataContainerObject(
-    Register code_data_container_object, JumpMode jump_mode) {
+void TurboAssembler::JumpCodeObject(Register code_data_container_object,
+                                    JumpMode jump_mode) {
   ASM_CODE_COMMENT(this);
   DCHECK_EQ(JumpMode::kJump, jump_mode);
-  LoadCodeDataContainerEntry(code_data_container_object,
-                             code_data_container_object);
+  LoadCodeEntry(code_data_container_object, code_data_container_object);
   Jump(code_data_container_object);
 }
@@ -4199,8 +4193,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
 
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
-  __ TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
-      optimized_code_entry, a6, ne, &heal_optimized_code_slot);
+  __ TestCodeIsMarkedForDeoptimizationAndJump(optimized_code_entry, a6, ne,
+                                              &heal_optimized_code_slot);
 
   // Optimized code is good, get it into the closure and link the closure into
   // the optimized functions list, then tail call the optimized code.
@@ -4209,7 +4203,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-  __ LoadCodeDataContainerEntry(a2, optimized_code_entry);
+  __ LoadCodeEntry(a2, optimized_code_entry);
   __ Jump(a2);
 
   // Optimized code slot contains deoptimized code or code is cleared and
@@ -4258,7 +4252,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
          kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
     CallRuntime(function_id, 1);
-    LoadCodeDataContainerEntry(a2, a0);
+    LoadCodeEntry(a2, a0);
 
     // Restore target function, new target and actual argument count.
     Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
        kJavaScriptCallArgCountRegister);

@@ -188,12 +188,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // it to register use ld_d, it can be used in wasm jump table for concurrent
   // patching.
   void PatchAndJump(Address target);
-  void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(const ExternalReference& reference);
   void Call(Register target, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  void Call(Handle<CodeDataContainer> code,
-            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS);
+  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            COND_ARGS);
   void Call(Label* target);
 
   // Load the builtin given by the Smi in |builtin_index| into the same
@@ -206,18 +206,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void CallBuiltin(Builtin builtin);
   void TailCallBuiltin(Builtin builtin);
 
-  // Load the code entry point from the CodeDataContainer object.
-  void LoadCodeDataContainerEntry(Register destination,
-                                  Register code_data_container_object);
-  // Load code entry point from the CodeDataContainer object and compute
+  // Load the code entry point from the Code object.
+  void LoadCodeEntry(Register destination, Register code_data_container_object);
+  // Load code entry point from the Code object and compute
   // InstructionStream object pointer out of it. Must not be used for
-  // CodeDataContainers corresponding to builtins, because their entry points
+  // Codes corresponding to builtins, because their entry points
   // values point to the embedded instruction stream in .text section.
-  void LoadCodeDataContainerInstructionStreamNonBuiltin(
-      Register destination, Register code_data_container_object);
-  void CallCodeDataContainerObject(Register code_data_container_object);
-  void JumpCodeDataContainerObject(Register code_data_container_object,
-                                   JumpMode jump_mode = JumpMode::kJump);
+  void LoadCodeInstructionStreamNonBuiltin(Register destination,
+                                           Register code_data_container_object);
+  void CallCodeObject(Register code_data_container_object);
+  void JumpCodeObject(Register code_data_container_object,
+                      JumpMode jump_mode = JumpMode::kJump);
 
   // Generates an instruction sequence s.t. the return address points to the
   // instruction following the call.
@@ -832,9 +831,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // less efficient form using xor instead of mov is emitted.
   void Swap(Register reg1, Register reg2, Register scratch = no_reg);
 
-  void TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
-      Register code_data_container, Register scratch, Condition cond,
-      Label* target);
+  void TestCodeIsMarkedForDeoptimizationAndJump(Register code_data_container,
+                                                Register scratch,
+                                                Condition cond, Label* target);
   Operand ClearedValue() const;
 
   void PushRoot(RootIndex index) {

@@ -4326,7 +4326,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
   Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
 }
 
-void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt,
                           BranchDelaySlot bd) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4399,7 +4399,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
   Call(t9, cond, rs, rt, bd);
 }
 
-void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt,
                           BranchDelaySlot bd) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4454,10 +4454,9 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
      break;
    }
    case BuiltinCallJumpMode::kForMksnapshot: {
-      Handle<CodeDataContainer> code =
-          isolate()->builtins()->code_handle(builtin);
+      Handle<Code> code = isolate()->builtins()->code_handle(builtin);
      IndirectLoadConstant(temp, code);
-      CallCodeDataContainerObject(temp);
+      CallCodeObject(temp);
      break;
    }
    case BuiltinCallJumpMode::kPCRelative:
@@ -4483,10 +4482,9 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
      break;
    }
    case BuiltinCallJumpMode::kForMksnapshot: {
-      Handle<CodeDataContainer> code =
-          isolate()->builtins()->code_handle(builtin);
+      Handle<Code> code = isolate()->builtins()->code_handle(builtin);
      IndirectLoadConstant(temp, code);
-      JumpCodeDataContainerObject(temp);
+      JumpCodeObject(temp);
      break;
    }
    case BuiltinCallJumpMode::kPCRelative:
@@ -4921,11 +4919,11 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
   Branch(stack_overflow, le, scratch1, Operand(scratch2));
 }
 
-void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
+void MacroAssembler::TestCodeIsMarkedForDeoptimizationAndJump(
    Register code_data_container, Register scratch, Condition cond,
    Label* target) {
-  Lwu(scratch, FieldMemOperand(code_data_container,
-                               CodeDataContainer::kKindSpecificFlagsOffset));
+  Lwu(scratch,
+      FieldMemOperand(code_data_container, Code::kKindSpecificFlagsOffset));
   And(scratch, scratch,
       Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
   Branch(target, cond, scratch, Operand(zero_reg));
@@ -5070,10 +5068,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
     Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
     switch (type) {
       case InvokeType::kCall:
-        CallCodeDataContainerObject(code);
+        CallCodeObject(code);
         break;
      case InvokeType::kJump:
-        JumpCodeDataContainerObject(code);
+        JumpCodeObject(code);
        break;
    }
@@ -5279,8 +5277,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
   // smarter.
   PrepareCEntryArgs(num_arguments);
   PrepareCEntryFunction(ExternalReference::Create(f));
-  Handle<CodeDataContainer> code =
-      CodeFactory::CEntry(isolate(), f->result_size);
+  Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
   Call(code, RelocInfo::CODE_TARGET);
 }
@@ -5298,7 +5295,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                              BranchDelaySlot bd,
                                              bool builtin_exit_frame) {
   PrepareCEntryFunction(builtin);
-  Handle<CodeDataContainer> code =
+  Handle<Code> code =
       CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
   Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
 }
@@ -6189,37 +6186,34 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                            : Deoptimizer::kEagerDeoptExitSize);
 }
 
-void TurboAssembler::LoadCodeDataContainerEntry(
-    Register destination, Register code_data_container_object) {
+void TurboAssembler::LoadCodeEntry(Register destination,
+                                   Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  Ld(destination, FieldMemOperand(code_data_container_object,
-                                  CodeDataContainer::kCodeEntryPointOffset));
+  Ld(destination,
+     FieldMemOperand(code_data_container_object, Code::kCodeEntryPointOffset));
 }
 
-void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
+void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(
     Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
   // Compute the InstructionStream object pointer from the code entry point.
-  Ld(destination, FieldMemOperand(code_data_container_object,
-                                  CodeDataContainer::kCodeEntryPointOffset));
+  Ld(destination,
+     FieldMemOperand(code_data_container_object, Code::kCodeEntryPointOffset));
   Dsubu(destination, destination,
         Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
 }
 
-void TurboAssembler::CallCodeDataContainerObject(
-    Register code_data_container_object) {
+void TurboAssembler::CallCodeObject(Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  LoadCodeDataContainerEntry(code_data_container_object,
-                             code_data_container_object);
+  LoadCodeEntry(code_data_container_object, code_data_container_object);
   Call(code_data_container_object);
 }
 
-void TurboAssembler::JumpCodeDataContainerObject(
-    Register code_data_container_object, JumpMode jump_mode) {
+void TurboAssembler::JumpCodeObject(Register code_data_container_object,
+                                    JumpMode jump_mode) {
   ASM_CODE_COMMENT(this);
   DCHECK_EQ(JumpMode::kJump, jump_mode);
-  LoadCodeDataContainerEntry(code_data_container_object,
-                             code_data_container_object);
+  LoadCodeEntry(code_data_container_object, code_data_container_object);
   Jump(code_data_container_object);
 }
@@ -6246,8 +6240,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
 
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
-  __ TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
-      optimized_code_entry, scratch1, ne, &heal_optimized_code_slot);
+  __ TestCodeIsMarkedForDeoptimizationAndJump(optimized_code_entry, scratch1,
+                                              ne, &heal_optimized_code_slot);
 
   // Optimized code is good, get it into the closure and link the closure into
   // the optimized functions list, then tail call the optimized code.
@@ -6257,7 +6251,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                          scratch1, scratch2);
   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-  __ LoadCodeDataContainerEntry(a2, optimized_code_entry);
+  __ LoadCodeEntry(a2, optimized_code_entry);
   __ Jump(a2);
 
   // Optimized code slot contains deoptimized code or code is cleared and
@@ -6317,7 +6311,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
   }
 
   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-  LoadCodeDataContainerEntry(a2, v0);
+  LoadCodeEntry(a2, v0);
   Jump(a2);
 }

@@ -244,12 +244,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // it to register use ld, it can be used in wasm jump table for concurrent
   // patching.
   void PatchAndJump(Address target);
-  void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(const ExternalReference& reference);
   void Call(Register target, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  void Call(Handle<CodeDataContainer> code,
-            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS);
+  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            COND_ARGS);
   void Call(Label* target);
   void LoadAddress(Register dst, Label* target);
 
@@ -263,18 +263,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void CallBuiltin(Builtin builtin);
   void TailCallBuiltin(Builtin builtin);
 
-  // Load the code entry point from the CodeDataContainer object.
-  void LoadCodeDataContainerEntry(Register destination,
-                                  Register code_data_container_object);
-  // Load code entry point from the CodeDataContainer object and compute
+  // Load the code entry point from the Code object.
+  void LoadCodeEntry(Register destination, Register code_data_container_object);
+  // Load code entry point from the Code object and compute
   // InstructionStream object pointer out of it. Must not be used for
-  // CodeDataContainers corresponding to builtins, because their entry points
+  // Codes corresponding to builtins, because their entry points
   // values point to the embedded instruction stream in .text section.
-  void LoadCodeDataContainerInstructionStreamNonBuiltin(
-      Register destination, Register code_data_container_object);
-  void CallCodeDataContainerObject(Register code_data_container_object);
-  void JumpCodeDataContainerObject(Register code_data_container_object,
-                                   JumpMode jump_mode = JumpMode::kJump);
+  void LoadCodeInstructionStreamNonBuiltin(Register destination,
+                                           Register code_data_container_object);
+  void CallCodeObject(Register code_data_container_object);
+  void JumpCodeObject(Register code_data_container_object,
+                      JumpMode jump_mode = JumpMode::kJump);
 
   // Generates an instruction sequence s.t. the return address points to the
   // instruction following the call.
@@ -1005,9 +1004,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // less efficient form using xor instead of mov is emitted.
   void Swap(Register reg1, Register reg2, Register scratch = no_reg);
 
-  void TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
-      Register code_data_container, Register scratch, Condition cond,
-      Label* target);
+  void TestCodeIsMarkedForDeoptimizationAndJump(Register code_data_container,
+                                                Register scratch,
+                                                Condition cond, Label* target);
   Operand ClearedValue() const;
 
   void PushRoot(RootIndex index) {

@@ -530,17 +530,15 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
 //    1. read from memory the word that contains that bit, which can be found in
-//       the flags in the referenced {CodeDataContainer} object;
+//       the flags in the referenced {Code} object;
 //    2. test kMarkedForDeoptimizationBit in those flags; and
 //    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
   UseScratchRegisterScope temps(tasm());
   Register scratch = temps.Acquire();
-  int offset = InstructionStream::kCodeDataContainerOffset -
-               InstructionStream::kHeaderSize;
+  int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
   __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
-  __ Ld_w(scratch, FieldMemOperand(
-                       scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+  __ Ld_w(scratch, FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset));
   __ And(scratch, scratch,
          Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
@@ -562,7 +560,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        DCHECK_IMPLIES(
            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
-        __ CallCodeDataContainerObject(reg);
+        __ CallCodeObject(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
@@ -610,7 +608,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        DCHECK_IMPLIES(
            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
-        __ JumpCodeDataContainerObject(reg);
+        __ JumpCodeObject(reg);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
@@ -638,7 +636,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      }
      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
      __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ CallCodeDataContainerObject(a2);
+      __ CallCodeObject(a2);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;

@@ -545,16 +545,14 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
 //    1. read from memory the word that contains that bit, which can be found in
-//       the flags in the referenced {CodeDataContainer} object;
+//       the flags in the referenced {Code} object;
 //    2. test kMarkedForDeoptimizationBit in those flags; and
 //    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  int offset = InstructionStream::kCodeDataContainerOffset -
-               InstructionStream::kHeaderSize;
+  int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
   __ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ Lw(kScratchReg,
-        FieldMemOperand(kScratchReg,
-                        CodeDataContainer::kKindSpecificFlagsOffset));
+        FieldMemOperand(kScratchReg, Code::kKindSpecificFlagsOffset));
   __ And(kScratchReg, kScratchReg,
          Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
@@ -576,7 +574,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        DCHECK_IMPLIES(
            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
-        __ CallCodeDataContainerObject(reg);
+        __ CallCodeObject(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
@@ -626,7 +624,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        DCHECK_IMPLIES(
            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
-        __ JumpCodeDataContainerObject(reg);
+        __ JumpCodeObject(reg);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
@@ -653,7 +651,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      }
      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
      __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ CallCodeDataContainerObject(a2);
+      __ CallCodeObject(a2);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
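
As context for the BailoutIfDeoptimized hunks in the last two files: the emitted sequence loads the Code object referenced by the current InstructionStream, reads its kind-specific flags word, tests the marked-for-deoptimization bit, and jumps to the CompileLazyDeoptimizedCode builtin if the bit is set. A minimal C++ sketch of that check; Load32 and TailCallBuiltin are hypothetical helpers used only for illustration, not V8 API:

    // Sketch only: mirrors the three numbered steps in BailoutIfDeoptimized.
    // Load32/TailCallBuiltin are invented names, not V8 declarations.
    uint32_t flags = Load32(code_object, Code::kKindSpecificFlagsOffset);   // step 1
    if (flags & (1u << InstructionStream::kMarkedForDeoptimizationBit)) {   // step 2
      TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode);                // step 3
    }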