[riscv][codet] Remove the CodeT=Code implementation

Port commit 177b6be920
Port commit 651d4d9748
Port commit dd38db94df
Port commit 3e43010abb

Change-Id: I88cf4fb58e53f8ab2f3c0a26a0886b8c172ca1b0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4166713
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
Commit-Queue: ji qiu <qiuji@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#85327}
Author: Lu Yahan <yahan@iscas.ac.cn>  2023-01-17 12:04:09 +08:00
Committed-by: V8 LUCI CQ
parent 44b8ca4eab
commit b692796f75
6 changed files with 119 additions and 108 deletions
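At its core, the change replaces entry-point computation from a Code object (plus a detour through the builtin entry table for off-heap trampolines) with a single load of the entry point cached on the CodeDataContainer. A minimal C++ model of the two dispatch schemes; the struct layout and parameters below are illustrative stand-ins, not V8's real object layouts:

#include <cstdint>

// Illustrative stand-in for V8's real CodeDataContainer layout.
struct CodeDataContainer {
  uintptr_t code_entry_point;  // cached entry point, maintained by the runtime
};

// Old scheme: entry point derived from the tagged Code pointer.
uintptr_t EntryFromCode(uintptr_t tagged_code, uintptr_t header_size,
                        uintptr_t heap_object_tag) {
  return tagged_code + header_size - heap_object_tag;
}

// New scheme: one branchless load from the container.
uintptr_t EntryFromContainer(const CodeDataContainer& cdc) {
  return cdc.code_entry_point;
}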

src/builtins/riscv/builtins-riscv.cc

@@ -322,12 +322,12 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
   Generate_JSBuiltinsConstructStubHelper(masm);
 }
 
-static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
-                                 Register scratch) {
+static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
+                                              Register code, Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
-  __ LoadWord(scratch, FieldMemOperand(code, Code::kFlagsOffset));
-  __ DecodeField<Code::KindField>(scratch);
+  __ LoadWord(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
+  __ DecodeField<CodeDataContainer::KindField>(scratch);
   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
             Operand(static_cast<int64_t>(CodeKind::BASELINE)));
 }
@@ -341,7 +341,15 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   Label done;
 
   __ GetObjectType(sfi_data, scratch1, scratch1);
-  __ Branch(is_baseline, eq, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
+  if (v8_flags.debug_code) {
+    Label not_baseline;
+    __ Branch(&not_baseline, ne, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
+    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+    __ Branch(is_baseline);
+    __ bind(&not_baseline);
+  } else {
+    __ Branch(is_baseline, eq, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
+  }
   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
             Label::Distance::kNear);
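The reworked check above folds the baseline assertion into the dispatch: release builds keep the single compare-and-branch, while debug builds assert on the baseline path first. The same control flow as plain C++; the enum and function pointer are illustrative stand-ins for the instance-type check and assembler labels:

// Control-flow model of the hunk above; returns true on the is_baseline path.
enum InstanceType { CODE_DATA_CONTAINER_TYPE, INTERPRETER_DATA_TYPE, OTHER };

bool DispatchIsBaseline(InstanceType type, bool debug_code,
                        void (*assert_baseline)()) {
  if (debug_code) {
    if (type == CODE_DATA_CONTAINER_TYPE) {
      assert_baseline();  // AssertCodeDataContainerIsBaseline
      return true;        // __ Branch(is_baseline)
    }
    return false;
  }
  return type == CODE_DATA_CONTAINER_TYPE;  // single compare-and-branch
}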
@@ -461,7 +469,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Move(a1, a4);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
-    __ JumpCodeObject(a2);
+    __ JumpCodeDataContainerObject(a2);
   }
 
   __ bind(&prepare_step_in_if_stepping);
@@ -683,7 +691,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
-  Handle<Code> trampoline_code =
+  Handle<CodeDataContainer> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
@@ -796,9 +804,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     // s7 is cp. Do not init.
 
     // Invoke the code.
-    Handle<Code> builtin = is_construct
-                               ? BUILTIN_CODE(masm->isolate(), Construct)
-                               : masm->isolate()->builtins()->Call();
+    Handle<CodeDataContainer> builtin =
+        is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+                     : masm->isolate()->builtins()->Call();
     __ Call(builtin, RelocInfo::CODE_TARGET);
 
     // Leave internal frame.
@@ -1120,10 +1128,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(
     MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
   Register closure = a1;
   Register feedback_vector = a2;
-  UseScratchRegisterScope temps(masm);
-  temps.Include(t0, t1);
-  Register scratch = temps.Acquire();
-  Register scratch2 = temps.Acquire();
 
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
   __ LoadTaggedPointerField(
@@ -1293,7 +1297,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&do_return);
   // The return value is in a0.
-  LeaveInterpreterFrame(masm, scratch, scratch2);
+  LeaveInterpreterFrame(masm, t0, t1);
   __ Jump(ra);
 
   __ bind(&stack_check_interrupt);
@@ -1337,10 +1341,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Check if feedback vector is valid. If not, call prepare for baseline to
   // allocate it.
   __ LoadTaggedPointerField(
-      scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
-  __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ Branch(&install_baseline_code, ne, scratch,
-            Operand(FEEDBACK_VECTOR_TYPE));
+      t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+  __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
 
   // Check for a tiering state.
   __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
@@ -1350,7 +1353,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ Move(a2, kInterpreterBytecodeArrayRegister);
   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
   __ ReplaceClosureCodeWithOptimizedCode(a2, closure);
-  __ JumpCodeObject(a2);
+  __ JumpCodeDataContainerObject(a2);
 
   __ bind(&install_baseline_code);
   __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@@ -1519,7 +1522,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   __ LoadTaggedPointerField(
       t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
-  __ AddWord(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ LoadCodeDataContainerEntry(t0, t0);
   __ BranchShort(&trampoline_loaded);
 
   __ bind(&builtin_trampoline);
@@ -1747,11 +1750,32 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   // If the code object is null, just return to the caller.
   __ Ret(eq, a0, Operand(Smi::zero()));
 
   __ bind(&jump_to_optimized_code);
+
+  // OSR entry tracing.
+  {
+    Label next;
+    __ li(a1, ExternalReference::address_of_log_or_trace_osr());
+    __ Lbu(a1, MemOperand(a1));
+    __ Branch(&next, eq, a1, Operand(zero_reg));
+
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(a0);  // Preserve the code object.
+      __ CallRuntime(Runtime::kLogOrTraceOptimizedOSREntry, 0);
+      __ Pop(a0);
+    }
+
+    __ bind(&next);
+  }
+
   if (source == OsrSourceTier::kInterpreter) {
     // Drop the handler frame that is sitting on top of the actual
     // JavaScript frame. This is the case when OSR is triggered from bytecode.
     __ LeaveFrame(StackFrame::STUB);
   }
+
+  __ LoadCodeDataContainerCodeNonBuiltin(a0, a0);
+
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
   __ LoadTaggedPointerField(
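The added OSR entry tracing is a guarded runtime call: a byte flag is read through an external reference and, when set, the code object is preserved across a call into the runtime. A sketch of the guard; the flag variable and runtime function are hypothetical stand-ins:

// Models the tracing guard above; `log_or_trace_osr` stands in for the byte
// behind ExternalReference::address_of_log_or_trace_osr().
extern unsigned char log_or_trace_osr;
void LogOrTraceOptimizedOSREntry();  // stand-in for the runtime call

void MaybeTraceOsrEntry() {
  if (log_or_trace_osr != 0) {
    // The generated code pushes/pops a0 around this call to keep the code
    // object live.
    LogOrTraceOptimizedOSREntry();
  }
}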
@@ -2071,7 +2095,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 // static
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<Code> code) {
+                                               Handle<CodeDataContainer> code) {
   UseScratchRegisterScope temps(masm);
   temps.Include(t1, t0);
   // ----------- S t a t e -------------
@@ -2145,9 +2169,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }
 
 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(
+    MacroAssembler* masm, CallOrConstructMode mode,
+    Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments
   //  -- a3 : the new.target (for [[Construct]] calls)
@@ -3686,8 +3710,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
-    AssertCodeIsBaseline(masm, code_obj, scratch);
+    AssertCodeDataContainerIsBaseline(masm, code_obj, scratch);
   }
+
+  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+
   // Replace BytecodeOffset with the feedback vector.
   Register feedback_vector = a2;
   __ LoadTaggedPointerField(

src/codegen/riscv/assembler-riscv-inl.h

@@ -235,14 +235,14 @@ Address RelocInfo::target_internal_reference_address() {
   return pc_;
 }
 
-Handle<Code> Assembler::relative_code_target_object_handle_at(
+Handle<CodeDataContainer> Assembler::relative_code_target_object_handle_at(
     Address pc) const {
   Instr instr1 = Assembler::instr_at(pc);
   Instr instr2 = Assembler::instr_at(pc + kInstrSize);
   DCHECK(IsAuipc(instr1));
   DCHECK(IsJalr(instr2));
   int32_t code_target_index = BrachlongOffset(instr1, instr2);
-  return Handle<Code>::cast(GetEmbeddedObject(code_target_index));
+  return Handle<CodeDataContainer>::cast(GetEmbeddedObject(code_target_index));
 }
 
 Builtin Assembler::target_builtin_at(Address pc) {
Builtin Assembler::target_builtin_at(Address pc) {

src/codegen/riscv/assembler-riscv.h

@@ -488,7 +488,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
   void CheckTrampolinePool();
 
   // Get the code target object for a pc-relative call or jump.
-  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+  V8_INLINE Handle<CodeDataContainer> relative_code_target_object_handle_at(
       Address pc_) const;
 
   inline int UnboundLabelsCount() { return unbound_labels_count_; }

src/codegen/riscv/macro-assembler-riscv.cc

@@ -124,7 +124,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
 
   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-  __ LoadCodeObjectEntry(a2, optimized_code_entry);
+  __ LoadCodeDataContainerEntry(a2, optimized_code_entry);
   __ Jump(a2);
 
   // Optimized code slot contains deoptimized code or code is cleared and
@@ -175,7 +175,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
   CallRuntime(function_id, 1);
   // Use the return value before restoring a0
-  AddWord(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  LoadCodeDataContainerEntry(a2, a0);
   // Restore target function, new target and actual argument count.
   Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
       kJavaScriptCallArgCountRegister);
@@ -4268,7 +4268,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
   Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt);
 }
 
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   DCHECK_IMPLIES(options().isolate_independent_code,
@@ -4340,7 +4340,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
   Call(t6, cond, rs, rt);
 }
 
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4356,7 +4356,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   }
   DCHECK(RelocInfo::IsCodeTarget(rmode));
-  DCHECK(code->IsExecutable());
 
   if (CanUseNearCallOrJump(rmode)) {
     EmbeddedObjectIndex index = AddEmbeddedObject(code);
@@ -4414,7 +4413,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
     }
     case BuiltinCallJumpMode::kForMksnapshot: {
       if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
-        Handle<Code> code = isolate()->builtins()->code_handle(builtin);
+        Handle<CodeDataContainer> code =
+            isolate()->builtins()->code_handle(builtin);
         EmbeddedObjectIndex index = AddEmbeddedObject(code);
         DCHECK(is_int32(index));
         RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET,
@@ -4922,10 +4922,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                            FieldMemOperand(function, JSFunction::kCodeOffset));
     switch (type) {
       case InvokeType::kCall:
-        CallCodeObject(code);
+        CallCodeDataContainerObject(code);
         break;
       case InvokeType::kJump:
-        JumpCodeObject(code);
+        JumpCodeDataContainerObject(code);
         break;
     }
@@ -5343,7 +5343,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
   // smarter.
   PrepareCEntryArgs(num_arguments);
   PrepareCEntryFunction(ExternalReference::Create(f));
-  Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
+  Handle<CodeDataContainer> code =
+      CodeFactory::CEntry(isolate(), f->result_size);
   Call(code, RelocInfo::CODE_TARGET);
 }
@@ -5361,7 +5362,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                              bool builtin_exit_frame) {
   ASM_CODE_COMMENT(this);
   PrepareCEntryFunction(builtin);
-  Handle<Code> code =
+  Handle<CodeDataContainer> code =
       CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
   Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
 }
@@ -5737,11 +5738,9 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
 void MacroAssembler::JumpIfCodeDataContainerIsMarkedForDeoptimization(
     Register code_data_container, Register scratch,
     Label* if_marked_for_deoptimization) {
-  LoadTaggedPointerField(
-      scratch,
-      FieldMemOperand(code_data_container, Code::kCodeDataContainerOffset));
-  Lw(scratch,
-     FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+  Load32U(scratch,
+          FieldMemOperand(code_data_container,
+                          CodeDataContainer::kKindSpecificFlagsOffset));
   And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Branch(if_marked_for_deoptimization, ne, scratch, Operand(zero_reg));
 }
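The rewritten check reads the kind-specific flags word straight off the CodeDataContainer rather than first chasing Code::kCodeDataContainerOffset. The predicate it implements, as self-contained C++; the bit index is an illustrative stand-in for Code::kMarkedForDeoptimizationBit:

#include <cstdint>

constexpr int kMarkedForDeoptimizationBit = 0;  // illustrative stand-in

bool IsMarkedForDeoptimization(uint32_t kind_specific_flags) {
  return (kind_specific_flags & (1u << kMarkedForDeoptimizationBit)) != 0;
}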
@@ -6146,7 +6145,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
   auto pc = -pc_offset();
   auipc(dst, 0);
-  if(pc != 0) SubWord(dst, dst, pc);
+  if (pc != 0) {
+    SubWord(dst, dst, pc);
+  }
 }
 
 void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
@@ -6162,67 +6163,42 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                  : Deoptimizer::kEagerDeoptExitSize);
 }
 
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
-                                         Register code_object) {
-  // Code objects are called differently depending on whether we are generating
-  // builtin code (which will later be embedded into the binary) or compiling
-  // user JS code at runtime.
-  // * Builtin code runs in --jitless mode and thus must not call into on-heap
-  //   Code targets. Instead, we dispatch through the builtins entry table.
-  // * Codegen at runtime does not have this restriction and we can use the
-  //   shorter, branchless instruction sequence. The assumption here is that
-  //   targets are usually generated code and not builtin Code objects.
+void TurboAssembler::LoadCodeDataContainerEntry(
+    Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  if (options().isolate_independent_code) {
-    DCHECK(root_array_available());
-    Label if_code_is_off_heap, out;
-    UseScratchRegisterScope temps(this);
-    Register scratch = temps.Acquire();
-    DCHECK(!AreAliased(destination, scratch));
-    DCHECK(!AreAliased(code_object, scratch));
-    // Check whether the Code object is an off-heap trampoline. If so, call its
-    // (off-heap) entry point directly without going through the (on-heap)
-    // trampoline. Otherwise, just call the Code object as always.
-    Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
-    And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
-    Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
-    // Not an off-heap trampoline object, the entry point is at
-    // Code::raw_instruction_start().
-    AddWord(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
-    Branch(&out);
-    // An off-heap trampoline, the entry point is loaded from the builtin entry
-    // table.
-    bind(&if_code_is_off_heap);
-    Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
-    slli(destination, scratch, kSystemPointerSizeLog2);
-    AddWord(destination, destination, kRootRegister);
-    LoadWord(
-        destination,
-        MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-    bind(&out);
-  } else {
-    AddWord(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
-  }
+  LoadWord(destination,
+           FieldMemOperand(code_data_container_object,
+                           CodeDataContainer::kCodeEntryPointOffset));
 }
 
-void TurboAssembler::CallCodeObject(Register code_object) {
+void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+    Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  LoadCodeObjectEntry(code_object, code_object);
-  Call(code_object);
+  // Compute the Code object pointer from the code entry point.
+  LoadWord(destination,
+           FieldMemOperand(code_data_container_object,
+                           CodeDataContainer::kCodeEntryPointOffset));
+  SubWord(destination, destination,
+          Operand(Code::kHeaderSize - kHeapObjectTag));
 }
 
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void TurboAssembler::CallCodeDataContainerObject(
+    Register code_data_container_object) {
+  ASM_CODE_COMMENT(this);
+  LoadCodeDataContainerEntry(code_data_container_object,
+                             code_data_container_object);
+  Call(code_data_container_object);
+}
+
+void TurboAssembler::JumpCodeDataContainerObject(
+    Register code_data_container_object, JumpMode jump_mode) {
   ASM_CODE_COMMENT(this);
   DCHECK_EQ(JumpMode::kJump, jump_mode);
-  LoadCodeObjectEntry(code_object, code_object);
-  Jump(code_object);
+  LoadCodeDataContainerEntry(code_data_container_object,
+                             code_data_container_object);
+  Jump(code_data_container_object);
 }
 
 #if V8_TARGET_ARCH_RISCV64
 void TurboAssembler::LoadTaggedPointerField(const Register& destination,
                                             const MemOperand& field_operand) {
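LoadCodeDataContainerCodeNonBuiltin above relies on the invariant that a non-builtin entry point sits exactly Code::kHeaderSize past the untagged Code object, so the tagged pointer can be recovered by subtraction. A self-contained sketch of that round trip, with illustrative constants in place of V8's real values:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;    // V8 tags heap pointers with 1
constexpr uintptr_t kCodeHeaderSize = 64;  // stand-in for Code::kHeaderSize

uintptr_t EntryFromCode(uintptr_t tagged_code) {
  return tagged_code + kCodeHeaderSize - kHeapObjectTag;
}
uintptr_t CodeFromEntry(uintptr_t entry) {  // the SubWord in the hunk above
  return entry - (kCodeHeaderSize - kHeapObjectTag);
}

int main() {
  uintptr_t tagged_code = 0x1000 + kHeapObjectTag;
  assert(CodeFromEntry(EntryFromCode(tagged_code)) == tagged_code);
}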

src/codegen/riscv/macro-assembler-riscv.h

@@ -273,17 +273,30 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
     return rmode != RelocInfo::EXTERNAL_REFERENCE;
   }
 
   void PatchAndJump(Address target);
-  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(const ExternalReference& reference);
   void Call(Register target, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-            COND_ARGS);
+  void Call(Handle<CodeDataContainer> code,
+            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS);
   void Call(Label* target);
   void LoadAddress(
       Register dst, Label* target,
       RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED);
+
+  // Load the code entry point from the CodeDataContainer object.
+  void LoadCodeDataContainerEntry(Register destination,
+                                  Register code_data_container_object);
+  // Load the code entry point from the CodeDataContainer object and compute
+  // the Code object pointer out of it. Must not be used for
+  // CodeDataContainers corresponding to builtins, because their entry point
+  // values point to the embedded instruction stream in the .text section.
+  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
+                                           Register code_data_container_object);
+  void CallCodeDataContainerObject(Register code_data_container_object);
+  void JumpCodeDataContainerObject(Register code_data_container_object,
+                                   JumpMode jump_mode = JumpMode::kJump);
+
   // Load the builtin given by the Smi in |builtin| into the same
   // register.
   void LoadEntryFromBuiltinIndex(Register builtin);
@@ -293,11 +306,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void CallBuiltin(Builtin builtin);
   void TailCallBuiltin(Builtin builtin);
 
-  void LoadCodeObjectEntry(Register destination, Register code_object);
-  void CallCodeObject(Register code_object);
-  void JumpCodeObject(Register code_object,
-                      JumpMode jump_mode = JumpMode::kJump);
-
   // Generates an instruction sequence s.t. the return address points to the
   // instruction following the call.
   // The return address on the stack is used by frame iteration.

src/compiler/backend/riscv/code-generator-riscv.cc

@@ -654,7 +654,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         DCHECK_IMPLIES(
             instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
-        __ CallCodeObject(reg);
+        __ CallCodeDataContainerObject(reg);
       }
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
@@ -689,7 +689,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         DCHECK_IMPLIES(
             instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
-        __ JumpCodeObject(reg);
+        __ JumpCodeDataContainerObject(reg);
       }
       frame_access_state()->ClearSPDelta();
       frame_access_state()->SetFrameAccessToDefault();
@@ -731,7 +731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
       __ LoadTaggedPointerField(a2,
                                 FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ CallCodeObject(a2);
+      __ CallCodeDataContainerObject(a2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;