PPC/s390: Rename CodeDataContainer to Code

Port c53c026e6e

Original Commit Message:

    This completes the big Code/CodeDataContainer name shuffle.

R=jgruber@chromium.org, joransiu@ca.ibm.com, junyan@redhat.com, midawson@redhat.com
BUG=
LOG=N

Change-Id: I1de7bcf669e0561fc6886abe312f4242d83a764b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4177456
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#85375}
Author: Milad Fa, 2023-01-18 12:10:47 -05:00 (committed by V8 LUCI CQ)
Parent: 29dca1c0c0
Commit: b7cce4aa16
12 changed files with 144 additions and 186 deletions
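
All of the hunks below apply the same mechanical rename with no behavioural change: CodeDataContainer becomes Code, CODE_DATA_CONTAINER_TYPE becomes CODE_TYPE, and the assembler helpers drop the DataContainer infix (LoadCodeDataContainerEntry → LoadCodeEntry, CallCodeDataContainerObject → CallCodeObject, JumpCodeDataContainerObject → JumpCodeObject, TestCodeDataContainerIsMarkedForDeoptimization → TestCodeIsMarkedForDeoptimization, and so on). A minimal standalone sketch of the pattern is below; the toy Code struct and LoadCodeEntry helper are invented for illustration and are not V8's real definitions or layout.

```cpp
// Standalone sketch (not V8 code): the rename keeps call sites structurally
// identical and only swaps the type/method names, e.g.
//   Handle<CodeDataContainer> code = ...;   // before
//   Handle<Code> code = ...;                // after
#include <cstdint>
#include <iostream>

struct Code {                   // was: CodeDataContainer
  uintptr_t code_entry_point;   // stands in for the field behind kCodeEntryPointOffset
};

// was: LoadCodeDataContainerEntry(destination, code_data_container_object)
uintptr_t LoadCodeEntry(const Code& code_object) {
  return code_object.code_entry_point;
}

int main() {
  Code c{0x1234};
  std::cout << std::hex << LoadCodeEntry(c) << '\n';  // prints 1234
  return 0;
}
```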

View File

@ -537,8 +537,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
-__ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result,
-scratch, r0);
+__ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch, r0);
__ beq(on_result, cr0);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(

View File

@ -550,8 +550,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
-__ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result,
-scratch);
+__ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
__ beq(on_result);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(

View File

@ -35,12 +35,12 @@ namespace internal {
#define __ ACCESS_MASM(masm)
namespace {
-static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
-Register code, Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+Register scratch) {
DCHECK(!AreAliased(code, scratch));
// Verify that the code kind is baseline code via the CodeKind.
-__ LoadU16(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
-__ DecodeField<CodeDataContainer::KindField>(scratch);
+__ LoadU16(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+__ DecodeField<Code::KindField>(scratch);
__ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE)), r0);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
@ -52,11 +52,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
USE(GetSharedFunctionInfoBytecodeOrBaseline);
ASM_CODE_COMMENT(masm);
Label done;
-__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ b(ne, &not_baseline);
-AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ beq(is_baseline);
__ bind(&not_baseline);
} else {
@ -131,7 +131,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
-__ CompareObjectType(code_obj, r6, r6, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(code_obj, r6, r6, CODE_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@ -144,14 +144,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
-__ CompareObjectType(code_obj, r6, r6, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(code_obj, r6, r6, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
if (v8_flags.debug_code) {
-AssertCodeDataContainerIsBaseline(masm, code_obj, r6);
+AssertCodeIsBaseline(masm, code_obj, r6);
}
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
+__ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
// Load the feedback vector.
Register feedback_vector = r5;
@ -427,7 +427,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ LeaveFrame(StackFrame::STUB);
}
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(r3, r3);
+__ LoadCodeInstructionStreamNonBuiltin(r3, r3);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@ -758,7 +758,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset),
r0);
-__ JumpCodeDataContainerObject(r5);
+__ JumpCodeObject(r5);
}
__ bind(&prepare_step_in_if_stepping);
@ -946,7 +946,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return.
-Handle<CodeDataContainer> trampoline_code =
+Handle<Code> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
@ -1062,8 +1062,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mr(r17, r7);
// Invoke the code.
-Handle<CodeDataContainer> builtin =
-is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+Handle<Code> builtin = is_construct
+? BUILTIN_CODE(masm->isolate(), Construct)
: masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
@ -1613,7 +1613,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ mr(r5, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ ReplaceClosureCodeWithOptimizedCode(r5, closure, ip, r7);
-__ JumpCodeDataContainerObject(r5);
+__ JumpCodeObject(r5);
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@ -1740,8 +1740,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
-Handle<CodeDataContainer> code =
-BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
__ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r3, r4, and r6 unmodified.
@ -1786,7 +1785,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadTaggedPointerField(
r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
r0);
-__ LoadCodeDataContainerEntry(r5, r5);
+__ LoadCodeEntry(r5, r5);
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
@ -2226,7 +2225,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-Handle<CodeDataContainer> code) {
+Handle<Code> code) {
// ----------- S t a t e -------------
// -- r4 : target
// -- r3 : number of parameters on the stack
@ -2297,9 +2296,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
// static
-void Builtins::Generate_CallOrConstructForwardVarargs(
-MacroAssembler* masm, CallOrConstructMode mode,
-Handle<CodeDataContainer> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+CallOrConstructMode mode,
+Handle<Code> code) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments
// -- r6 : the new.target (for [[Construct]] calls)

View File

@ -36,12 +36,12 @@ namespace internal {
namespace {
-static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
-Register code, Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+Register scratch) {
DCHECK(!AreAliased(code, scratch));
// Verify that the code kind is baseline code via the CodeKind.
-__ LoadU16(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
-__ DecodeField<CodeDataContainer::KindField>(scratch);
+__ LoadU16(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+__ DecodeField<Code::KindField>(scratch);
__ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
@ -53,11 +53,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
USE(GetSharedFunctionInfoBytecodeOrBaseline);
ASM_CODE_COMMENT(masm);
Label done;
-__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ b(ne, &not_baseline);
-AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ beq(is_baseline);
__ bind(&not_baseline);
} else {
@ -131,7 +131,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
-__ CompareObjectType(code_obj, r5, r5, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(code_obj, r5, r5, CODE_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@ -144,14 +144,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
-__ CompareObjectType(code_obj, r5, r5, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(code_obj, r5, r5, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
if (v8_flags.debug_code) {
-AssertCodeDataContainerIsBaseline(masm, code_obj, r5);
+AssertCodeIsBaseline(masm, code_obj, r5);
}
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
+__ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
// Load the feedback vector.
Register feedback_vector = r4;
@ -316,7 +316,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ LeaveFrame(StackFrame::STUB);
}
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(r2, r2);
+__ LoadCodeInstructionStreamNonBuiltin(r2, r2);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@ -731,7 +731,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(r3, r6);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
-__ JumpCodeDataContainerObject(r4);
+__ JumpCodeObject(r4);
}
__ bind(&prepare_step_in_if_stepping);
@ -940,7 +940,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return.
-Handle<CodeDataContainer> trampoline_code =
+Handle<Code> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
USE(pushed_stack_space);
DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
@ -1090,8 +1090,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r9, r6);
// Invoke the code.
-Handle<CodeDataContainer> builtin =
-is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+Handle<Code> builtin = is_construct
+? BUILTIN_CODE(masm->isolate(), Construct)
: masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
@ -1634,7 +1634,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ mov(r4, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ ReplaceClosureCodeWithOptimizedCode(r4, closure, ip, r1);
-__ JumpCodeDataContainerObject(r4);
+__ JumpCodeObject(r4);
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@ -1759,8 +1759,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
-Handle<CodeDataContainer> code =
-BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
__ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r2, r3, and r5 unmodified.
@ -1804,7 +1803,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadTaggedPointerField(
r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
-__ LoadCodeDataContainerEntry(r4, r4);
+__ LoadCodeEntry(r4, r4);
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
@ -2226,7 +2225,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-Handle<CodeDataContainer> code) {
+Handle<Code> code) {
// ----------- S t a t e -------------
// -- r3 : target
// -- r2 : number of parameters on the stack
@ -2298,9 +2297,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
// static
-void Builtins::Generate_CallOrConstructForwardVarargs(
-MacroAssembler* masm, CallOrConstructMode mode,
-Handle<CodeDataContainer> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+CallOrConstructMode mode,
+Handle<Code> code) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments
// -- r5 : the new.target (for [[Construct]] calls)

View File

@ -189,7 +189,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Jump(static_cast<intptr_t>(target), rmode, cond, cr);
}
-void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@ -252,7 +252,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
bctrl();
}
-void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@ -293,8 +293,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
-Handle<CodeDataContainer> code =
-isolate()->builtins()->code_handle(builtin);
+Handle<Code> code = isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
Call(static_cast<Address>(code_target_index), RelocInfo::CODE_TARGET,
cond);
@ -337,8 +336,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
-Handle<CodeDataContainer> code =
-isolate()->builtins()->code_handle(builtin);
+Handle<Code> code = isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
Jump(static_cast<intptr_t>(code_target_index), RelocInfo::CODE_TARGET,
cond, cr);
@ -365,11 +363,10 @@ void TurboAssembler::Drop(Register count, Register scratch) {
add(sp, sp, scratch);
}
-void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
-Register code_data_container, Register scratch1, Register scratch2) {
-LoadS32(scratch1,
-FieldMemOperand(code_data_container,
-CodeDataContainer::kKindSpecificFlagsOffset),
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code,
+Register scratch1,
+Register scratch2) {
+LoadS32(scratch1, FieldMemOperand(code, Code::kKindSpecificFlagsOffset),
scratch2);
TestBit(scratch1, InstructionStream::kMarkedForDeoptimizationBit, scratch2);
}
@ -1652,10 +1649,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
switch (type) {
case InvokeType::kCall:
-CallCodeDataContainerObject(code);
+CallCodeObject(code);
break;
case InvokeType::kJump:
-JumpCodeDataContainerObject(code);
+JumpCodeObject(code);
break;
}
@ -2048,8 +2045,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// runtime to clear it.
{
UseScratchRegisterScope temps(masm);
-__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry,
-temps.Acquire(), scratch);
+__ TestCodeIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire(),
+scratch);
__ bne(&heal_optimized_code_slot, cr0);
}
@ -2058,7 +2055,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure, scratch,
r8);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
-__ LoadCodeDataContainerEntry(r5, optimized_code_entry);
+__ LoadCodeEntry(r5, optimized_code_entry);
__ Jump(r5);
// Optimized code slot contains deoptimized code or code is cleared and
@ -2124,7 +2121,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
SmiUntag(kJavaScriptCallArgCountRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
-JumpCodeDataContainerObject(r5);
+JumpCodeObject(r5);
}
// Read off the flags in the feedback vector and check if there
@ -2189,10 +2186,9 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
mov(r3, Operand(num_arguments));
Move(r4, ExternalReference::Create(f));
#if V8_TARGET_ARCH_PPC64
-Handle<CodeDataContainer> code =
-CodeFactory::CEntry(isolate(), f->result_size);
+Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
#else
-Handle<CodeDataContainer> code = CodeFactory::CEntry(isolate(), 1);
+Handle<Code> code = CodeFactory::CEntry(isolate(), 1);
#endif
Call(code, RelocInfo::CODE_TARGET);
}
@ -2209,7 +2205,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r4, builtin);
-Handle<CodeDataContainer> code =
+Handle<Code> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@ -4901,42 +4897,33 @@ MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::LoadCodeDataContainerEntry(
-Register destination, Register code_data_container_object) {
+void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
LoadU64(destination,
-FieldMemOperand(code_data_container_object,
-CodeDataContainer::kCodeEntryPointOffset),
-r0);
+FieldMemOperand(code_object, Code::kCodeEntryPointOffset), r0);
}
-void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
-Register destination, Register code_data_container_object) {
+void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
+Register code_object) {
ASM_CODE_COMMENT(this);
// Compute the InstructionStream object pointer from the code entry point.
LoadU64(destination,
-FieldMemOperand(code_data_container_object,
-CodeDataContainer::kCodeEntryPointOffset),
-r0);
+FieldMemOperand(code_object, Code::kCodeEntryPointOffset), r0);
SubS64(destination, destination,
Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
}
-void TurboAssembler::CallCodeDataContainerObject(
-Register code_data_container_object) {
+void TurboAssembler::CallCodeObject(Register code_object) {
ASM_CODE_COMMENT(this);
-LoadCodeDataContainerEntry(code_data_container_object,
-code_data_container_object);
-Call(code_data_container_object);
+LoadCodeEntry(code_object, code_object);
+Call(code_object);
}
-void TurboAssembler::JumpCodeDataContainerObject(
-Register code_data_container_object, JumpMode jump_mode) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
-LoadCodeDataContainerEntry(code_data_container_object,
-code_data_container_object);
-Jump(code_data_container_object);
+LoadCodeEntry(code_object, code_object);
+Jump(code_object);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {

View File

@ -716,15 +716,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
-void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
-Condition cond = al, CRegister cr = cr7);
+void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
+CRegister cr = cr7);
void Jump(const ExternalReference& reference);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
-void Call(Handle<CodeDataContainer> code,
-RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Call(Label* target);
@ -734,17 +733,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadEntryFromBuiltin(Builtin builtin, Register destination);
MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
-// Load the code entry point from the CodeDataContainer object.
-void LoadCodeDataContainerEntry(Register destination,
-Register code_data_container_object);
-// Load code entry point from the CodeDataContainer object and compute
+// Load the code entry point from the Code object.
+void LoadCodeEntry(Register destination, Register code_object);
+// Load code entry point from the Code object and compute
// InstructionStream object pointer out of it. Must not be used for
-// CodeDataContainers corresponding to builtins, because their entry points
+// Codes corresponding to builtins, because their entry points
// values point to the embedded instruction stream in .text section.
-void LoadCodeDataContainerInstructionStreamNonBuiltin(
-Register destination, Register code_data_container_object);
-void CallCodeDataContainerObject(Register code_data_container_object);
-void JumpCodeDataContainerObject(Register code_data_container_object,
+void LoadCodeInstructionStreamNonBuiltin(Register destination,
+Register code_object);
+void CallCodeObject(Register code_object);
+void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
void CallBuiltinByIndex(Register builtin_index);
@ -1724,8 +1722,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg, rc);
}
-void TestCodeDataContainerIsMarkedForDeoptimization(
-Register code_data_container, Register scratch1, Register scratch2);
+void TestCodeIsMarkedForDeoptimization(Register code, Register scratch1,
+Register scratch2);
Operand ClearedValue() const;
private:

View File

@ -691,7 +691,7 @@ void Assembler::EnsureSpaceFor(int space_needed) {
}
}
-void Assembler::call(Handle<CodeDataContainer> target, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
@ -700,7 +700,7 @@ void Assembler::call(Handle<CodeDataContainer> target, RelocInfo::Mode rmode) {
brasl(r14, Operand(target_index));
}
-void Assembler::jump(Handle<CodeDataContainer> target, RelocInfo::Mode rmode,
+void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
EnsureSpace ensure_space(this);

View File

@ -1073,9 +1073,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
basr(r14, r1);
}
-void call(Handle<CodeDataContainer> target, RelocInfo::Mode rmode);
-void jump(Handle<CodeDataContainer> target, RelocInfo::Mode rmode,
-Condition cond);
+void call(Handle<Code> target, RelocInfo::Mode rmode);
+void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
// S390 instruction generation
#define DECLARE_VRR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \

View File

@ -416,7 +416,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(target), rmode, cond);
}
-void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@ -469,7 +469,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
basr(r14, ip);
}
-void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
@ -502,8 +502,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
Call(ip);
break;
case BuiltinCallJumpMode::kForMksnapshot: {
-Handle<CodeDataContainer> code =
-isolate()->builtins()->code_handle(builtin);
+Handle<Code> code = isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}
@ -529,8 +528,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
break;
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
-Handle<CodeDataContainer> code =
-isolate()->builtins()->code_handle(builtin);
+Handle<Code> code = isolate()->builtins()->code_handle(builtin);
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
} else {
LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
@ -559,11 +557,9 @@ void TurboAssembler::Drop(Register count, Register scratch) {
AddS64(sp, sp, scratch);
}
-void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
-Register code_data_container, Register scratch) {
-LoadS32(scratch,
-FieldMemOperand(code_data_container,
-CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code,
+Register scratch) {
+LoadS32(scratch, FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
TestBit(scratch, InstructionStream::kMarkedForDeoptimizationBit, scratch);
}
@ -1835,10 +1831,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
-CallCodeDataContainerObject(code);
+CallCodeObject(code);
break;
case InvokeType::kJump:
-JumpCodeDataContainerObject(code);
+JumpCodeObject(code);
break;
}
// Continue here if InvokePrologue does handle the invocation due to
@ -2046,8 +2042,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
{
-__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry,
-scratch);
+__ TestCodeIsMarkedForDeoptimization(optimized_code_entry, scratch);
__ bne(&heal_optimized_code_slot);
}
@ -2056,7 +2051,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure, scratch,
r7);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
-__ LoadCodeDataContainerEntry(r4, optimized_code_entry);
+__ LoadCodeEntry(r4, optimized_code_entry);
__ Jump(r4);
// Optimized code slot contains deoptimized code or code is cleared and
@ -2122,7 +2117,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
SmiUntag(kJavaScriptCallArgCountRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
-JumpCodeDataContainerObject(r4);
+JumpCodeObject(r4);
}
// Read off the flags in the feedback vector and check if there
@ -2186,10 +2181,9 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
mov(r2, Operand(num_arguments));
Move(r3, ExternalReference::Create(f));
#if V8_TARGET_ARCH_S390X
-Handle<CodeDataContainer> code =
-CodeFactory::CEntry(isolate(), f->result_size);
+Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
#else
-Handle<CodeDataContainer> code = CodeFactory::CEntry(isolate(), 1);
+Handle<Code> code = CodeFactory::CEntry(isolate(), 1);
#endif
Call(code, RelocInfo::CODE_TARGET);
@ -2207,7 +2201,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r3, builtin);
-Handle<CodeDataContainer> code =
+Handle<Code> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@ -4976,40 +4970,33 @@ MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::LoadCodeDataContainerEntry(
-Register destination, Register code_data_container_object) {
+void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
LoadU64(destination,
-FieldMemOperand(code_data_container_object,
-CodeDataContainer::kCodeEntryPointOffset));
+FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
}
-void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
-Register destination, Register code_data_container_object) {
+void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
+Register code_object) {
ASM_CODE_COMMENT(this);
// Compute the InstructionStream object pointer from the code entry point.
LoadU64(destination,
-FieldMemOperand(code_data_container_object,
-CodeDataContainer::kCodeEntryPointOffset));
+FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
SubS64(destination, destination,
Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
}
-void TurboAssembler::CallCodeDataContainerObject(
-Register code_data_container_object) {
+void TurboAssembler::CallCodeObject(Register code_object) {
ASM_CODE_COMMENT(this);
-LoadCodeDataContainerEntry(code_data_container_object,
-code_data_container_object);
-Call(code_data_container_object);
+LoadCodeEntry(code_object, code_object);
+Call(code_object);
}
-void TurboAssembler::JumpCodeDataContainerObject(
-Register code_data_container_object, JumpMode jump_mode) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
-LoadCodeDataContainerEntry(code_data_container_object,
-code_data_container_object);
-Jump(code_data_container_object);
+LoadCodeEntry(code_object, code_object);
+Jump(code_object);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {

View File

@ -96,8 +96,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
-void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
-Condition cond = al);
+void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Jump(const ExternalReference& reference);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
@ -111,8 +110,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
-void Call(Handle<CodeDataContainer> code,
-RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
@ -139,17 +137,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadEntryFromBuiltin(Builtin builtin, Register destination);
MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
-// Load the code entry point from the CodeDataContainer object.
-void LoadCodeDataContainerEntry(Register destination,
-Register code_data_container_object);
-// Load code entry point from the CodeDataContainer object and compute
+// Load the code entry point from the Code object.
+void LoadCodeEntry(Register destination, Register code_object);
+// Load code entry point from the Code object and compute
// InstructionStream object pointer out of it. Must not be used for
-// CodeDataContainers corresponding to builtins, because their entry points
+// Codes corresponding to builtins, because their entry points
// values point to the embedded instruction stream in .text section.
-void LoadCodeDataContainerInstructionStreamNonBuiltin(
-Register destination, Register code_data_container_object);
-void CallCodeDataContainerObject(Register code_data_container_object);
-void JumpCodeDataContainerObject(Register code_data_container_object,
+void LoadCodeInstructionStreamNonBuiltin(Register destination,
+Register code_object);
+void CallCodeObject(Register code_object);
+void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
void CallBuiltinByIndex(Register builtin_index);
@ -1801,8 +1798,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
SmiCheck smi_check = SmiCheck::kInline);
-void TestCodeDataContainerIsMarkedForDeoptimization(
-Register code_data_container, Register scratch);
+void TestCodeIsMarkedForDeoptimization(Register code, Register scratch);
Operand ClearedValue() const;
private:

View File

@ -781,7 +781,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@ -792,13 +792,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
-int offset = InstructionStream::kCodeDataContainerOffset -
-InstructionStream::kHeaderSize;
+int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ LoadTaggedPointerField(
r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
-__ LoadS32(r11,
-FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset),
-r0);
+__ LoadS32(r11, FieldMemOperand(r11, Code::kKindSpecificFlagsOffset), r0);
__ TestBit(r11, InstructionStream::kMarkedForDeoptimizationBit);
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne, cr0);
@ -819,7 +816,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
-__ CallCodeDataContainerObject(reg);
+__ CallCodeObject(reg);
} else {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
@ -882,7 +879,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
-__ JumpCodeDataContainerObject(reg);
+__ JumpCodeObject(reg);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
@ -919,7 +916,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadTaggedPointerField(
r5, FieldMemOperand(func, JSFunction::kCodeOffset), r0);
-__ CallCodeDataContainerObject(r5);
+__ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();

View File

@ -1122,7 +1122,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@ -1133,12 +1133,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
-int offset = InstructionStream::kCodeDataContainerOffset -
-InstructionStream::kHeaderSize;
+int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ LoadTaggedPointerField(
ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
-__ LoadS32(ip,
-FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
+__ LoadS32(ip, FieldMemOperand(ip, Code::kKindSpecificFlagsOffset));
__ TestBit(ip, InstructionStream::kMarkedForDeoptimizationBit);
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne);
@ -1164,7 +1162,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
-__ CallCodeDataContainerObject(reg);
+__ CallCodeObject(reg);
} else {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
@ -1216,7 +1214,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
-__ JumpCodeDataContainerObject(reg);
+__ JumpCodeObject(reg);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
@ -1250,7 +1248,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadTaggedPointerField(r4,
FieldMemOperand(func, JSFunction::kCodeOffset));
-__ CallCodeDataContainerObject(r4);
+__ CallCodeObject(r4);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
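
The BailoutIfDeoptimized changes in the last two files keep the three-step check described in the comments above (read the flags word from the referenced Code object, test kMarkedForDeoptimizationBit, jump to CompileLazyDeoptimizedCode if it is set); only the owner of the flags field changes from CodeDataContainer to Code. A hedged, standalone sketch of that bit test follows; the flag layout and bit position are placeholders, not V8's actual Code::kKindSpecificFlagsOffset encoding.

```cpp
// Standalone illustration of the deopt check the generated code performs;
// the bit index below is a placeholder, not V8's real layout.
#include <cassert>
#include <cstdint>

constexpr uint32_t kMarkedForDeoptimizationBit = 0;  // placeholder position

// Mirrors the generated "__ TestBit(...)" + conditional jump: returns true
// when the code object should bail out to CompileLazyDeoptimizedCode.
bool IsMarkedForDeoptimization(uint32_t kind_specific_flags) {
  return (kind_specific_flags >> kMarkedForDeoptimizationBit) & 1u;
}

int main() {
  assert(IsMarkedForDeoptimization(0b1));    // bit set  -> take the bailout path
  assert(!IsMarkedForDeoptimization(0b10));  // bit clear -> fall through
  return 0;
}
```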