Rename CodeDataContainer to Code

This completes the big Code/CodeDataContainer name shuffle.
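
For orientation, a minimal before/after sketch of a typical call site touched by this rename (an illustrative reconstruction based on the api-natives hunk below; `isolate` and `function` are assumed to be in scope):

    // Before: builtin handles were typed as CodeDataContainer.
    Handle<CodeDataContainer> trampoline =
        BUILTIN_CODE(isolate, DebugBreakTrampoline);
    function->set_code(*trampoline);

    // After: the same object is now simply Code (the old Code object,
    // which holds the instructions, is now InstructionStream).
    Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
    function->set_code(*trampoline);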

Bug: v8:13654
Change-Id: If033e9153e751ab0d2a38572da2ce2f7029bc359
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4173569
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85354}
Author: Jakob Linke, 2023-01-17 14:39:32 +01:00 (committed by V8 LUCI CQ)
Parent: 08aa1e2891
Commit: c53c026e6e
222 changed files with 1620 additions and 1919 deletions


@@ -83,8 +83,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
 InstantiateFunction(isolate,
 Handle<FunctionTemplateInfo>::cast(getter)),
 Object);
-Handle<CodeDataContainer> trampoline =
-BUILTIN_CODE(isolate, DebugBreakTrampoline);
+Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
 Handle<JSFunction>::cast(getter)->set_code(*trampoline);
 }
@@ -94,8 +93,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
 InstantiateFunction(isolate,
 Handle<FunctionTemplateInfo>::cast(setter)),
 Object);
-Handle<CodeDataContainer> trampoline =
-BUILTIN_CODE(isolate, DebugBreakTrampoline);
+Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
 Handle<JSFunction>::cast(setter)->set_code(*trampoline);
 }
 RETURN_ON_EXCEPTION(


@@ -6642,8 +6642,7 @@ Local<Context> NewContext(
 // TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't
 // fail.
 // Sanity-check that the isolate is initialized and usable.
-CHECK(
-i_isolate->builtins()->code(i::Builtin::kIllegal).IsCodeDataContainer());
+CHECK(i_isolate->builtins()->code(i::Builtin::kIllegal).IsCode());
 TRACE_EVENT_CALL_STATS_SCOPED(i_isolate, "v8", "V8.NewContext");
 API_RCS_SCOPE(i_isolate, Context, New);
@@ -9708,7 +9707,7 @@ JSEntryStubs Isolate::GetJSEntryStubs() {
 {i::Builtin::kJSRunMicrotasksEntry,
 &entry_stubs.js_run_microtasks_entry_stub}}};
 for (auto& pair : stubs) {
-i::CodeDataContainer js_entry = i_isolate->builtins()->code(pair.first);
+i::Code js_entry = i_isolate->builtins()->code(pair.first);
 pair.second->code.start =
 reinterpret_cast<const void*>(js_entry.InstructionStart());
 pair.second->code.length_in_bytes = js_entry.InstructionSize();


@@ -409,8 +409,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
 {
 ScratchRegisterScope temps(this);
 Register scratch = temps.AcquireScratch();
-__ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result,
-scratch);
+__ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
 __ b(eq, on_result);
 __ mov(scratch, __ ClearedValue());
 StoreTaggedFieldNoWriteBarrier(


@@ -464,8 +464,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
 // Is it marked_for_deoptimization? If yes, clear the slot.
 {
 ScratchRegisterScope temps(this);
-__ JumpIfCodeDataContainerIsMarkedForDeoptimization(
-scratch_and_result, temps.AcquireScratch(), &clear_slot);
+__ JumpIfCodeIsMarkedForDeoptimization(scratch_and_result,
+temps.AcquireScratch(), &clear_slot);
 __ B(on_result);
 }


@@ -74,8 +74,7 @@ class BaselineCompilerTask {
 return;
 }
-shared_function_info_->set_baseline_code(ToCodeDataContainer(*code),
-kReleaseStore);
+shared_function_info_->set_baseline_code(ToCode(*code), kReleaseStore);
 if (v8_flags.trace_baseline_concurrent_compilation) {
 CodeTracer::Scope scope(isolate->GetCodeTracer());
 std::stringstream ss;


@@ -387,7 +387,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
 // Is it marked_for_deoptimization? If yes, clear the slot.
 {
 ScratchRegisterScope temps(this);
-__ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result);
+__ TestCodeIsMarkedForDeoptimization(scratch_and_result);
 __ j(equal, on_result, distance);
 __ mov(FieldOperand(feedback_vector,
 FeedbackVector::OffsetOfElementAt(slot.ToInt())),


@@ -410,7 +410,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
 // Is it marked_for_deoptimization? If yes, clear the slot.
 {
-__ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result);
+__ TestCodeIsMarkedForDeoptimization(scratch_and_result);
 __ j(equal, on_result, distance);
 __ StoreTaggedField(
 FieldOperand(feedback_vector,


@@ -311,12 +311,12 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
 Generate_JSBuiltinsConstructStubHelper(masm);
 }
-static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
-Register code, Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+Register scratch) {
 DCHECK(!AreAliased(code, scratch));
 // Verify that the code kind is baseline code via the CodeKind.
-__ ldr(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
-__ DecodeField<CodeDataContainer::KindField>(scratch);
+__ ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+__ DecodeField<Code::KindField>(scratch);
 __ cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
 __ Assert(eq, AbortReason::kExpectedBaselineData);
 }
@@ -327,11 +327,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
 Label* is_baseline) {
 ASM_CODE_COMMENT(masm);
 Label done;
-__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_TYPE);
 if (v8_flags.debug_code) {
 Label not_baseline;
 __ b(ne, &not_baseline);
-AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+AssertCodeIsBaseline(masm, sfi_data, scratch1);
 __ b(eq, is_baseline);
 __ bind(&not_baseline);
 } else {
@@ -446,7 +446,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 __ Move(r1, r4);
 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
 __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
-__ JumpCodeDataContainerObject(r2);
+__ JumpCodeObject(r2);
 }
 __ bind(&prepare_step_in_if_stepping);
@@ -631,7 +631,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 //
 // Invoke the function by calling through JS entry trampoline builtin and
 // pop the faked function when we return.
-Handle<CodeDataContainer> trampoline_code =
+Handle<Code> trampoline_code =
 masm->isolate()->builtins()->code_handle(entry_trampoline);
 DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
 USE(pushed_stack_space);
@@ -769,9 +769,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
 }
 // Invoke the code.
-Handle<CodeDataContainer> builtin =
-is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
-: masm->isolate()->builtins()->Call();
+Handle<Code> builtin = is_construct
+? BUILTIN_CODE(masm->isolate(), Construct)
+: masm->isolate()->builtins()->Call();
 __ Call(builtin, RelocInfo::CODE_TARGET);
 // Exit the JS frame and remove the parameters (except function), and
@@ -1328,7 +1328,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 __ mov(r2, kInterpreterBytecodeArrayRegister);
 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
 __ ReplaceClosureCodeWithOptimizedCode(r2, closure);
-__ JumpCodeDataContainerObject(r2);
+__ JumpCodeObject(r2);
 __ bind(&install_baseline_code);
 __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@@ -1458,8 +1457,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
 // Tail call to the array construct stub (still in the caller
 // context at this point).
-Handle<CodeDataContainer> code =
-BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
 __ Jump(code, RelocInfo::CODE_TARGET);
 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
 // Call the constructor with r0, r1, and r3 unmodified.
@@ -1501,7 +1500,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
 __ ldr(r2,
 FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
-__ LoadCodeDataContainerEntry(r2, r2);
+__ LoadCodeEntry(r2, r2);
 __ b(&trampoline_loaded);
 __ bind(&builtin_trampoline);
@@ -1759,7 +1758,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
 __ LeaveFrame(StackFrame::STUB);
 }
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(r0, r0);
+__ LoadCodeInstructionStreamNonBuiltin(r0, r0);
 // Load deoptimization data from the code object.
 // <deopt_data> = <code>[#deoptimization_data_offset]
@@ -2007,7 +2006,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 // TODO(v8:11615): Observe InstructionStream::kMaxArguments in
 // CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-Handle<CodeDataContainer> code) {
+Handle<Code> code) {
 // ----------- S t a t e -------------
 // -- r1 : target
 // -- r0 : number of parameters on the stack
@@ -2073,9 +2072,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }
 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(
-MacroAssembler* masm, CallOrConstructMode mode,
-Handle<CodeDataContainer> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+CallOrConstructMode mode,
+Handle<Code> code) {
 // ----------- S t a t e -------------
 // -- r0 : the number of arguments
 // -- r3 : the new.target (for [[Construct]] calls)
@@ -3573,7 +3572,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 // always have baseline code.
 if (!is_osr) {
 Label start_with_baseline;
-__ CompareObjectType(code_obj, r3, r3, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(code_obj, r3, r3, CODE_TYPE);
 __ b(eq, &start_with_baseline);
 // Start with bytecode as there is no baseline code.
@@ -3586,14 +3585,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 // Start with baseline code.
 __ bind(&start_with_baseline);
 } else if (v8_flags.debug_code) {
-__ CompareObjectType(code_obj, r3, r3, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(code_obj, r3, r3, CODE_TYPE);
 __ Assert(eq, AbortReason::kExpectedBaselineData);
 }
 if (v8_flags.debug_code) {
-AssertCodeDataContainerIsBaseline(masm, code_obj, r3);
+AssertCodeIsBaseline(masm, code_obj, r3);
 }
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
+__ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
 // Load the feedback vector.
 Register feedback_vector = r2;


@@ -388,20 +388,19 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
 __ Unreachable();
 }
-static void AssertCodeDataContainerIsBaselineAllowClobber(MacroAssembler* masm,
-Register code,
-Register scratch) {
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+Register code, Register scratch) {
 // Verify that the code kind is baseline code via the CodeKind.
-__ Ldr(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
-__ DecodeField<CodeDataContainer::KindField>(scratch);
+__ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+__ DecodeField<Code::KindField>(scratch);
 __ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
 __ Assert(eq, AbortReason::kExpectedBaselineData);
 }
-static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
-Register code, Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+Register scratch) {
 DCHECK(!AreAliased(code, scratch));
-return AssertCodeDataContainerIsBaselineAllowClobber(masm, code, scratch);
+return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
 }
 // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
@@ -412,11 +411,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
 Label* is_baseline) {
 ASM_CODE_COMMENT(masm);
 Label done;
-__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_TYPE);
 if (v8_flags.debug_code) {
 Label not_baseline;
 __ B(ne, &not_baseline);
-AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+AssertCodeIsBaseline(masm, sfi_data, scratch1);
 __ B(eq, is_baseline);
 __ Bind(&not_baseline);
 } else {
@@ -551,7 +550,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 __ Mov(x1, x4);
 static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
 __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
-__ JumpCodeDataContainerObject(x2);
+__ JumpCodeObject(x2);
 }
 __ Bind(&prepare_step_in_if_stepping);
@@ -759,7 +758,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 //
 // Invoke the function by calling through JS entry trampoline builtin and
 // pop the faked function when we return.
-Handle<CodeDataContainer> trampoline_code =
+Handle<Code> trampoline_code =
 masm->isolate()->builtins()->code_handle(entry_trampoline);
 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
@@ -933,9 +932,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
 // x28 : pointer cage base register (kPtrComprCageBaseRegister).
 // x29 : frame pointer (fp).
-Handle<CodeDataContainer> builtin =
-is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
-: masm->isolate()->builtins()->Call();
+Handle<Code> builtin = is_construct
+? BUILTIN_CODE(masm->isolate(), Construct)
+: masm->isolate()->builtins()->Call();
 __ Call(builtin, RelocInfo::CODE_TARGET);
 // Exit the JS internal frame and remove the parameters (except function),
@@ -1504,7 +1503,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 __ Move(x2, kInterpreterBytecodeArrayRegister);
 static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
 __ ReplaceClosureCodeWithOptimizedCode(x2, closure);
-__ JumpCodeDataContainerObject(x2);
+__ JumpCodeObject(x2);
 __ bind(&install_baseline_code);
 __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@@ -1744,7 +1743,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
 __ LoadTaggedPointerField(
 x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
-__ LoadCodeDataContainerEntry(x1, x1);
+__ LoadCodeEntry(x1, x1);
 __ B(&trampoline_loaded);
 __ Bind(&builtin_trampoline);
@@ -1994,7 +1993,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
 __ LeaveFrame(StackFrame::STUB);
 }
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(x0, x0);
+__ LoadCodeInstructionStreamNonBuiltin(x0, x0);
 // Load deoptimization data from the code object.
 // <deopt_data> = <code>[#deoptimization_data_offset]
@@ -2337,7 +2336,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
 // TODO(v8:11615): Observe InstructionStream::kMaxArguments in
 // CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-Handle<CodeDataContainer> code) {
+Handle<Code> code) {
 // ----------- S t a t e -------------
 // -- x1 : target
 // -- x0 : number of parameters on the stack
@@ -2410,9 +2409,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }
 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(
-MacroAssembler* masm, CallOrConstructMode mode,
-Handle<CodeDataContainer> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+CallOrConstructMode mode,
+Handle<Code> code) {
 // ----------- S t a t e -------------
 // -- x0 : the number of arguments
 // -- x3 : the new.target (for [[Construct]] calls)
@@ -5708,7 +5707,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 // always have baseline code.
 if (!is_osr) {
 Label start_with_baseline;
-__ CompareObjectType(code_obj, x3, x3, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(code_obj, x3, x3, CODE_TYPE);
 __ B(eq, &start_with_baseline);
 // Start with bytecode as there is no baseline code.
@@ -5721,14 +5720,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 // Start with baseline code.
 __ bind(&start_with_baseline);
 } else if (v8_flags.debug_code) {
-__ CompareObjectType(code_obj, x3, x3, CODE_DATA_CONTAINER_TYPE);
+__ CompareObjectType(code_obj, x3, x3, CODE_TYPE);
 __ Assert(eq, AbortReason::kExpectedBaselineData);
 }
 if (v8_flags.debug_code) {
-AssertCodeDataContainerIsBaseline(masm, code_obj, x3);
+AssertCodeIsBaseline(masm, code_obj, x3);
 }
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
+__ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
 // Load the feedback vector.
 Register feedback_vector = x2;


@@ -1808,7 +1808,7 @@ TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
 void ArrayBuiltinsAssembler::TailCallArrayConstructorStub(
 const Callable& callable, TNode<Context> context, TNode<JSFunction> target,
 TNode<HeapObject> allocation_site_or_undefined, TNode<Int32T> argc) {
-TNode<CodeDataContainer> code = HeapConstant(callable.code());
+TNode<Code> code = HeapConstant(callable.code());
 // We are going to call here ArrayNoArgumentsConstructor or
 // ArraySingleArgumentsConstructor which in addition to the register arguments


@@ -180,7 +180,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
 // which almost doubles the size of `await` builtins (unnecessarily).
 TNode<Smi> builtin_id = LoadObjectField<Smi>(
 shared_info, SharedFunctionInfo::kFunctionDataOffset);
-TNode<CodeDataContainer> code = LoadBuiltin(builtin_id);
+TNode<Code> code = LoadBuiltin(builtin_id);
 StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
 }


@@ -252,8 +252,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
 StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
 shared_function_info);
 StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
-TNode<CodeDataContainer> lazy_builtin =
-HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+TNode<Code> lazy_builtin = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
 StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
 Return(result);
 }


@@ -104,7 +104,7 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
 BIND(&tailcall_to_shared);
 // Tail call into code object on the SharedFunctionInfo.
-TNode<CodeDataContainer> code = GetSharedFunctionInfoCode(shared);
+TNode<Code> code = GetSharedFunctionInfoCode(shared);
 TailCallJSCode(code, context, function, new_target, arg_count);
 }
@@ -1230,7 +1230,7 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
 Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));
 const bool builtin_exit_frame = true;
-TNode<CodeDataContainer> code = HeapConstant(
+TNode<Code> code = HeapConstant(
 CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame));
 // Unconditionally push argc, target and new target as extra stack arguments.
@@ -1568,7 +1568,7 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
 // On failure, tail call back to regular JavaScript by re-calling the given
 // function which has been reset to the compile lazy builtin.
-TNode<CodeDataContainer> code = LoadJSFunctionCode(function);
+TNode<Code> code = LoadJSFunctionCode(function);
 TailCallJSCode(code, context, function, new_target, arg_count);
 }


@@ -15,7 +15,7 @@ namespace v8 {
 namespace internal {
 void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
-TNode<CodeDataContainer> code, TNode<JSFunction> function) {
+TNode<Code> code, TNode<JSFunction> function) {
 auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
 auto context = Parameter<Context>(Descriptor::kContext);
 auto new_target = Parameter<Object>(Descriptor::kNewTarget);
@@ -25,8 +25,7 @@ void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
 void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
 Runtime::FunctionId function_id, TNode<JSFunction> function) {
 auto context = Parameter<Context>(Descriptor::kContext);
-TNode<CodeDataContainer> code =
-CAST(CallRuntime(function_id, context, function));
+TNode<Code> code = CAST(CallRuntime(function_id, context, function));
 GenerateTailCallToJSCode(code, function);
 }
@@ -64,8 +63,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
 TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
 feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset);
-// Optimized code slot is a weak reference to CodeDataContainer object.
-TNode<CodeDataContainer> optimized_code = CAST(GetHeapObjectAssumeWeak(
+// Optimized code slot is a weak reference to Code object.
+TNode<Code> optimized_code = CAST(GetHeapObjectAssumeWeak(
 maybe_optimized_code_entry, &heal_optimized_code_slot));
 // Check if the optimized code is marked for deopt. If it is, call the
@@ -101,7 +100,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
 TNode<SharedFunctionInfo> shared =
 CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
 TVARIABLE(Uint16T, sfi_data_type);
-TNode<CodeDataContainer> sfi_code =
+TNode<Code> sfi_code =
 GetSharedFunctionInfoCode(shared, &sfi_data_type, &compile_function);
 TNode<HeapObject> feedback_cell_value = LoadFeedbackCellValue(function);
@@ -131,18 +130,17 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
 // existing bytecode.
 BIND(&maybe_use_sfi_code);
 Label tailcall_code(this), baseline(this);
-TVARIABLE(CodeDataContainer, code);
+TVARIABLE(Code, code);
 // Check if we have baseline code.
-GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODE_DATA_CONTAINER_TYPE),
-&baseline);
+GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODE_TYPE), &baseline);
 code = sfi_code;
 Goto(&tailcall_code);
 BIND(&baseline);
 // Ensure we have a feedback vector.
-code = Select<CodeDataContainer>(
+code = Select<Code>(
 IsFeedbackVector(feedback_cell_value), [=]() { return sfi_code; },
 [=]() {
 return CAST(CallRuntime(Runtime::kInstallBaselineCode,
@@ -167,8 +165,7 @@ TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
 TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
 auto function = Parameter<JSFunction>(Descriptor::kTarget);
-TNode<CodeDataContainer> code =
-HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+TNode<Code> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
 // Set the code slot inside the JSFunction to CompileLazy.
 StoreObjectField(function, JSFunction::kCodeOffset, code);
 GenerateTailCallToJSCode(code, function);


@@ -17,8 +17,7 @@ class LazyBuiltinsAssembler : public CodeStubAssembler {
 explicit LazyBuiltinsAssembler(compiler::CodeAssemblerState* state)
 : CodeStubAssembler(state) {}
-void GenerateTailCallToJSCode(TNode<CodeDataContainer> code,
-TNode<JSFunction> function);
+void GenerateTailCallToJSCode(TNode<Code> code, TNode<JSFunction> function);
 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
 TNode<JSFunction> function);


@@ -516,7 +516,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
 #endif
 GotoIf(TaggedIsSmi(var_code.value()), &runtime);
-TNode<CodeDataContainer> code = CAST(var_code.value());
+TNode<Code> code = CAST(var_code.value());
 Label if_success(this), if_exception(this, Label::kDeferred);
 {


@@ -121,7 +121,7 @@ const char* Builtins::Lookup(Address pc) {
 return nullptr;
 }
-Handle<CodeDataContainer> Builtins::CallFunction(ConvertReceiverMode mode) {
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
 switch (mode) {
 case ConvertReceiverMode::kNullOrUndefined:
 return code_handle(Builtin::kCallFunction_ReceiverIsNullOrUndefined);
@@ -133,7 +133,7 @@ Handle<CodeDataContainer> Builtins::CallFunction(ConvertReceiverMode mode) {
 UNREACHABLE();
 }
-Handle<CodeDataContainer> Builtins::Call(ConvertReceiverMode mode) {
+Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
 switch (mode) {
 case ConvertReceiverMode::kNullOrUndefined:
 return code_handle(Builtin::kCall_ReceiverIsNullOrUndefined);
@@ -145,8 +145,7 @@ Handle<CodeDataContainer> Builtins::Call(ConvertReceiverMode mode) {
 UNREACHABLE();
 }
-Handle<CodeDataContainer> Builtins::NonPrimitiveToPrimitive(
-ToPrimitiveHint hint) {
+Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
 switch (hint) {
 case ToPrimitiveHint::kDefault:
 return code_handle(Builtin::kNonPrimitiveToPrimitive_Default);
@@ -158,8 +157,7 @@ Handle<CodeDataContainer> Builtins::NonPrimitiveToPrimitive(
 UNREACHABLE();
 }
-Handle<CodeDataContainer> Builtins::OrdinaryToPrimitive(
-OrdinaryToPrimitiveHint hint) {
+Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
 switch (hint) {
 case OrdinaryToPrimitiveHint::kNumber:
 return code_handle(Builtin::kOrdinaryToPrimitive_Number);
@@ -181,21 +179,21 @@ FullObjectSlot Builtins::builtin_tier0_slot(Builtin builtin) {
 return FullObjectSlot(location);
 }
-void Builtins::set_code(Builtin builtin, CodeDataContainer code) {
+void Builtins::set_code(Builtin builtin, Code code) {
 DCHECK_EQ(builtin, code.builtin_id());
 DCHECK(Internals::HasHeapObjectTag(code.ptr()));
 // The given builtin may be uninitialized thus we cannot check its type here.
 isolate_->builtin_table()[Builtins::ToInt(builtin)] = code.ptr();
 }
-CodeDataContainer Builtins::code(Builtin builtin) {
+Code Builtins::code(Builtin builtin) {
 Address ptr = isolate_->builtin_table()[Builtins::ToInt(builtin)];
-return CodeDataContainer::cast(Object(ptr));
+return Code::cast(Object(ptr));
 }
-Handle<CodeDataContainer> Builtins::code_handle(Builtin builtin) {
+Handle<Code> Builtins::code_handle(Builtin builtin) {
 Address* location = &isolate_->builtin_table()[Builtins::ToInt(builtin)];
-return Handle<CodeDataContainer>(location);
+return Handle<Code>(location);
 }
 // static
@@ -231,7 +229,7 @@ CallInterfaceDescriptor Builtins::CallInterfaceDescriptorFor(Builtin builtin) {
 // static
 Callable Builtins::CallableFor(Isolate* isolate, Builtin builtin) {
-Handle<CodeDataContainer> code = isolate->builtins()->code_handle(builtin);
+Handle<Code> code = isolate->builtins()->code_handle(builtin);
 return Callable{code, CallInterfaceDescriptorFor(builtin)};
 }
@@ -258,7 +256,7 @@ void Builtins::PrintBuiltinCode() {
 base::CStrVector(v8_flags.print_builtin_code_filter))) {
 CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
 OFStream os(trace_scope.file());
-CodeDataContainer builtin_code = code(builtin);
+Code builtin_code = code(builtin);
 builtin_code.Disassemble(builtin_name, os, isolate_);
 os << "\n";
 }
@@ -272,7 +270,7 @@ void Builtins::PrintBuiltinSize() {
 ++builtin) {
 const char* builtin_name = name(builtin);
 const char* kind = KindNameOf(builtin);
-CodeDataContainer code = Builtins::code(builtin);
+Code code = Builtins::code(builtin);
 PrintF(stdout, "%s Builtin, %s, %d\n", kind, builtin_name,
 code.InstructionSize());
 }
@@ -333,7 +331,7 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
 int i = 0;
 HandleScope scope(isolate);
 for (; i < ToInt(Builtin::kFirstBytecodeHandler); i++) {
-Handle<CodeDataContainer> builtin_code(&builtins[i]);
+Handle<Code> builtin_code(&builtins[i]);
 Handle<AbstractCode> code = ToAbstractCode(builtin_code, isolate);
 PROFILE(isolate, CodeCreateEvent(LogEventListener::CodeTag::kBuiltin, code,
 Builtins::name(FromInt(i))));
@@ -341,7 +339,7 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
 static_assert(kLastBytecodeHandlerPlusOne == kBuiltinCount);
 for (; i < kBuiltinCount; i++) {
-Handle<CodeDataContainer> builtin_code(&builtins[i]);
+Handle<Code> builtin_code(&builtins[i]);
 Handle<AbstractCode> code = ToAbstractCode(builtin_code, isolate);
 interpreter::Bytecode bytecode =
 builtin_metadata[i].data.bytecode_and_scale.bytecode;


@@ -138,19 +138,17 @@ class Builtins {
 }
 // Convenience wrappers.
-Handle<CodeDataContainer> CallFunction(
-ConvertReceiverMode = ConvertReceiverMode::kAny);
-Handle<CodeDataContainer> Call(
-ConvertReceiverMode = ConvertReceiverMode::kAny);
-Handle<CodeDataContainer> NonPrimitiveToPrimitive(
+Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
+Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
+Handle<Code> NonPrimitiveToPrimitive(
 ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
-Handle<CodeDataContainer> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
+Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
 // Used by CreateOffHeapTrampolines in isolate.cc.
-void set_code(Builtin builtin, CodeDataContainer code);
+void set_code(Builtin builtin, Code code);
-V8_EXPORT_PRIVATE CodeDataContainer code(Builtin builtin);
-V8_EXPORT_PRIVATE Handle<CodeDataContainer> code_handle(Builtin builtin);
+V8_EXPORT_PRIVATE Code code(Builtin builtin);
+V8_EXPORT_PRIVATE Handle<Code> code_handle(Builtin builtin);
 static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin);
 V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate,
@@ -194,8 +192,8 @@
 }
 // True, iff the given code object is a builtin with off-heap embedded code.
-template <typename CodeOrCodeDataContainer>
-static bool IsIsolateIndependentBuiltin(CodeOrCodeDataContainer code) {
+template <typename CodeOrCode>
+static bool IsIsolateIndependentBuiltin(CodeOrCode code) {
 Builtin builtin = code.builtin_id();
 return Builtins::IsBuiltinId(builtin) &&
 Builtins::IsIsolateIndependent(builtin);
@@ -289,10 +287,10 @@
 enum class CallOrConstructMode { kCall, kConstruct };
 static void Generate_CallOrConstructVarargs(MacroAssembler* masm,
-Handle<CodeDataContainer> code);
-static void Generate_CallOrConstructForwardVarargs(
-MacroAssembler* masm, CallOrConstructMode mode,
-Handle<CodeDataContainer> code);
+Handle<Code> code);
+static void Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+CallOrConstructMode mode,
+Handle<Code> code);
 enum class InterpreterEntryTrampolineMode {
 // The version of InterpreterEntryTrampoline used by default.


@@ -35,8 +35,8 @@ macro IsInstructionStream(o: HeapObject): bool {
 }
 @export
-macro IsCodeDataContainer(o: HeapObject): bool {
-return Is<CodeDataContainer>(o);
+macro IsCode(o: HeapObject): bool {
+return Is<Code>(o);
 }
 @export


@@ -414,7 +414,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 // Invoke the function by calling through JS entry trampoline builtin and
 // pop the faked function when we return.
-Handle<CodeDataContainer> trampoline_code =
+Handle<Code> trampoline_code =
 masm->isolate()->builtins()->code_handle(entry_trampoline);
 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
@@ -513,9 +513,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
 __ mov(edi, Operand(scratch2, EntryFrameConstants::kFunctionArgOffset));
 // Invoke the code.
-Handle<CodeDataContainer> builtin =
-is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
-: masm->isolate()->builtins()->Call();
+Handle<Code> builtin = is_construct
+? BUILTIN_CODE(masm->isolate(), Construct)
+: masm->isolate()->builtins()->Call();
 __ Call(builtin, RelocInfo::CODE_TARGET);
 // Exit the internal frame. Notice that this also removes the empty.
@@ -555,12 +555,12 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
 __ bind(&done);
 }
-static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
-Register code, Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+Register scratch) {
 DCHECK(!AreAliased(code, scratch));
 // Verify that the code kind is baseline code via the CodeKind.
-__ mov(scratch, FieldOperand(code, CodeDataContainer::kFlagsOffset));
-__ DecodeField<CodeDataContainer::KindField>(scratch);
+__ mov(scratch, FieldOperand(code, Code::kFlagsOffset));
+__ DecodeField<Code::KindField>(scratch);
 __ cmp(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
 __ Assert(equal, AbortReason::kExpectedBaselineData);
 }
@@ -573,11 +573,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
 Label done;
 __ LoadMap(scratch1, sfi_data);
-__ CmpInstanceType(scratch1, CODE_DATA_CONTAINER_TYPE);
+__ CmpInstanceType(scratch1, CODE_TYPE);
 if (v8_flags.debug_code) {
 Label not_baseline;
 __ j(not_equal, &not_baseline);
-AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+AssertCodeIsBaseline(masm, sfi_data, scratch1);
 __ j(equal, is_baseline);
 __ bind(&not_baseline);
 } else {
@@ -689,7 +689,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 __ bind(&is_baseline);
 __ Pop(eax);
-__ CmpObjectType(ecx, CODE_DATA_CONTAINER_TYPE, ecx);
+__ CmpObjectType(ecx, CODE_TYPE, ecx);
 __ Assert(equal, AbortReason::kMissingBytecodeArray);
 __ bind(&ok);
@@ -706,7 +706,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 // undefined because generator functions are non-constructable.
 static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
 __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
-__ JumpCodeDataContainerObject(ecx);
+__ JumpCodeObject(ecx);
 }
 __ bind(&prepare_step_in_if_stepping);
@@ -1148,7 +1148,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 __ pop(eax); // Restore the argument count.
 __ pop(ecx);
 __ pop(edx);
-__ JumpCodeDataContainerObject(ecx);
+__ JumpCodeObject(ecx);
 __ bind(&install_baseline_code);
 __ movd(eax, xmm0); // Recover argument count.
@@ -1419,7 +1419,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
 __ mov(scratch,
 FieldOperand(scratch, InterpreterData::kInterpreterTrampolineOffset));
-__ LoadCodeDataContainerEntry(scratch, scratch);
+__ LoadCodeEntry(scratch, scratch);
 __ jmp(&trampoline_loaded, Label::kNear);
 __ bind(&builtin_trampoline);
@@ -2055,7 +2055,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 // TODO(v8:11615): Observe InstructionStream::kMaxArguments in
 // CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-Handle<CodeDataContainer> code) {
+Handle<Code> code) {
 // ----------- S t a t e -------------
 // -- edi : target
 // -- esi : context for the Call / Construct builtin
@@ -2148,9 +2148,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }
 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(
-MacroAssembler* masm, CallOrConstructMode mode,
-Handle<CodeDataContainer> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+CallOrConstructMode mode,
+Handle<Code> code) {
 // ----------- S t a t e -------------
 // -- eax : the number of arguments
 // -- edi : the target to call (can be any Object)
@@ -2728,7 +2728,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
 __ leave();
 }
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(eax, eax);
+__ LoadCodeInstructionStreamNonBuiltin(eax, eax);
 // Load deoptimization data from the code object.
 __ mov(ecx,
@@ -4213,8 +4213,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 // always have baseline code.
 if (!is_osr) {
 Label start_with_baseline;
-__ CmpObjectType(code_obj, CODE_DATA_CONTAINER_TYPE,
-kInterpreterBytecodeOffsetRegister);
+__ CmpObjectType(code_obj, CODE_TYPE, kInterpreterBytecodeOffsetRegister);
 __ j(equal, &start_with_baseline);
 // Start with bytecode as there is no baseline code.
@@ -4227,15 +4226,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 __ bind(&start_with_baseline);
 } else if (v8_flags.debug_code) {
-__ CmpObjectType(code_obj, CODE_DATA_CONTAINER_TYPE,
-kInterpreterBytecodeOffsetRegister);
+__ CmpObjectType(code_obj, CODE_TYPE, kInterpreterBytecodeOffsetRegister);
 __ Assert(equal, AbortReason::kExpectedBaselineData);
 }
 if (v8_flags.debug_code) {
-AssertCodeDataContainerIsBaseline(masm, code_obj, ecx);
+AssertCodeIsBaseline(masm, code_obj, ecx);
 }
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
+__ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
 // Load the feedback vector.
 Register feedback_vector = ecx;


@@ -215,7 +215,7 @@ InstructionStream BuildWithCodeStubAssemblerCS(
 void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin,
 InstructionStream code) {
 DCHECK_EQ(builtin, code.builtin_id());
-builtins->set_code(builtin, ToCodeDataContainer(code));
+builtins->set_code(builtin, ToCode(code));
 }
 // static
@@ -246,7 +246,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
 PtrComprCageBase cage_base(isolate);
 for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
 ++builtin) {
-InstructionStream code = FromCodeDataContainer(builtins->code(builtin));
+InstructionStream code = FromCode(builtins->code(builtin));
 isolate->heap()->UnprotectAndRegisterMemoryChunk(
 code, UnprotectMemoryOrigin::kMainThread);
 bool flush_icache = false;
@@ -258,16 +258,16 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
 DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
 Builtins::IsIsolateIndependent(target.builtin_id()));
 if (!target.is_builtin()) continue;
-CodeDataContainer new_target = builtins->code(target.builtin_id());
+Code new_target = builtins->code(target.builtin_id());
 rinfo->set_target_address(new_target.raw_instruction_start(),
 UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
 } else {
 DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
 Object object = rinfo->target_object(cage_base);
-if (!object.IsCodeDataContainer(cage_base)) continue;
-CodeDataContainer target = CodeDataContainer::cast(object);
+if (!object.IsCode(cage_base)) continue;
+Code target = Code::cast(object);
 if (!target.is_builtin()) continue;
-CodeDataContainer new_target = builtins->code(target.builtin_id());
+Code new_target = builtins->code(target.builtin_id());
 rinfo->set_target_object(isolate->heap(), new_target,
 UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
 }


@@ -505,7 +505,7 @@ builtin WasmI64AtomicWait(
 // Type feedback collection support for `call_ref`.
 extern macro GetCodeEntry(InstructionStream): RawPtr;
-extern macro GetCodeEntry(CodeDataContainer): RawPtr;
+extern macro GetCodeEntry(Code): RawPtr;
 struct TargetAndInstance {
 target: RawPtr;


@@ -464,7 +464,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 // Invoke the function by calling through JS entry trampoline builtin and
 // pop the faked function when we return.
-Handle<CodeDataContainer> trampoline_code =
+Handle<Code> trampoline_code =
 masm->isolate()->builtins()->code_handle(entry_trampoline);
 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
@@ -637,9 +637,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
 __ Push(r9);
 // Invoke the builtin code.
-Handle<CodeDataContainer> builtin =
-is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
-: masm->isolate()->builtins()->Call();
+Handle<Code> builtin = is_construct
+? BUILTIN_CODE(masm->isolate(), Construct)
+: masm->isolate()->builtins()->Call();
 __ Call(builtin, RelocInfo::CODE_TARGET);
 // Exit the internal frame. Notice that this also removes the empty
@@ -664,20 +664,19 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
 __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
 }
-static void AssertCodeDataContainerIsBaselineAllowClobber(MacroAssembler* masm,
-Register code,
-Register scratch) {
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+Register code, Register scratch) {
 // Verify that the code kind is baseline code via the CodeKind.
-__ movl(scratch, FieldOperand(code, CodeDataContainer::kFlagsOffset));
-__ DecodeField<CodeDataContainer::KindField>(scratch);
+__ movl(scratch, FieldOperand(code, Code::kFlagsOffset));
+__ DecodeField<Code::KindField>(scratch);
 __ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
 __ Assert(equal, AbortReason::kExpectedBaselineData);
 }
-static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
-Register code, Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+Register scratch) {
 DCHECK(!AreAliased(code, scratch));
-return AssertCodeDataContainerIsBaselineAllowClobber(masm, code, scratch);
+return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
 }
 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -688,11 +687,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
 Label done;
 __ LoadMap(scratch1, sfi_data);
-__ CmpInstanceType(scratch1, CODE_DATA_CONTAINER_TYPE);
+__ CmpInstanceType(scratch1, CODE_TYPE);
 if (v8_flags.debug_code) {
 Label not_baseline;
 __ j(not_equal, &not_baseline);
-AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
+AssertCodeIsBaseline(masm, sfi_data, scratch1);
 __ j(equal, is_baseline);
 __ bind(&not_baseline);
 } else {
@@ -808,7 +807,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 __ jmp(&ok);
 __ bind(&is_baseline);
-__ CmpObjectType(rcx, CODE_DATA_CONTAINER_TYPE, rcx);
+__ CmpObjectType(rcx, CODE_TYPE, rcx);
 __ Assert(equal, AbortReason::kMissingBytecodeArray);
 __ bind(&ok);
@@ -826,7 +825,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 // undefined because generator functions are non-constructable.
 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
 __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
-__ JumpCodeDataContainerObject(rcx);
+__ JumpCodeObject(rcx);
 }
 __ bind(&prepare_step_in_if_stepping);
@@ -1243,7 +1242,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 __ ReplaceClosureCodeWithOptimizedCode(
 rcx, closure, kInterpreterBytecodeArrayRegister,
 WriteBarrierDescriptor::SlotAddressRegister());
-__ JumpCodeDataContainerObject(rcx);
+__ JumpCodeObject(rcx);
 __ bind(&install_baseline_code);
 __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@@ -1429,7 +1428,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
 __ LoadTaggedPointerField(
 rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
-__ LoadCodeDataContainerEntry(rbx, rbx);
+__ LoadCodeEntry(rbx, rbx);
 __ jmp(&trampoline_loaded, Label::kNear);
 __ bind(&builtin_trampoline);
@@ -2049,7 +2048,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 // TODO(v8:11615): Observe InstructionStream::kMaxArguments in
 // CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-Handle<CodeDataContainer> code) {
+Handle<Code> code) {
 // ----------- S t a t e -------------
 // -- rdi : target
 // -- rax : number of parameters on the stack
@@ -2118,9 +2117,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }
 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(
-MacroAssembler* masm, CallOrConstructMode mode,
-Handle<CodeDataContainer> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+CallOrConstructMode mode,
+Handle<Code> code) {
 // ----------- S t a t e -------------
 // -- rax : the number of arguments
 // -- rdx : the new target (for [[Construct]] calls)
@@ -2674,7 +2673,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
 __ leave();
 }
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(rax, rax);
+__ LoadCodeInstructionStreamNonBuiltin(rax, rax);
 // Load deoptimization data from the code object.
 const TaggedRegister deopt_data(rbx);
@@ -2773,15 +2772,13 @@ void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
 // A modified version of BailoutIfDeoptimized that drops the builtin frame
 // before deoptimizing.
 {
-static constexpr int kCodeStartToCodeDataContainerOffset =
-InstructionStream::kCodeDataContainerOffset -
-InstructionStream::kHeaderSize;
-__ LoadTaggedPointerField(scratch0,
-Operand(kJavaScriptCallCodeStartRegister,
-kCodeStartToCodeDataContainerOffset));
-__ testl(
-FieldOperand(scratch0, CodeDataContainer::kKindSpecificFlagsOffset),
-Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
+static constexpr int kCodeStartToCodeOffset =
+InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+__ LoadTaggedPointerField(
+scratch0,
+Operand(kJavaScriptCallCodeStartRegister, kCodeStartToCodeOffset));
+__ testl(FieldOperand(scratch0, Code::kKindSpecificFlagsOffset),
+Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
 __ j(not_zero, &deoptimize);
 }
@@ -2881,7 +2878,7 @@ void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
 __ Drop(kStackParameterCount + kReturnAddressCount);
 __ Move(scratch0,
 BUILTIN_CODE(masm->isolate(), CompileLazyDeoptimizedCode));
-__ LoadCodeDataContainerEntry(scratch0, scratch0);
+__ LoadCodeEntry(scratch0, scratch0);
 __ PushReturnAddressFrom(scratch0);
 __ ret(0);
 }
@@ -5347,7 +5344,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 // always have baseline code.
 if (!is_osr) {
 Label start_with_baseline;
-__ CmpObjectType(code_obj, CODE_DATA_CONTAINER_TYPE, kScratchRegister);
+__ CmpObjectType(code_obj, CODE_TYPE, kScratchRegister);
 __ j(equal, &start_with_baseline);
 // Start with bytecode as there is no baseline code.
@@ -5360,14 +5357,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 // Start with baseline code.
 __ bind(&start_with_baseline);
 } else if (v8_flags.debug_code) {
-__ CmpObjectType(code_obj, CODE_DATA_CONTAINER_TYPE, kScratchRegister);
+__ CmpObjectType(code_obj, CODE_TYPE, kScratchRegister);
 __ Assert(equal, AbortReason::kExpectedBaselineData);
 }
 if (v8_flags.debug_code) {
-AssertCodeDataContainerIsBaseline(masm, code_obj, r11);
+AssertCodeIsBaseline(masm, code_obj, r11);
 }
-__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
+__ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
 // Load the feedback vector.
 Register feedback_vector = r11;


@@ -156,7 +156,7 @@ void RelocInfo::WipeOut() {
 }
 }
-Handle<CodeDataContainer> Assembler::relative_code_target_object_handle_at(
+Handle<Code> Assembler::relative_code_target_object_handle_at(
 Address pc) const {
 Instruction* branch = Instruction::At(pc);
 int code_target_index = branch->GetBranchOffset() / kInstrSize;


@@ -1190,7 +1190,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
 // Get the code target object for a pc-relative call or jump.
-V8_INLINE Handle<CodeDataContainer> relative_code_target_object_handle_at(
+V8_INLINE Handle<Code> relative_code_target_object_handle_at(
 Address pc_) const;
 protected:


@@ -161,7 +161,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
 Jump(static_cast<intptr_t>(target), rmode, cond);
 }
-void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
 Condition cond) {
 DCHECK(RelocInfo::IsCodeTarget(rmode));
 DCHECK_IMPLIES(options().isolate_independent_code,
@@ -225,7 +225,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
 }
 }
-void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
 Condition cond, TargetAddressStorageMode mode,
 bool check_constant_pool) {
 DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -294,8 +294,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
 break;
 case BuiltinCallJumpMode::kForMksnapshot: {
 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
-Handle<CodeDataContainer> code =
-isolate()->builtins()->code_handle(builtin);
+Handle<Code> code = isolate()->builtins()->code_handle(builtin);
 int32_t code_target_index = AddCodeTarget(code);
 bl(code_target_index * kInstrSize, cond,
 RelocInfo::RELATIVE_CODE_TARGET);
@@ -327,8 +326,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
 break;
 case BuiltinCallJumpMode::kForMksnapshot: {
 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
-Handle<CodeDataContainer> code =
-isolate()->builtins()->code_handle(builtin);
+Handle<Code> code = isolate()->builtins()->code_handle(builtin);
 int32_t code_target_index = AddCodeTarget(code);
 b(code_target_index * kInstrSize, cond,
 RelocInfo::RELATIVE_CODE_TARGET);
@@ -341,38 +339,31 @@
 }
 }
-void TurboAssembler::LoadCodeDataContainerEntry(
-Register destination, Register code_data_container_object) {
+void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
 ASM_CODE_COMMENT(this);
-ldr(destination, FieldMemOperand(code_data_container_object,
-CodeDataContainer::kCodeEntryPointOffset));
+ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
 }
-void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
-Register destination, Register code_data_container_object) {
+void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
+Register code_object) {
 ASM_CODE_COMMENT(this);
 // Compute the InstructionStream object pointer from the code entry point.
-ldr(destination, FieldMemOperand(code_data_container_object,
-CodeDataContainer::kCodeEntryPointOffset));
+ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
 sub(destination, destination,
 Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
 }
-void TurboAssembler::CallCodeDataContainerObject(
-Register code_data_container_object) {
+void TurboAssembler::CallCodeObject(Register code_object) {
 ASM_CODE_COMMENT(this);
-LoadCodeDataContainerEntry(code_data_container_object,
-code_data_container_object);
-Call(code_data_container_object);
+LoadCodeEntry(code_object, code_object);
+Call(code_object);
 }
-void TurboAssembler::JumpCodeDataContainerObject(
-Register code_data_container_object, JumpMode jump_mode) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
 ASM_CODE_COMMENT(this);
 DCHECK_EQ(JumpMode::kJump, jump_mode);
-LoadCodeDataContainerEntry(code_data_container_object,
-code_data_container_object);
-Jump(code_data_container_object);
+LoadCodeEntry(code_object, code_object);
+Jump(code_object);
 }
 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
@@ -405,10 +396,9 @@ void TurboAssembler::Drop(Register count, Condition cond) {
 add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
 }
-void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
-Register code_data_container, Register scratch) {
-ldr(scratch, FieldMemOperand(code_data_container,
-CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code,
+Register scratch) {
+ldr(scratch, FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
 tst(scratch, Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
 }
@@ -1709,10 +1699,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
 ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
 switch (type) {
 case InvokeType::kCall:
-CallCodeDataContainerObject(code);
+CallCodeObject(code);
 break;
 case InvokeType::kJump:
-JumpCodeDataContainerObject(code);
+JumpCodeObject(code);
 break;
 }
 b(&done);
@@ -1933,8 +1923,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
 // runtime to clear it.
 {
 UseScratchRegisterScope temps(masm);
-__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry,
-temps.Acquire());
+__ TestCodeIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire());
 __ b(ne, &heal_optimized_code_slot);
 }
@@ -1942,7 +1931,7 @@
 // into the optimized functions list, then tail call the optimized code.
 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
-__ LoadCodeDataContainerEntry(r2, optimized_code_entry);
+__ LoadCodeEntry(r2, optimized_code_entry);
 __ Jump(r2);
 // Optimized code slot contains deoptimized code or code is cleared and
@@ -2001,7 +1990,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
 SmiUntag(kJavaScriptCallArgCountRegister);
 }
 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
-JumpCodeDataContainerObject(r2);
+JumpCodeObject(r2);
 }
 // Read off the flags in the feedback vector and check if there
@@ -2062,8 +2051,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
 // smarter.
 mov(r0, Operand(num_arguments));
 Move(r1, ExternalReference::Create(f));
-Handle<CodeDataContainer> code =
-CodeFactory::CEntry(isolate(), f->result_size);
+Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
 Call(code, RelocInfo::CODE_TARGET);
 }
@@ -2088,7 +2076,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
 DCHECK_EQ(builtin.address() & 1, 1);
 #endif
 Move(r1, builtin);
-Handle<CodeDataContainer> code =
+Handle<Code> code =
 CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
 Jump(code, RelocInfo::CODE_TARGET);
 }

View File

@@ -308,8 +308,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
 bool check_constant_pool = true);
-void Call(Handle<CodeDataContainer> code,
-RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, Condition cond = al,
+void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+Condition cond = al,
 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
 bool check_constant_pool = true);
 void Call(Label* target);
@@ -323,18 +323,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 void CallBuiltin(Builtin builtin, Condition cond = al);
 void TailCallBuiltin(Builtin builtin, Condition cond = al);
-// Load the code entry point from the CodeDataContainer object.
-void LoadCodeDataContainerEntry(Register destination,
-Register code_data_container_object);
-// Load code entry point from the CodeDataContainer object and compute
+// Load the code entry point from the Code object.
+void LoadCodeEntry(Register destination, Register code_object);
+// Load code entry point from the Code object and compute
 // InstructionStream object pointer out of it. Must not be used for
-// CodeDataContainers corresponding to builtins, because their entry points
+// Codes corresponding to builtins, because their entry points
 // values point to the embedded instruction stream in .text section.
-void LoadCodeDataContainerInstructionStreamNonBuiltin(
-Register destination, Register code_data_container_object);
-void CallCodeDataContainerObject(Register code_data_container_object);
-void JumpCodeDataContainerObject(Register code_data_container_object,
-JumpMode jump_mode = JumpMode::kJump);
+void LoadCodeInstructionStreamNonBuiltin(Register destination,
+Register code_object);
+void CallCodeObject(Register code_object);
+void JumpCodeObject(Register code_object,
+JumpMode jump_mode = JumpMode::kJump);
 // Generates an instruction sequence s.t. the return address points to the
 // instruction following the call.
@@ -440,8 +439,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 Register exclusion3 = no_reg);
 void Jump(Register target, Condition cond = al);
 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
-void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
-Condition cond = al);
+void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
 void Jump(const ExternalReference& reference);
 // Perform a floating-point min or max operation with the
@@ -892,8 +890,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 DecodeField<Field>(reg, reg);
 }
-void TestCodeDataContainerIsMarkedForDeoptimization(
-Register code_data_container, Register scratch);
+void TestCodeIsMarkedForDeoptimization(Register code, Register scratch);
 Operand ClearedValue() const;
 private:


@@ -485,15 +485,15 @@ Tagged_t Assembler::target_compressed_address_at(Address pc,
 return Memory<Tagged_t>(target_pointer_address_at(pc));
 }
-Handle<CodeDataContainer> Assembler::code_target_object_handle_at(Address pc) {
+Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
 Instruction* instr = reinterpret_cast<Instruction*>(pc);
 if (instr->IsLdrLiteralX()) {
-return Handle<CodeDataContainer>(reinterpret_cast<Address*>(
+return Handle<Code>(reinterpret_cast<Address*>(
 Assembler::target_address_at(pc, 0 /* unused */)));
 } else {
 DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
 DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
-return Handle<CodeDataContainer>::cast(
+return Handle<Code>::cast(
 GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
 }
 }
@@ -662,7 +662,7 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
 Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
 compressed));
 // Embedding of compressed InstructionStream objects must not happen when
-// external code space is enabled, because CodeDataContainers must be used
+// external code space is enabled, because Codes must be used
 // instead.
 DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
 !IsCodeSpaceObject(HeapObject::cast(obj)));


@@ -4384,7 +4384,7 @@ void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
 void Assembler::near_call(HeapNumberRequest request) {
 BlockPoolsScope no_pool_before_bl_instr(this);
 RequestHeapNumber(request);
-EmbeddedObjectIndex index = AddEmbeddedObject(Handle<CodeDataContainer>());
+EmbeddedObjectIndex index = AddEmbeddedObject(Handle<Code>());
 RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
 DCHECK(is_int32(index));
 bl(static_cast<int>(index));

View File

@ -262,7 +262,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Returns the handle for the code object called at 'pc'.
// This might need to be temporarily encoded as an offset into code_targets_.
inline Handle<CodeDataContainer> code_target_object_handle_at(Address pc);
inline Handle<Code> code_target_object_handle_at(Address pc);
inline EmbeddedObjectIndex embedded_object_index_referenced_from(Address pc);
inline void set_embedded_object_index_referenced_from(
Address p, EmbeddedObjectIndex index);

View File

@ -1413,16 +1413,16 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
__ AssertCodeDataContainer(optimized_code_entry);
__ JumpIfCodeDataContainerIsMarkedForDeoptimization(
optimized_code_entry, scratch, &heal_optimized_code_slot);
__ AssertCode(optimized_code_entry);
__ JumpIfCodeIsMarkedForDeoptimization(optimized_code_entry, scratch,
&heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Move(x2, optimized_code_entry);
__ JumpCodeDataContainerObject(x2);
__ JumpCodeObject(x2);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
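
Read as a whole, the hunk above implements the tiering fast path: take the cached optimized code unless it has been marked for deoptimization, install it on the closure, and tail call it. A minimal sketch in plain C++, where Code, Closure, and the two helpers are illustrative stand-ins rather than V8's real API:

```cpp
#include <cstdint>

// Illustrative stand-ins; the real types live inside V8.
struct Code {
  uint32_t kind_specific_flags = 0;
  bool marked_for_deoptimization() const {
    return (kind_specific_flags & 1u) != 0;  // assumed: deopt bit is bit 0
  }
};
struct Closure {
  Code* code = nullptr;
};

void HealOptimizedCodeSlot() { /* runtime call to evict the slot, elided */ }
void TailCall(Code* /*code*/) { /* jump to the code's entry point, elided */ }

void TailCallOptimizedCodeSlotSketch(Closure* closure, Code* optimized) {
  if (optimized == nullptr || optimized->marked_for_deoptimization()) {
    // The slot holds cleared or deoptimized code: evict it via the runtime.
    HealOptimizedCodeSlot();
    return;
  }
  // Optimized code is good: link it into the closure, then tail call it.
  closure->code = optimized;
  TailCall(optimized);
}
```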
@ -1447,7 +1447,7 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimized_code, closure));
// Store code entry in the closure.
AssertCodeDataContainer(optimized_code);
AssertCode(optimized_code);
StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset));
RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
@ -1483,7 +1483,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
JumpCodeDataContainerObject(x2);
JumpCodeObject(x2);
}
// Read off the flags in the feedback vector and check if there
@ -1596,16 +1596,16 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
Check(ls, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
}
void MacroAssembler::AssertCodeDataContainer(Register object) {
void MacroAssembler::AssertCode(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
AssertNotSmi(object, AbortReason::kOperandIsNotACodeDataContainer);
AssertNotSmi(object, AbortReason::kOperandIsNotACode);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, CODE_DATA_CONTAINER_TYPE);
Check(eq, AbortReason::kOperandIsNotACodeDataContainer);
CompareObjectType(object, temp, temp, CODE_TYPE);
Check(eq, AbortReason::kOperandIsNotACode);
}
void MacroAssembler::AssertConstructor(Register object) {
@ -1913,8 +1913,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
Mov(x0, num_arguments);
Mov(x1, ExternalReference::Create(f));
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@ -1922,7 +1921,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
ASM_CODE_COMMENT(this);
Mov(x1, builtin);
Handle<CodeDataContainer> code =
Handle<Code> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@ -2147,7 +2146,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
JumpHelper(offset, rmode, cond);
}
void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@ -2191,8 +2190,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
}
}
void TurboAssembler::Call(Handle<CodeDataContainer> code,
RelocInfo::Mode rmode) {
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
BlockPoolsScope scope(this);
@ -2203,7 +2201,7 @@ void TurboAssembler::Call(Handle<CodeDataContainer> code,
return;
}
DCHECK(FromCodeDataContainer(*code).IsExecutable());
DCHECK(FromCode(*code).IsExecutable());
DCHECK(RelocInfo::IsCodeTarget(rmode));
if (CanUseNearCallOrJump(rmode)) {
@ -2285,8 +2283,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code = isolate()->builtins()->code_handle(builtin);
EmbeddedObjectIndex index = AddEmbeddedObject(code);
DCHECK(is_int32(index));
near_call(static_cast<int32_t>(index), RelocInfo::CODE_TARGET);
@ -2339,8 +2336,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code = isolate()->builtins()->code_handle(builtin);
EmbeddedObjectIndex index = AddEmbeddedObject(code);
DCHECK(is_int32(index));
JumpHelper(static_cast<int64_t>(index), RelocInfo::CODE_TARGET, cond);
@ -2353,41 +2349,34 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
}
}
void TurboAssembler::LoadCodeDataContainerEntry(
Register destination, Register code_data_container_object) {
void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
Ldr(destination, FieldMemOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
Ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
}
void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object) {
void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
Register code_object) {
ASM_CODE_COMMENT(this);
// Compute the InstructionStream object pointer from the code entry point.
Ldr(destination, FieldMemOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
Ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
Sub(destination, destination,
Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
}
void TurboAssembler::CallCodeDataContainerObject(
Register code_data_container_object) {
void TurboAssembler::CallCodeObject(Register code_object) {
ASM_CODE_COMMENT(this);
LoadCodeDataContainerEntry(code_data_container_object,
code_data_container_object);
Call(code_data_container_object);
LoadCodeEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeDataContainerObject(
Register code_data_container_object, JumpMode jump_mode) {
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeDataContainerEntry(code_data_container_object,
code_data_container_object);
LoadCodeEntry(code_object, code_object);
UseScratchRegisterScope temps(this);
if (code_data_container_object != x17) {
if (code_object != x17) {
temps.Exclude(x17);
Mov(x17, code_data_container_object);
Mov(x17, code_object);
}
Jump(x17);
}
@ -2442,18 +2431,16 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void TurboAssembler::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
int offset = InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
LoadTaggedPointerField(scratch,
MemOperand(kJavaScriptCallCodeStartRegister, offset));
Ldr(scratch.W(),
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
Ldr(scratch.W(), FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset));
Label not_deoptimized;
Tbz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit,
&not_deoptimized);
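
The three numbered steps above reduce to loading one flags word and testing one bit; Tbz fuses the test and the branch into a single instruction. A standalone equivalent of step 2, with the bit position assumed for illustration:

```cpp
#include <cstdint>

constexpr uint32_t kMarkedForDeoptimizationBit = 0;  // assumed bit position

// Step 2: test kMarkedForDeoptimizationBit in the flags word that step 1
// loaded from the Code object; step 3 is the caller branching on the result.
bool MarkedForDeoptimization(uint32_t kind_specific_flags) {
  return ((kind_specific_flags >> kMarkedForDeoptimizationBit) & 1u) != 0;
}
```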
@ -2669,10 +2656,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeDataContainerObject(code);
CallCodeObject(code);
break;
case InvokeType::kJump:
JumpCodeDataContainerObject(code);
JumpCodeObject(code);
break;
}
B(&done);
@ -2688,12 +2675,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Bind(&done);
}
void MacroAssembler::JumpIfCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch,
Label* if_marked_for_deoptimization) {
Ldr(scratch.W(),
FieldMemOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset));
void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization(
Register code, Register scratch, Label* if_marked_for_deoptimization) {
Ldr(scratch.W(), FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
Tbnz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit,
if_marked_for_deoptimization);
}

View File

@ -974,14 +974,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Jump(const ExternalReference& reference);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode);
void Call(Handle<CodeDataContainer> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
void Call(ExternalReference target);
// Generate an indirect call (for when a direct call's range is not adequate).
@ -996,18 +994,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin, Condition cond = al);
// Load code entry point from the CodeDataContainer object.
void LoadCodeDataContainerEntry(Register destination,
Register code_data_container_object);
// Load code entry point from the CodeDataContainer object and compute
// Load code entry point from the Code object.
void LoadCodeEntry(Register destination, Register code_object);
// Load code entry point from the Code object and compute
// InstructionStream object pointer out of it. Must not be used for
// CodeDataContainers corresponding to builtins, because their entry points
// Code objects corresponding to builtins, because their entry point
// values point to the embedded instruction stream in the .text section.
void LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object);
void CallCodeDataContainerObject(Register code_data_container_object);
void JumpCodeDataContainerObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);
void LoadCodeInstructionStreamNonBuiltin(Register destination,
Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
@ -1899,9 +1896,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
void JumpIfCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch,
Label* if_marked_for_deoptimization);
void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch,
Label* if_marked_for_deoptimization);
Operand ClearedValue() const;
Operand ReceiverOperand(const Register arg_count);
@ -1910,9 +1906,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is not a CodeDataContainer, enabled via
// Abort execution if argument is not a Code object, enabled via
// --debug-code.
void AssertCodeDataContainer(Register object) NOOP_UNLESS_DEBUG_CODE
void AssertCode(Register object) NOOP_UNLESS_DEBUG_CODE
// Abort execution if argument is not a Constructor, enabled via
// --debug-code.

View File

@ -264,7 +264,7 @@ void AssemblerBase::RequestHeapNumber(HeapNumberRequest request) {
heap_number_requests_.push_front(request);
}
int AssemblerBase::AddCodeTarget(Handle<CodeDataContainer> target) {
int AssemblerBase::AddCodeTarget(Handle<Code> target) {
int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
code_targets_.back().address() == target.address()) {
@ -276,8 +276,7 @@ int AssemblerBase::AddCodeTarget(Handle<CodeDataContainer> target) {
}
}
Handle<CodeDataContainer> AssemblerBase::GetCodeTarget(
intptr_t code_target_index) const {
Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
return code_targets_[code_target_index];
}

View File

@ -355,8 +355,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
protected:
// Add 'target' to the {code_targets_} vector, if necessary, and return the
// offset at which it is stored.
int AddCodeTarget(Handle<CodeDataContainer> target);
Handle<CodeDataContainer> GetCodeTarget(intptr_t code_target_index) const;
int AddCodeTarget(Handle<Code> target);
Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
// Add 'object' to the {embedded_objects_} vector and return the index at
// which it is stored.
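
Both vectors described above use the same side-table trick: the instruction stream stores a small integer, and that integer resolves through a vector of handles. A toy version of the AddCodeTarget/GetCodeTarget pair shown earlier (the names and the Handle stand-in are illustrative):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct Handle {  // stand-in for Handle<Code>
  uintptr_t address = 0;
};

class CodeTargetTable {
 public:
  // Mirrors AddCodeTarget: reuse the last slot if it already holds 'target',
  // otherwise append; return the small index to embed in the instruction.
  int Add(Handle target) {
    int current = static_cast<int>(table_.size());
    if (current > 0 && table_.back().address == target.address) {
      return current - 1;
    }
    table_.push_back(target);
    return current;
  }
  // Mirrors GetCodeTarget: resolve an embedded index back to the handle.
  Handle Get(intptr_t index) const {
    assert(static_cast<size_t>(index) < table_.size());
    return table_[static_cast<size_t>(index)];
  }

 private:
  std::vector<Handle> table_;
};
```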
@ -412,7 +412,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// guaranteed to fit in the instruction's offset field. We keep track of the
// code handles we encounter in calls in this vector, and encode the index of
// the code handle in the vector instead.
std::vector<Handle<CodeDataContainer>> code_targets_;
std::vector<Handle<Code>> code_targets_;
// If an assembler needs a small number to refer to a heap object handle
// (for example, because there are only 32bit available on a 64bit arch), the

View File

@ -54,7 +54,7 @@ namespace internal {
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotACallableFunction, "Operand is not a callable function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotACodeDataContainer, "Operand is not a CodeDataContainer") \
V(kOperandIsNotACode, "Operand is not a Code object") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kPromiseAlreadySettled, "Promise already settled") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \

View File

@ -16,14 +16,14 @@ class InstructionStream;
// Associates a body of code with an interface descriptor.
class Callable final {
public:
Callable(Handle<CodeDataContainer> code, CallInterfaceDescriptor descriptor)
Callable(Handle<Code> code, CallInterfaceDescriptor descriptor)
: code_(code), descriptor_(descriptor) {}
Handle<CodeDataContainer> code() const { return code_; }
Handle<Code> code() const { return code_; }
CallInterfaceDescriptor descriptor() const { return descriptor_; }
private:
const Handle<CodeDataContainer> code_;
const Handle<Code> code_;
const CallInterfaceDescriptor descriptor_;
};

View File

@ -14,15 +14,13 @@ namespace v8 {
namespace internal {
// static
Handle<CodeDataContainer> CodeFactory::RuntimeCEntry(Isolate* isolate,
int result_size) {
Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
return CodeFactory::CEntry(isolate, result_size);
}
// static
Handle<CodeDataContainer> CodeFactory::CEntry(Isolate* isolate, int result_size,
ArgvMode argv_mode,
bool builtin_exit_frame) {
Handle<Code> CodeFactory::CEntry(Isolate* isolate, int result_size,
ArgvMode argv_mode, bool builtin_exit_frame) {
// Aliases for readability below.
const int rs = result_size;
const ArgvMode am = argv_mode;
@ -256,7 +254,7 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(
// static
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
Handle<CodeDataContainer> code =
Handle<Code> code =
CodeFactory::CEntry(isolate, result_size, ArgvMode::kRegister);
if (result_size == 1) {
return Callable(code, InterpreterCEntry1Descriptor{});

View File

@ -26,12 +26,11 @@ class V8_EXPORT_PRIVATE CodeFactory final {
// stack and the arguments count is passed via register) which currently
// can't be expressed in CallInterfaceDescriptor. Therefore only the code
// is exported here.
static Handle<CodeDataContainer> RuntimeCEntry(Isolate* isolate,
int result_size = 1);
static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
static Handle<CodeDataContainer> CEntry(Isolate* isolate, int result_size = 1,
ArgvMode argv_mode = ArgvMode::kStack,
bool builtin_exit_frame = false);
static Handle<Code> CEntry(Isolate* isolate, int result_size = 1,
ArgvMode argv_mode = ArgvMode::kStack,
bool builtin_exit_frame = false);
// Initial states for ICs.
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);

View File

@ -18,9 +18,9 @@ namespace internal {
namespace {
template <typename CodeOrCodeDataContainer>
struct CodeOrCodeDataContainerOps {
Handle<CodeOrCodeDataContainer> code;
template <typename CodeOrInstructionStream>
struct CodeOrInstructionStreamOps {
Handle<CodeOrInstructionStream> code;
Address constant_pool() const { return code->constant_pool(); }
Address instruction_start() const { return code->InstructionStart(); }
@ -33,8 +33,8 @@ struct CodeOrCodeDataContainerOps {
int code_comments_size() const { return code->code_comments_size(); }
};
using CodeOps = CodeOrCodeDataContainerOps<InstructionStream>;
using CodeDataContainerOps = CodeOrCodeDataContainerOps<CodeDataContainer>;
using InstructionStreamOps = CodeOrInstructionStreamOps<InstructionStream>;
using CodeOps = CodeOrInstructionStreamOps<Code>;
#if V8_ENABLE_WEBASSEMBLY
struct WasmCodeOps {
@ -92,21 +92,21 @@ struct CodeDescOps {
#define HANDLE_WASM(...) UNREACHABLE()
#endif
#define DISPATCH(ret, method) \
ret CodeReference::method() const { \
DCHECK(!is_null()); \
switch (kind_) { \
case Kind::INSTRUCTION_STREAM: \
return CodeOps{instruction_stream_}.method(); \
case Kind::CODE_DATA_CONTAINER: \
return CodeDataContainerOps{code_data_container_}.method(); \
case Kind::WASM_CODE: \
HANDLE_WASM(return WasmCodeOps{wasm_code_}.method()); \
case Kind::CODE_DESC: \
return CodeDescOps{code_desc_}.method(); \
default: \
UNREACHABLE(); \
} \
#define DISPATCH(ret, method) \
ret CodeReference::method() const { \
DCHECK(!is_null()); \
switch (kind_) { \
case Kind::INSTRUCTION_STREAM: \
return InstructionStreamOps{instruction_stream_}.method(); \
case Kind::CODE: \
return CodeOps{code_}.method(); \
case Kind::WASM_CODE: \
HANDLE_WASM(return WasmCodeOps{wasm_code_}.method()); \
case Kind::CODE_DESC: \
return CodeDescOps{code_desc_}.method(); \
default: \
UNREACHABLE(); \
} \
}
DISPATCH(Address, constant_pool)

View File

@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
class InstructionStream;
class CodeDataContainer;
class Code;
class CodeDesc;
namespace wasm {
@ -29,9 +29,7 @@ class CodeReference {
: kind_(Kind::CODE_DESC), code_desc_(code_desc) {}
explicit CodeReference(Handle<InstructionStream> code)
: kind_(Kind::INSTRUCTION_STREAM), instruction_stream_(code) {}
explicit CodeReference(Handle<CodeDataContainer> code_data_container)
: kind_(Kind::CODE_DATA_CONTAINER),
code_data_container_(code_data_container) {}
explicit CodeReference(Handle<Code> code) : kind_(Kind::CODE), code_(code) {}
Address constant_pool() const;
Address instruction_start() const;
@ -47,9 +45,7 @@ class CodeReference {
bool is_instruction_stream() const {
return kind_ == Kind::INSTRUCTION_STREAM;
}
bool is_code_data_container() const {
return kind_ == Kind::CODE_DATA_CONTAINER;
}
bool is_code() const { return kind_ == Kind::CODE; }
bool is_wasm_code() const { return kind_ == Kind::WASM_CODE; }
Handle<InstructionStream> as_instruction_stream() const {
@ -57,9 +53,9 @@ class CodeReference {
return instruction_stream_;
}
Handle<CodeDataContainer> as_code_data_container() const {
DCHECK_EQ(Kind::CODE_DATA_CONTAINER, kind_);
return code_data_container_;
Handle<Code> as_code() const {
DCHECK_EQ(Kind::CODE, kind_);
return code_;
}
const wasm::WasmCode* as_wasm_code() const {
@ -71,7 +67,7 @@ class CodeReference {
enum class Kind {
NONE,
INSTRUCTION_STREAM,
CODE_DATA_CONTAINER,
CODE,
WASM_CODE,
CODE_DESC
} kind_;
@ -80,7 +76,7 @@ class CodeReference {
const wasm::WasmCode* wasm_code_;
const CodeDesc* code_desc_;
Handle<InstructionStream> instruction_stream_;
Handle<CodeDataContainer> code_data_container_;
Handle<Code> code_;
};
DISALLOW_NEW_AND_DELETE()

View File

@ -3145,19 +3145,19 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
Label check_for_interpreter_data(this, &var_result);
Label done(this, &var_result);
GotoIfNot(HasInstanceType(var_result.value(), CODE_DATA_CONTAINER_TYPE),
GotoIfNot(HasInstanceType(var_result.value(), CODE_TYPE),
&check_for_interpreter_data);
{
TNode<CodeDataContainer> code = CAST(var_result.value());
TNode<Code> code = CAST(var_result.value());
#ifdef DEBUG
TNode<Int32T> code_flags =
LoadObjectField<Int16T>(code, CodeDataContainer::kFlagsOffset);
CSA_DCHECK(this, Word32Equal(
DecodeWord32<CodeDataContainer::KindField>(code_flags),
Int32Constant(static_cast<int>(CodeKind::BASELINE))));
LoadObjectField<Int16T>(code, Code::kFlagsOffset);
CSA_DCHECK(
this, Word32Equal(DecodeWord32<Code::KindField>(code_flags),
Int32Constant(static_cast<int>(CodeKind::BASELINE))));
#endif // DEBUG
TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
FromCodeDataContainerNonBuiltin(code),
FromCodeNonBuiltin(code),
InstructionStream::kDeoptimizationDataOrInterpreterDataOffset);
var_result = baseline_data;
}
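
The DEBUG block decodes Code::KindField out of the packed flags word; DecodeWord32<Field> is a shift-and-mask. Standalone, with the field's shift and width assumed for illustration:

```cpp
#include <cstdint>

constexpr uint32_t kKindShift = 0;  // assumed field position
constexpr uint32_t kKindBits = 4;   // assumed field width

uint32_t DecodeKind(uint32_t flags) {
  return (flags >> kKindShift) & ((1u << kKindBits) - 1u);
}
// The CSA_DCHECK above then requires the decoded kind to equal
// static_cast<int>(CodeKind::BASELINE).
```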
@ -15463,7 +15463,7 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
return Word32NotEqual(flags, Int32Constant(0));
}
TNode<CodeDataContainer> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
TNode<IntPtrT> offset =
@ -15475,13 +15475,13 @@ TNode<CodeDataContainer> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
return CAST(BitcastWordToTagged(Load<RawPtrT>(table, offset)));
}
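
LoadBuiltin amounts to an indexed load from the isolate's table of builtin entries, with the loaded word then bitcast to a tagged Code value. A toy analogue (the table shape is assumed):

```cpp
#include <array>
#include <cstdint>

constexpr int kBuiltinCount = 3;                       // stand-in
std::array<uintptr_t, kBuiltinCount> builtin_table{};  // filled at isolate init

// builtin_id plays the role of the Smi operand; the returned word is what
// BitcastWordToTagged reinterprets as a tagged Code reference.
uintptr_t LoadBuiltinEntry(int builtin_id) {
  return builtin_table[static_cast<size_t>(builtin_id)];
}
```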
TNode<CodeDataContainer> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<SharedFunctionInfo> shared_info, TVariable<Uint16T>* data_type_out,
Label* if_compile_lazy) {
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
TVARIABLE(CodeDataContainer, sfi_code);
TVARIABLE(Code, sfi_code);
Label done(this);
Label check_instance_type(this);
@ -15507,7 +15507,7 @@ TNode<CodeDataContainer> CodeStubAssembler::GetSharedFunctionInfoCode(
int32_t case_values[] = {
BYTECODE_ARRAY_TYPE,
CODE_DATA_CONTAINER_TYPE,
CODE_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE,
@ -15557,7 +15557,7 @@ TNode<CodeDataContainer> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBaselineData: Execute baseline code
BIND(&check_is_baseline_data);
{
TNode<CodeDataContainer> baseline_code = CAST(sfi_data);
TNode<Code> baseline_code = CAST(sfi_data);
sfi_code = baseline_code;
Goto(&done);
}
@ -15579,7 +15579,7 @@ TNode<CodeDataContainer> CodeStubAssembler::GetSharedFunctionInfoCode(
CSA_DCHECK(this,
Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE)));
{
TNode<CodeDataContainer> trampoline =
TNode<Code> trampoline =
LoadInterpreterDataInterpreterTrampoline(CAST(sfi_data));
sfi_code = trampoline;
}
@ -15607,22 +15607,20 @@ TNode<CodeDataContainer> CodeStubAssembler::GetSharedFunctionInfoCode(
return sfi_code.value();
}
TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeDataContainer> code) {
return LoadObjectField<RawPtrT>(
code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset));
TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<Code> code) {
return LoadObjectField<RawPtrT>(code,
IntPtrConstant(Code::kCodeEntryPointOffset));
}
TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(
TNode<CodeDataContainer> code_data_container) {
TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(TNode<Code> code) {
return IsSetWord32<InstructionStream::MarkedForDeoptimizationField>(
LoadObjectField<Int32T>(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset));
LoadObjectField<Int32T>(code, Code::kKindSpecificFlagsOffset));
}
TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
TNode<Context> context) {
const TNode<CodeDataContainer> code = GetSharedFunctionInfoCode(shared_info);
const TNode<Code> code = GetSharedFunctionInfoCode(shared_info);
// TODO(ishell): All the callers of this function pass map loaded from
// Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove

View File

@ -835,27 +835,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void FastCheck(TNode<BoolT> condition);
// TODO(v8:11880): remove once InstructionStream::bytecode_or_interpreter_data
// field is cached in or moved to CodeDataContainer.
TNode<InstructionStream> FromCodeDataContainerNonBuiltin(
TNode<CodeDataContainer> code) {
// field is cached in or moved to Code.
TNode<InstructionStream> FromCodeNonBuiltin(TNode<Code> code) {
// Compute the InstructionStream object pointer from the code entry point.
TNode<RawPtrT> code_entry = Load<RawPtrT>(
code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset -
kHeapObjectTag));
code, IntPtrConstant(Code::kCodeEntryPointOffset - kHeapObjectTag));
TNode<Object> o = BitcastWordToTagged(IntPtrSub(
code_entry,
IntPtrConstant(InstructionStream::kHeaderSize - kHeapObjectTag)));
return CAST(o);
}
TNode<CodeDataContainer> ToCodeDataContainer(TNode<InstructionStream> code) {
return LoadObjectField<CodeDataContainer>(
code, InstructionStream::kCodeDataContainerOffset);
TNode<Code> ToCode(TNode<InstructionStream> code) {
return LoadObjectField<Code>(code, InstructionStream::kCodeOffset);
}
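
FromCodeNonBuiltin recovers the tagged InstructionStream pointer from a raw entry address by undoing `entry = object_start + header - tag`. In isolation, with the constants assumed for illustration:

```cpp
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // assumed tag value
constexpr uintptr_t kHeaderSize = 64;    // hypothetical header size

// The entry point sits kHeaderSize bytes past the (untagged) object start,
// so subtracting (kHeaderSize - kHeapObjectTag) yields the tagged pointer.
uintptr_t TaggedObjectFromEntry(uintptr_t code_entry) {
  return code_entry - (kHeaderSize - kHeapObjectTag);
}
uintptr_t EntryFromTaggedObject(uintptr_t tagged_object) {
  return tagged_object + (kHeaderSize - kHeapObjectTag);
}
```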
TNode<RawPtrT> GetCodeEntry(TNode<CodeDataContainer> code);
TNode<BoolT> IsMarkedForDeoptimization(
TNode<CodeDataContainer> code_data_container);
TNode<RawPtrT> GetCodeEntry(TNode<Code> code);
TNode<BoolT> IsMarkedForDeoptimization(TNode<Code> code);
// The following Call wrappers call an object according to the semantics that
// one finds in the EcmaScript spec, operating on a Callable (e.g. a
@ -3853,7 +3849,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ElementsKind kind = HOLEY_ELEMENTS);
// Load a builtin's code from the builtin array in the isolate.
TNode<CodeDataContainer> LoadBuiltin(TNode<Smi> builtin_id);
TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
// Figure out the SFI's code object using its data field.
// If |data_type_out| is provided, the instance type of the function data will
@ -3861,7 +3857,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// data_type_out will be set to 0.
// If |if_compile_lazy| is provided then the execution will go to the given
// label in case of a CompileLazy code object.
TNode<CodeDataContainer> GetSharedFunctionInfoCode(
TNode<Code> GetSharedFunctionInfoCode(
TNode<SharedFunctionInfo> shared_info,
TVariable<Uint16T>* data_type_out = nullptr,
Label* if_compile_lazy = nullptr);

View File

@ -652,7 +652,7 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate,
INTERPRETER_DATA_TYPE, AllocationType::kOld));
interpreter_data->set_bytecode_array(*bytecode_array);
interpreter_data->set_interpreter_trampoline(ToCodeDataContainer(*code));
interpreter_data->set_interpreter_trampoline(ToCode(*code));
shared_info->set_interpreter_data(*interpreter_data);
@ -922,7 +922,7 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
// A wrapper to access the optimized code cache slots on the feedback vector.
class OptimizedCodeCache : public AllStatic {
public:
static V8_WARN_UNUSED_RESULT MaybeHandle<CodeDataContainer> Get(
static V8_WARN_UNUSED_RESULT MaybeHandle<Code> Get(
Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
CodeKind code_kind) {
if (!CodeKindIsStoredInOptimizedCodeCache(code_kind)) return {};
@ -932,13 +932,13 @@ class OptimizedCodeCache : public AllStatic {
SharedFunctionInfo shared = function->shared();
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
CodeDataContainer code;
Code code;
FeedbackVector feedback_vector = function->feedback_vector();
if (IsOSR(osr_offset)) {
Handle<BytecodeArray> bytecode(shared.GetBytecodeArray(isolate), isolate);
interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt());
DCHECK_EQ(it.current_bytecode(), interpreter::Bytecode::kJumpLoop);
base::Optional<CodeDataContainer> maybe_code =
base::Optional<Code> maybe_code =
feedback_vector.GetOptimizedOsrCode(isolate, it.GetSlotOperand(2));
if (maybe_code.has_value()) code = maybe_code.value();
} else {
@ -961,7 +961,7 @@ class OptimizedCodeCache : public AllStatic {
}
static void Insert(Isolate* isolate, JSFunction function,
BytecodeOffset osr_offset, CodeDataContainer code,
BytecodeOffset osr_offset, Code code,
bool is_function_context_specializing) {
const CodeKind kind = code.kind();
if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
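
OptimizedCodeCache::Get and ::Insert above behave like a lookup table keyed by OSR offset that refuses to return code already marked for deoptimization. A simplified model, with std::unordered_map standing in for the feedback-vector slots:

```cpp
#include <optional>
#include <unordered_map>

struct Code {  // stand-in
  bool marked_for_deoptimization = false;
};

class OptimizedCodeCacheSketch {
 public:
  std::optional<Code> Get(int osr_offset) const {
    auto it = cache_.find(osr_offset);
    if (it == cache_.end()) return std::nullopt;
    // Like the real cache: never hand out deoptimized code.
    if (it->second.marked_for_deoptimization) return std::nullopt;
    return it->second;
  }
  void Insert(int osr_offset, Code code) { cache_[osr_offset] = code; }

 private:
  std::unordered_map<int, Code> cache_;
};
```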
@ -1052,7 +1052,7 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate,
DCHECK(!isolate->has_pending_exception());
OptimizedCodeCache::Insert(isolate, *compilation_info->closure(),
compilation_info->osr_offset(),
ToCodeDataContainer(*compilation_info->code()),
ToCode(*compilation_info->code()),
compilation_info->function_context_specializing());
job->RecordFunctionCompilation(LogEventListener::CodeTag::kFunction, isolate);
return true;
@ -1128,10 +1128,11 @@ bool ShouldOptimize(CodeKind code_kind, Handle<SharedFunctionInfo> shared) {
}
}
MaybeHandle<CodeDataContainer> CompileTurbofan(
Isolate* isolate, Handle<JSFunction> function,
Handle<SharedFunctionInfo> shared, ConcurrencyMode mode,
BytecodeOffset osr_offset, CompileResultBehavior result_behavior) {
MaybeHandle<Code> CompileTurbofan(Isolate* isolate, Handle<JSFunction> function,
Handle<SharedFunctionInfo> shared,
ConcurrencyMode mode,
BytecodeOffset osr_offset,
CompileResultBehavior result_behavior) {
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
@ -1163,7 +1164,7 @@ MaybeHandle<CodeDataContainer> CompileTurbofan(
} else {
DCHECK(IsSynchronous(mode));
if (CompileTurbofan_NotConcurrent(isolate, job.get())) {
return ToCodeDataContainer(job->compilation_info()->code(), isolate);
return ToCode(job->compilation_info()->code(), isolate);
}
}
@ -1176,12 +1177,11 @@ MaybeHandle<CodeDataContainer> CompileTurbofan(
void RecordMaglevFunctionCompilation(Isolate* isolate,
Handle<JSFunction> function) {
PtrComprCageBase cage_base(isolate);
// TODO(v8:13261): We should be able to pass a CodeDataContainer AbstractCode
// TODO(v8:13261): We should be able to pass a Code-based AbstractCode
// in here, but LinuxPerfJitLogger only supports InstructionStream
// AbstractCode.
Handle<AbstractCode> abstract_code(
AbstractCode::cast(FromCodeDataContainer(function->code(cage_base))),
isolate);
AbstractCode::cast(FromCode(function->code(cage_base))), isolate);
Handle<SharedFunctionInfo> shared(function->shared(cage_base), isolate);
Handle<Script> script(Script::cast(shared->script(cage_base)), isolate);
Handle<FeedbackVector> feedback_vector(function->feedback_vector(cage_base),
@ -1197,9 +1197,9 @@ void RecordMaglevFunctionCompilation(Isolate* isolate,
}
#endif // V8_ENABLE_MAGLEV
MaybeHandle<CodeDataContainer> CompileMaglev(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
BytecodeOffset osr_offset, CompileResultBehavior result_behavior) {
MaybeHandle<Code> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, BytecodeOffset osr_offset,
CompileResultBehavior result_behavior) {
#ifdef V8_ENABLE_MAGLEV
DCHECK(v8_flags.maglev);
// TODO(v8:7700): Add missing support.
@ -1267,7 +1267,7 @@ MaybeHandle<CodeDataContainer> CompileMaglev(
#endif // V8_ENABLE_MAGLEV
}
MaybeHandle<CodeDataContainer> GetOrCompileOptimized(
MaybeHandle<Code> GetOrCompileOptimized(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
CompileResultBehavior result_behavior = CompileResultBehavior::kDefault) {
@ -1297,7 +1297,7 @@ MaybeHandle<CodeDataContainer> GetOrCompileOptimized(
// turbo_filter.
if (!ShouldOptimize(code_kind, shared)) return {};
Handle<CodeDataContainer> cached_code;
Handle<Code> cached_code;
if (OptimizedCodeCache::Get(isolate, function, osr_offset, code_kind)
.ToHandle(&cached_code)) {
return cached_code;
@ -2568,7 +2568,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
}
DCHECK(is_compiled_scope->is_compiled());
Handle<CodeDataContainer> code = handle(shared_info->GetCode(), isolate);
Handle<Code> code = handle(shared_info->GetCode(), isolate);
// Initialize the feedback cell for this JSFunction and reset the interrupt
// budget for feedback vector allocation even if there is a closure feedback
@ -2597,7 +2597,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
concurrency_mode, code_kind);
}
Handle<CodeDataContainer> maybe_code;
Handle<Code> maybe_code;
if (GetOrCompileOptimized(isolate, function, concurrency_mode, code_kind)
.ToHandle(&maybe_code)) {
code = maybe_code;
@ -2650,7 +2650,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
// report these somehow, or silently ignore them?
return false;
}
shared->set_baseline_code(ToCodeDataContainer(*code), kReleaseStore);
shared->set_baseline_code(ToCode(*code), kReleaseStore);
}
double time_taken_ms = time_taken.InMillisecondsF();
@ -2678,7 +2678,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
// Baseline code needs a feedback vector.
JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);
CodeDataContainer baseline_code = shared->baseline_code(kAcquireLoad);
Code baseline_code = shared->baseline_code(kAcquireLoad);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
function->set_code(baseline_code);
return true;
@ -2722,7 +2722,7 @@ void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
code_kind);
}
Handle<CodeDataContainer> code;
Handle<Code> code;
if (GetOrCompileOptimized(isolate, function, mode, code_kind)
.ToHandle(&code)) {
function->set_code(*code, kReleaseStore);
@ -3856,9 +3856,10 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);
// static
MaybeHandle<CodeDataContainer> Compiler::CompileOptimizedOSR(
Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
ConcurrencyMode mode) {
MaybeHandle<Code> Compiler::CompileOptimizedOSR(Isolate* isolate,
Handle<JSFunction> function,
BytecodeOffset osr_offset,
ConcurrencyMode mode) {
DCHECK(IsOSR(osr_offset));
if (V8_UNLIKELY(isolate->serializer_enabled())) return {};
@ -3881,7 +3882,7 @@ MaybeHandle<CodeDataContainer> Compiler::CompileOptimizedOSR(
function->feedback_vector().reset_osr_urgency();
CompilerTracer::TraceOptimizeOSRStarted(isolate, function, osr_offset, mode);
MaybeHandle<CodeDataContainer> result = GetOrCompileOptimized(
MaybeHandle<Code> result = GetOrCompileOptimized(
isolate, function, mode, CodeKind::TURBOFAN, osr_offset);
if (result.is_null()) {
@ -3945,8 +3946,7 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
ResetTieringState(*function, osr_offset);
OptimizedCodeCache::Insert(
isolate, *compilation_info->closure(),
compilation_info->osr_offset(),
ToCodeDataContainer(*compilation_info->code()),
compilation_info->osr_offset(), ToCode(*compilation_info->code()),
compilation_info->function_context_specializing());
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
if (IsOSR(osr_offset)) {
@ -4036,7 +4036,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// deoptimized code just before installing it on the function.
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
*shared, "new function from shared function info");
CodeDataContainer code = function->feedback_vector().optimized_code();
Code code = function->feedback_vector().optimized_code();
if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code.marked_for_deoptimization());

View File

@ -95,9 +95,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Generate and return optimized code for OSR. The empty handle is returned
// either on failure, or after spawning a concurrent OSR task (in which case
// a future OSR request will pick up the resulting code object).
V8_WARN_UNUSED_RESULT static MaybeHandle<CodeDataContainer>
CompileOptimizedOSR(Isolate* isolate, Handle<JSFunction> function,
BytecodeOffset osr_offset, ConcurrencyMode mode);
V8_WARN_UNUSED_RESULT static MaybeHandle<Code> CompileOptimizedOSR(
Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
ConcurrencyMode mode);
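
The contract described in the comment above is fire-and-poll: the first request may spawn a concurrent task and return empty, and a later request collects the finished code. A minimal analogue using std::future in place of V8's dispatcher (not the actual mechanism):

```cpp
#include <chrono>
#include <future>
#include <optional>

struct Code {};  // stand-in

class OsrCompilerSketch {
 public:
  // Empty result on the first call (task spawned) and while compiling;
  // the finished Code on some later call, as CompileOptimizedOSR describes.
  std::optional<Code> Request() {
    if (!task_.valid()) {
      task_ = std::async(std::launch::async, [] { return Code{}; });
      return std::nullopt;  // compilation is now in flight
    }
    if (task_.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
      return task_.get();  // pick up the result of the earlier request
    }
    return std::nullopt;  // still compiling; try again later
  }

 private:
  std::future<Code> task_;
};
```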
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,

View File

@ -23,7 +23,7 @@ HandlerTable::HandlerTable(InstructionStream code)
: HandlerTable(code.HandlerTableAddress(), code.handler_table_size(),
kReturnAddressBasedEncoding) {}
HandlerTable::HandlerTable(CodeDataContainer code)
HandlerTable::HandlerTable(Code code)
: HandlerTable(code.HandlerTableAddress(), code.handler_table_size(),
kReturnAddressBasedEncoding) {}

View File

@ -16,7 +16,7 @@ class Assembler;
class ByteArray;
class BytecodeArray;
class InstructionStream;
class CodeDataContainer;
class Code;
namespace wasm {
class WasmCode;
@ -56,7 +56,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
// Constructors for the various encodings.
explicit HandlerTable(InstructionStream code);
explicit HandlerTable(CodeDataContainer code);
explicit HandlerTable(Code code);
explicit HandlerTable(ByteArray byte_array);
#if V8_ENABLE_WEBASSEMBLY
explicit HandlerTable(const wasm::WasmCode* code);

View File

@ -166,7 +166,7 @@ void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
emit(x);
}
void Assembler::emit(Handle<CodeDataContainer> code, RelocInfo::Mode rmode) {
void Assembler::emit(Handle<Code> code, RelocInfo::Mode rmode) {
emit(code.address(), rmode);
}

View File

@ -1640,7 +1640,7 @@ void Assembler::call(Operand adr) {
emit_operand(edx, adr);
}
void Assembler::call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode) {
void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
EMIT(0xE8);
@ -1710,7 +1710,7 @@ void Assembler::jmp(Operand adr) {
emit_operand(esp, adr);
}
void Assembler::jmp(Handle<CodeDataContainer> code, RelocInfo::Mode rmode) {
void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
EMIT(0xE9);
@ -1770,8 +1770,7 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
emit(entry - (pc_ + sizeof(int32_t)), rmode);
}
void Assembler::j(Condition cc, Handle<CodeDataContainer> code,
RelocInfo::Mode rmode) {
void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
// 0000 1111 1000 tttn #32-bit disp
EMIT(0x0F);
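
The "0000 1111 1000 tttn" comment is the near-Jcc encoding: byte 0x0F, then 0x80 with the condition nibble OR-ed in, then a 32-bit displacement. A standalone encoder (buffer management simplified):

```cpp
#include <cstddef>
#include <cstdint>

// Encodes "jcc rel32": 0F 80+cc followed by a little-endian 32-bit
// displacement measured from the end of this 6-byte instruction.
size_t EmitJcc(uint8_t* buf, uint8_t cc, int32_t rel32) {
  buf[0] = 0x0F;
  buf[1] = static_cast<uint8_t>(0x80 | (cc & 0x0F));  // the "tttn" nibble
  for (int i = 0; i < 4; ++i) {
    buf[2 + i] = static_cast<uint8_t>(static_cast<uint32_t>(rel32) >> (8 * i));
  }
  return 6;  // bytes emitted
}
```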

View File

@ -746,7 +746,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void call(Address entry, RelocInfo::Mode rmode);
void call(Register reg) { call(Operand(reg)); }
void call(Operand adr);
void call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode);
void call(Handle<Code> code, RelocInfo::Mode rmode);
void wasm_call(Address address, RelocInfo::Mode rmode);
// Jumps
@ -755,7 +755,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void jmp(Address entry, RelocInfo::Mode rmode);
void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(Operand adr);
void jmp(Handle<CodeDataContainer> code, RelocInfo::Mode rmode);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
// Unconditional jump relative to the current address. Low-level routine,
// use with caution!
void jmp_rel(int offset);
@ -763,7 +763,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional jumps
void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<CodeDataContainer> code,
void j(Condition cc, Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
// Floating-point operations
@ -1688,7 +1688,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void emit(uint32_t x);
inline void emit(Handle<HeapObject> handle);
inline void emit(uint32_t x, RelocInfo::Mode rmode);
inline void emit(Handle<CodeDataContainer> code, RelocInfo::Mode rmode);
inline void emit(Handle<Code> code, RelocInfo::Mode rmode);
inline void emit(const Immediate& x);
inline void emit_b(Immediate x);
inline void emit_w(const Immediate& x);

View File

@ -706,10 +706,8 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
CompareRange(instance_type_out, lower_limit, higher_limit, scratch);
}
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container) {
test(FieldOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset),
void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code) {
test(FieldOperand(code, Code::kKindSpecificFlagsOffset),
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
}
@ -742,7 +740,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry);
__ TestCodeIsMarkedForDeoptimization(optimized_code_entry);
__ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
@ -752,7 +750,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
ecx);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ Pop(optimized_code_entry);
__ LoadCodeDataContainerEntry(ecx, optimized_code_entry);
__ LoadCodeEntry(ecx, optimized_code_entry);
__ Pop(edx);
__ Pop(eax);
__ jmp(ecx);
@ -818,7 +816,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
}
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
JumpCodeDataContainerObject(ecx);
JumpCodeObject(ecx);
}
// Read off the flags in the feedback vector and check if there
@ -1284,8 +1282,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Move(kRuntimeCallArgCountRegister, Immediate(num_arguments));
Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@ -1317,7 +1314,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
Move(kRuntimeCallFunctionRegister, Immediate(ext));
Handle<CodeDataContainer> code =
Handle<Code> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@ -1520,10 +1517,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeDataContainerObject(ecx);
CallCodeObject(ecx);
break;
case InvokeType::kJump:
JumpCodeDataContainerObject(ecx);
JumpCodeObject(ecx);
break;
}
jmp(&done, Label::kNear);
@ -1967,8 +1964,7 @@ void TurboAssembler::PushPC() {
bind(&get_pc);
}
void TurboAssembler::Call(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode) {
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASM_CODE_COMMENT(this);
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
@ -2016,8 +2012,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
call(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code = isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}
@ -2038,8 +2033,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
jmp(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code = isolate()->builtins()->code_handle(builtin);
jmp(code, RelocInfo::CODE_TARGET);
break;
}
@ -2051,39 +2045,32 @@ Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
}
void TurboAssembler::LoadCodeDataContainerEntry(
Register destination, Register code_data_container_object) {
void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
mov(destination, FieldOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
}
void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object) {
void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
Register code_object) {
ASM_CODE_COMMENT(this);
// Compute the InstructionStream object pointer from the code entry point.
mov(destination, FieldOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
}
void TurboAssembler::CallCodeDataContainerObject(
Register code_data_container_object) {
LoadCodeDataContainerEntry(code_data_container_object,
code_data_container_object);
call(code_data_container_object);
void TurboAssembler::CallCodeObject(Register code_object) {
LoadCodeEntry(code_object, code_object);
call(code_object);
}
void TurboAssembler::JumpCodeDataContainerObject(
Register code_data_container_object, JumpMode jump_mode) {
LoadCodeDataContainerEntry(code_data_container_object,
code_data_container_object);
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
LoadCodeEntry(code_object, code_object);
switch (jump_mode) {
case JumpMode::kJump:
jmp(code_data_container_object);
jmp(code_object);
return;
case JumpMode::kPushAndReturn:
push(code_data_container_object);
push(code_object);
ret(0);
return;
}
@ -2095,8 +2082,7 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
isolate(), reference)));
}
void TurboAssembler::Jump(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode) {
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;

View File

@ -149,7 +149,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Call(Register reg) { call(reg); }
void Call(Operand op) { call(op); }
void Call(Label* target) { call(target); }
void Call(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
@ -158,21 +158,20 @@ class V8_EXPORT_PRIVATE TurboAssembler
void CallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin);
// Load the code entry point from the CodeDataContainer object.
void LoadCodeDataContainerEntry(Register destination,
Register code_data_container_object);
// Load code entry point from the CodeDataContainer object and compute
// Load the code entry point from the Code object.
void LoadCodeEntry(Register destination, Register code_object);
// Load code entry point from the Code object and compute
// InstructionStream object pointer out of it. Must not be used for
// CodeDataContainers corresponding to builtins, because their entry points
// Code objects corresponding to builtins, because their entry point
// values point to the embedded instruction stream in the .text section.
void LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object);
void CallCodeDataContainerObject(Register code_data_container_object);
void JumpCodeDataContainerObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);
void LoadCodeInstructionStreamNonBuiltin(Register destination,
Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
void Jump(const ExternalReference& reference);
void Jump(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void LoadMap(Register destination, Register object);
@ -559,8 +558,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
and_(reg, Immediate(mask));
}
void TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container);
void TestCodeIsMarkedForDeoptimization(Register code);
Immediate ClearedValue() const;
// Tiering support.

View File

@ -20,7 +20,7 @@ MaglevSafepointTable::MaglevSafepointTable(Isolate* isolate, Address pc,
}
MaglevSafepointTable::MaglevSafepointTable(Isolate* isolate, Address pc,
CodeDataContainer code)
Code code)
: MaglevSafepointTable(code.InstructionStart(isolate, pc),
code.SafepointTableAddress()) {
DCHECK(code.is_maglevved());

View File

@ -73,8 +73,7 @@ class MaglevSafepointTable {
// belongs to the embedded or un-embedded code blob.
explicit MaglevSafepointTable(Isolate* isolate, Address pc,
InstructionStream code);
explicit MaglevSafepointTable(Isolate* isolate, Address pc,
CodeDataContainer code);
explicit MaglevSafepointTable(Isolate* isolate, Address pc, Code code);
MaglevSafepointTable(const MaglevSafepointTable&) = delete;
MaglevSafepointTable& operator=(const MaglevSafepointTable&) = delete;

View File

@ -25,8 +25,7 @@ SafepointTable::SafepointTable(Isolate* isolate, Address pc,
: SafepointTable(code.InstructionStart(isolate, pc),
code.SafepointTableAddress()) {}
SafepointTable::SafepointTable(Isolate* isolate, Address pc,
CodeDataContainer code)
SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
: SafepointTable(code.InstructionStart(isolate, pc),
code.SafepointTableAddress()) {}

View File

@ -61,7 +61,7 @@ class SafepointTable {
// The isolate and pc arguments are used for figuring out whether pc
// belongs to the embedded or un-embedded code blob.
explicit SafepointTable(Isolate* isolate, Address pc, InstructionStream code);
explicit SafepointTable(Isolate* isolate, Address pc, CodeDataContainer code);
explicit SafepointTable(Isolate* isolate, Address pc, Code code);
#if V8_ENABLE_WEBASSEMBLY
explicit SafepointTable(const wasm::WasmCode* code);
#endif // V8_ENABLE_WEBASSEMBLY

View File

@ -226,7 +226,7 @@ int Assembler::deserialization_special_target_size(
return kSpecialTargetSize;
}
Handle<CodeDataContainer> Assembler::code_target_object_handle_at(Address pc) {
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
return GetCodeTarget(ReadUnalignedValue<int32_t>(pc));
}
@ -286,7 +286,7 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
compressed));
// Embedding of compressed InstructionStream objects must not happen when
// external code space is enabled, because CodeDataContainers must be used
// external code space is enabled, because Code objects must be used
// instead.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
!IsCodeSpaceObject(HeapObject::cast(obj)));

View File

@ -1024,9 +1024,9 @@ void Assembler::call(Label* L) {
}
}
void Assembler::call(Handle<CodeDataContainer> target, RelocInfo::Mode rmode) {
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK(FromCodeDataContainer(*target).IsExecutable());
DCHECK(FromCode(*target).IsExecutable());
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
@ -1437,8 +1437,7 @@ void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
emitl(static_cast<int32_t>(entry));
}
void Assembler::j(Condition cc, Handle<CodeDataContainer> target,
RelocInfo::Mode rmode) {
void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(is_uint4(cc));
// 0000 1111 1000 tttn #32-bit disp.
@ -1517,7 +1516,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
}
}
void Assembler::jmp(Handle<CodeDataContainer> target, RelocInfo::Mode rmode) {
void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
// 1110 1001 #32-bit disp.

View File

@ -478,7 +478,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
inline Handle<CodeDataContainer> code_target_object_handle_at(Address pc);
inline Handle<Code> code_target_object_handle_at(Address pc);
inline Handle<HeapObject> compressed_embedded_object_handle_at(Address pc);
// Number of bytes taken up by the branch target in the code.
@ -827,7 +827,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void near_jmp(intptr_t disp, RelocInfo::Mode rmode);
void near_j(Condition cc, intptr_t disp, RelocInfo::Mode rmode);
void call(Handle<CodeDataContainer> target,
void call(Handle<Code> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
// Call near absolute indirect, address in register
@ -838,7 +838,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Use a 32-bit signed displacement.
// Unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(Handle<CodeDataContainer> target, RelocInfo::Mode rmode);
void jmp(Handle<Code> target, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
void jmp(Register adr);
@ -851,7 +851,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional jumps
void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, Address entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<CodeDataContainer> target, RelocInfo::Mode rmode);
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
// Floating-point operations
void fld(int i);

View File

@ -575,8 +575,7 @@ void TurboAssembler::CallTSANStoreStub(Register address, Register value,
if (isolate()) {
Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
Handle<CodeDataContainer> code_target =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY
@ -617,8 +616,7 @@ void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
if (isolate()) {
Builtin builtin = CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size);
Handle<CodeDataContainer> code_target =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY
@ -778,8 +776,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Move(rax, num_arguments);
LoadAddress(rbx, ExternalReference::Create(f));
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@ -807,7 +804,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
Handle<CodeDataContainer> code =
Handle<Code> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@ -837,8 +834,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
__ AssertCodeDataContainer(optimized_code_entry);
__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry);
__ AssertCode(optimized_code_entry);
__ TestCodeIsMarkedForDeoptimization(optimized_code_entry);
__ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
@ -847,7 +844,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
scratch1, scratch2);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ Move(rcx, optimized_code_entry);
__ JumpCodeDataContainerObject(rcx, jump_mode);
__ JumpCodeObject(rcx, jump_mode);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
@ -896,7 +893,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
Pop(kJavaScriptCallTargetRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
JumpCodeDataContainerObject(rcx, jump_mode);
JumpCodeObject(rcx, jump_mode);
}
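
A sketch of the decision TailCallOptimizedCodeSlot encodes, for orientation; the slot-loading helper name is invented, the remaining calls are the operations shown above:

    Code optimized = LoadOptimizedCodeSlot(feedback_vector);  // assumed helper
    if (optimized.marked_for_deoptimization()) {
      // Stale entry: let the runtime heal the slot, then continue unoptimized.
      CallRuntime(Runtime::kHealOptimizedCodeSlot);
    } else {
      // Install the code on the closure and tail-call its entry point.
      ReplaceClosureCodeWithOptimizedCode(optimized, closure);
      JumpCodeObject(optimized);
    }
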
void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
@ -906,7 +903,7 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
DCHECK_EQ(closure, kJSFunctionRegister);
// Store the optimized code in the closure.
AssertCodeDataContainer(optimized_code);
AssertCode(optimized_code);
StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
optimized_code);
// Write barrier clobbers scratch1 below.
@ -2147,8 +2144,7 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode,
bind(&skip);
}
void TurboAssembler::Jump(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode) {
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;
@ -2160,8 +2156,8 @@ void TurboAssembler::Jump(Handle<CodeDataContainer> code_object,
jmp(code_object, rmode);
}
void TurboAssembler::Jump(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode, Condition cc) {
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;
@ -2197,8 +2193,7 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
call(kScratchRegister);
}
void TurboAssembler::Call(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode) {
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;
@ -2249,8 +2244,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
Call(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code = isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}
@ -2271,8 +2265,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
Jump(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code = isolate()->builtins()->code_handle(builtin);
jmp(code, RelocInfo::CODE_TARGET);
break;
}
@ -2293,47 +2286,39 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) {
Jump(EntryFromBuiltinAsOperand(builtin), cc);
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
Handle<Code> code = isolate()->builtins()->code_handle(builtin);
j(cc, code, RelocInfo::CODE_TARGET);
break;
}
}
}
void TurboAssembler::LoadCodeDataContainerEntry(
Register destination, Register code_data_container_object) {
void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
movq(destination, FieldOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
movq(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
}
void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object) {
void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
Register code_object) {
ASM_CODE_COMMENT(this);
// Compute the InstructionStream object pointer from the code entry point.
movq(destination, FieldOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
movq(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
subq(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
}
void TurboAssembler::CallCodeDataContainerObject(
Register code_data_container_object) {
LoadCodeDataContainerEntry(code_data_container_object,
code_data_container_object);
call(code_data_container_object);
void TurboAssembler::CallCodeObject(Register code_object) {
LoadCodeEntry(code_object, code_object);
call(code_object);
}
void TurboAssembler::JumpCodeDataContainerObject(
Register code_data_container_object, JumpMode jump_mode) {
LoadCodeDataContainerEntry(code_data_container_object,
code_data_container_object);
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
LoadCodeEntry(code_object, code_object);
switch (jump_mode) {
case JumpMode::kJump:
jmp(code_data_container_object);
jmp(code_object);
return;
case JumpMode::kPushAndReturn:
pushq(code_data_container_object);
pushq(code_object);
Ret();
return;
}
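
Restating the arithmetic in LoadCodeInstructionStreamNonBuiltin as plain C++ (illustrative only): the cached entry point addresses the first instruction, which sits directly behind the InstructionStream header, so stepping back over the header while re-applying the heap-object tag recovers the tagged InstructionStream pointer. This is invalid for builtins, whose entry points address the embedded blob in .text:

    // tagged InstructionStream = entry point - (header size - tag)
    Address istream =
        code_entry_point - (InstructionStream::kHeaderSize - kHeapObjectTag);
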
@ -2606,10 +2591,8 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
CompareRange(instance_type_out, lower_limit, higher_limit);
}
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container) {
testl(FieldOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset),
void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code) {
testl(FieldOperand(code, Code::kKindSpecificFlagsOffset),
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
}
@ -2657,16 +2640,16 @@ void TurboAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) {
Check(zero, AbortReason::kSignedBitOfSmiIsNotZero);
}
void TurboAssembler::AssertCodeDataContainer(Register object) {
void TurboAssembler::AssertCode(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsNotACodeDataContainer);
Check(not_equal, AbortReason::kOperandIsNotACode);
Push(object);
LoadMap(object, object);
CmpInstanceType(object, CODE_DATA_CONTAINER_TYPE);
CmpInstanceType(object, CODE_TYPE);
popq(object);
Check(equal, AbortReason::kOperandIsNotACodeDataContainer);
Check(equal, AbortReason::kOperandIsNotACode);
}
void MacroAssembler::AssertConstructor(Register object) {
@ -2865,10 +2848,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeDataContainerObject(rcx);
CallCodeObject(rcx);
break;
case InvokeType::kJump:
JumpCodeDataContainerObject(rcx);
JumpCodeObject(rcx);
break;
}
jmp(&done, Label::kNear);
@ -3387,15 +3370,14 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
int offset = InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
LoadTaggedPointerField(scratch,
Operand(kJavaScriptCallCodeStartRegister, offset));
testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
testl(FieldOperand(scratch, Code::kKindSpecificFlagsOffset),
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, not_zero);
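
In C++ terms, BailoutIfDeoptimized performs roughly the following, given the current function's InstructionStream istream; the accessor shapes are assumptions, the offset and bit are the ones used above:

    Code code = istream.code();                   // 1. the load from kCodeOffset
    uint32_t flags = code.kind_specific_flags();  //    flags word (assumed accessor)
    if (flags & (1 << InstructionStream::kMarkedForDeoptimizationBit)) {  // 2. test
      TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode);              // 3. bail out
    }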

View File

@ -388,7 +388,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Call(Register reg) { call(reg); }
void Call(Operand op);
void Call(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
@ -400,27 +400,25 @@ class V8_EXPORT_PRIVATE TurboAssembler
void TailCallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin, Condition cc);
// Load the code entry point from the CodeDataContainer object.
void LoadCodeDataContainerEntry(Register destination,
Register code_data_container_object);
// Load code entry point from the CodeDataContainer object and compute
// Load the code entry point from the Code object.
void LoadCodeEntry(Register destination, Register code_object);
// Load the code entry point from the Code object and compute the
// InstructionStream object pointer out of it. Must not be used for
// CodeDataContainers corresponding to builtins, because their entry point
// Codes corresponding to builtins, because their entry point
// values point to the embedded instruction stream in the .text section.
void LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object);
void CallCodeDataContainerObject(Register code_data_container_object);
void JumpCodeDataContainerObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);
void LoadCodeInstructionStreamNonBuiltin(Register destination,
Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(Address destination, RelocInfo::Mode rmode, Condition cc);
void Jump(const ExternalReference& reference);
void Jump(Operand op);
void Jump(Operand op, Condition cc);
void Jump(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode);
void Jump(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode,
Condition cc);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode, Condition cc);
void BailoutIfDeoptimized(Register scratch);
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
@ -474,9 +472,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
// Abort execution if argument is not a CodeDataContainer, enabled via
// Abort execution if argument is not a Code, enabled via
// --debug-code.
void AssertCodeDataContainer(Register object) NOOP_UNLESS_DEBUG_CODE
void AssertCode(Register object) NOOP_UNLESS_DEBUG_CODE
// Print a message to stdout and abort execution.
void Abort(AbortReason msg);
@ -830,8 +828,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
andq(reg, Immediate(mask));
}
void TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container);
void TestCodeIsMarkedForDeoptimization(Register code);
Immediate ClearedValue() const;
// Tiering support.

View File

@ -868,7 +868,7 @@ using JavaScriptArguments = Arguments<ArgumentsType::kJS>;
class Assembler;
class ClassScope;
class InstructionStream;
class CodeDataContainer;
class Code;
class CodeSpace;
class Context;
class DeclarationScope;

View File

@ -13,7 +13,7 @@ namespace v8::internal {
// This is just a collection of compression scheme related functions. Having
// such a class allows plugging a different decompression scheme in certain
// places by introducing another CompressionScheme class with a customized
// implementation. This is useful, for example, for CodeDataContainer::code
// implementation. This is useful, for example, for Code::code
// field (see CodeObjectSlot).
class V8HeapCompressionScheme {
public:
@ -61,7 +61,7 @@ class V8HeapCompressionScheme {
#ifdef V8_EXTERNAL_CODE_SPACE
// Compression scheme used for fields containing InstructionStream objects
// (namely for the CodeDataContainer::code field). Same as
// (namely for the Code::code field). Same as
// V8HeapCompressionScheme but with a different base value.
class ExternalCodeCompressionScheme {
public:
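
Since the hunk cuts off at the class opening, the intent in brief: compressed fields hold a 32-bit offset from a per-scheme base, so the external code space can use its own base alongside the main heap's. A generic sketch with assumed method shapes, not V8's actual signatures:

    struct SketchCompressionScheme {
      static uintptr_t base;  // cage base; differs between schemes
      static uint32_t Compress(uintptr_t ptr) {
        return static_cast<uint32_t>(ptr);  // keep the low 32 bits
      }
      static uintptr_t Decompress(uint32_t value) {
        return base + value;  // re-attach this scheme's base
      }
    };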

View File

@ -641,17 +641,15 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
int offset = InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ ldr(scratch, FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset));
__ tst(scratch, Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne);
@ -674,7 +672,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeDataContainerObject(reg);
__ CallCodeObject(reg);
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@ -726,7 +724,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeDataContainerObject(reg);
__ JumpCodeObject(reg);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
unwinding_info_writer_.MarkBlockWillExit();
@ -758,7 +756,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeDataContainerObject(r2);
__ CallCodeObject(r2);
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
frame_access_state()->ClearSPDelta();

View File

@ -670,7 +670,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeDataContainerObject(reg);
__ CallCodeObject(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@ -724,7 +724,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeDataContainerObject(reg);
__ JumpCodeObject(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@ -760,7 +760,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadTaggedPointerField(x2,
FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeDataContainerObject(x2);
__ CallCodeObject(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;

View File

@ -90,7 +90,7 @@ class InstructionOperandConverter {
return ToExternalReference(instr_->InputAt(index));
}
Handle<CodeDataContainer> InputCode(size_t index) {
Handle<Code> InputCode(size_t index) {
return ToCode(instr_->InputAt(index));
}
@ -172,7 +172,7 @@ class InstructionOperandConverter {
return ToConstant(op).ToExternalReference();
}
Handle<CodeDataContainer> ToCode(InstructionOperand* op) {
Handle<Code> ToCode(InstructionOperand* op) {
return ToConstant(op).ToCode();
}

View File

@ -658,15 +658,14 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ push(eax); // Push eax so we can use it as a scratch register.
__ mov(eax, Operand(kJavaScriptCallCodeStartRegister, offset));
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
__ test(FieldOperand(eax, Code::kKindSpecificFlagsOffset),
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
__ pop(eax); // Restore eax.
@ -687,14 +686,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
InstructionOperand* op = instr->InputAt(0);
if (op->IsImmediate()) {
Handle<CodeDataContainer> code = i.InputCode(0);
Handle<Code> code = i.InputCode(0);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeDataContainerObject(reg);
__ CallCodeObject(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@ -740,14 +739,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<CodeDataContainer> code = i.InputCode(0);
Handle<Code> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeDataContainerObject(reg);
__ JumpCodeObject(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@ -773,7 +772,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
__ CallCodeDataContainerObject(ecx);
__ CallCodeObject(ecx);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;

View File

@ -581,11 +581,10 @@ Handle<HeapObject> Constant::ToHeapObject() const {
return value;
}
Handle<CodeDataContainer> Constant::ToCode() const {
Handle<Code> Constant::ToCode() const {
DCHECK_EQ(kHeapObject, type());
Handle<CodeDataContainer> value(
reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
DCHECK(value->IsCodeDataContainer(GetPtrComprCageBaseSlow(*value)));
Handle<Code> value(reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
DCHECK(value->IsCode(GetPtrComprCageBaseSlow(*value)));
return value;
}

View File

@ -1192,7 +1192,7 @@ class V8_EXPORT_PRIVATE Constant final {
}
Handle<HeapObject> ToHeapObject() const;
Handle<CodeDataContainer> ToCode() const;
Handle<Code> ToCode() const;
private:
Type type_;

View File

@ -1263,14 +1263,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<CodeDataContainer> code = i.InputCode(0);
Handle<Code> code = i.InputCode(0);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeDataContainerEntry(reg, reg);
__ LoadCodeEntry(reg, reg);
__ call(reg);
}
RecordCallPosition(instr);
@ -1323,14 +1323,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<CodeDataContainer> code = i.InputCode(0);
Handle<Code> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeDataContainerEntry(reg, reg);
__ LoadCodeEntry(reg, reg);
__ jmp(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
@ -1360,7 +1360,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ LoadTaggedPointerField(rcx,
FieldOperand(func, JSFunction::kCodeOffset));
__ CallCodeDataContainerObject(rcx);
__ CallCodeObject(rcx);
frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;

View File

@ -2202,7 +2202,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
bytecode_iterator().GetFlag8Operand(2))
? AllocationType::kOld
: AllocationType::kYoung;
CodeDataContainerRef compile_lazy =
CodeRef compile_lazy =
MakeRef(broker(), *BUILTIN_CODE(jsgraph()->isolate(), CompileLazy));
const Operator* op =
javascript()->CreateClosure(shared_info, compile_lazy, allocation);

View File

@ -1000,7 +1000,7 @@ Node* CodeAssembler::CallRuntimeImpl(
Runtime::FunctionId function, TNode<Object> context,
std::initializer_list<TNode<Object>> args) {
int result_size = Runtime::FunctionForId(function)->result_size;
TNode<CodeDataContainer> centry =
TNode<Code> centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
constexpr size_t kMaxNumArgs = 6;
DCHECK_GE(kMaxNumArgs, args.size());
@ -1033,7 +1033,7 @@ void CodeAssembler::TailCallRuntimeImpl(
Runtime::FunctionId function, TNode<Int32T> arity, TNode<Object> context,
std::initializer_list<TNode<Object>> args) {
int result_size = Runtime::FunctionForId(function)->result_size;
TNode<CodeDataContainer> centry =
TNode<Code> centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
constexpr size_t kMaxNumArgs = 6;
DCHECK_GE(kMaxNumArgs, args.size());
@ -1089,8 +1089,7 @@ Node* CodeAssembler::CallStubN(StubCallMode call_mode,
}
void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
TNode<CodeDataContainer> target,
TNode<Object> context,
TNode<Code> target, TNode<Object> context,
std::initializer_list<Node*> args) {
constexpr size_t kMaxNumArgs = 11;
DCHECK_GE(kMaxNumArgs, args.size());
@ -1195,8 +1194,7 @@ template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch(
TNode<Object>, TNode<IntPtrT>, TNode<BytecodeArray>,
TNode<ExternalReference>);
void CodeAssembler::TailCallJSCode(TNode<CodeDataContainer> code,
TNode<Context> context,
void CodeAssembler::TailCallJSCode(TNode<Code> code, TNode<Context> context,
TNode<JSFunction> function,
TNode<Object> new_target,
TNode<Int32T> arg_count) {

View File

@ -1170,14 +1170,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class T = Object, class... TArgs>
TNode<T> CallStub(Callable const& callable, TNode<Object> context,
TArgs... args) {
TNode<CodeDataContainer> target = HeapConstant(callable.code());
TNode<Code> target = HeapConstant(callable.code());
return CallStub<T>(callable.descriptor(), target, context, args...);
}
template <class T = Object, class... TArgs>
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
TNode<CodeDataContainer> target, TNode<Object> context,
TArgs... args) {
TNode<Code> target, TNode<Object> context, TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallCodeObject, descriptor,
target, context, args...));
}
@ -1193,14 +1192,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
void TailCallStub(Callable const& callable, TNode<Object> context,
TArgs... args) {
TNode<CodeDataContainer> target = HeapConstant(callable.code());
TNode<Code> target = HeapConstant(callable.code());
TailCallStub(callable.descriptor(), target, context, args...);
}
template <class... TArgs>
void TailCallStub(const CallInterfaceDescriptor& descriptor,
TNode<CodeDataContainer> target, TNode<Object> context,
TArgs... args) {
TNode<Code> target, TNode<Object> context, TArgs... args) {
TailCallStubImpl(descriptor, target, context, {args...});
}
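
A hypothetical call site in a CodeStubAssembler-derived generator, showing where the Handle<Code> ends up; the builtin, context, and argument values are invented for illustration:

    Callable callable = Builtins::CallableFor(isolate(), Builtin::kToString);
    // callable.code() is now a Handle<Code>; HeapConstant embeds it and the
    // call is dispatched with StubCallMode::kCallCodeObject.
    TNode<Object> result = CallStub(callable, context, input);
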
@ -1223,7 +1221,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Note that no argument adaptation is going on here - all the JavaScript
// arguments are left on the stack unmodified. Therefore, this tail call can
// only be used after arguments adaptation has been performed already.
void TailCallJSCode(TNode<CodeDataContainer> code, TNode<Context> context,
void TailCallJSCode(TNode<Code> code, TNode<Context> context,
TNode<JSFunction> function, TNode<Object> new_target,
TNode<Int32T> arg_count);
@ -1232,7 +1230,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* receiver, TArgs... args) {
int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<CodeDataContainer> target = HeapConstant(callable.code());
TNode<Code> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
CAST(function), {}, arity, {receiver, args...}));
}
@ -1243,7 +1241,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
TNode<CodeDataContainer> target = HeapConstant(callable.code());
TNode<Code> target = HeapConstant(callable.code());
return CallJSStubImpl(callable.descriptor(), target, CAST(context),
CAST(function), CAST(new_target), arity,
{receiver, args...});
@ -1342,7 +1340,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
std::initializer_list<TNode<Object>> args);
void TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
TNode<CodeDataContainer> target, TNode<Object> context,
TNode<Code> target, TNode<Object> context,
std::initializer_list<Node*> args);
void TailCallStubThenBytecodeDispatchImpl(

View File

@ -2201,8 +2201,8 @@ BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C
CodeDataContainerRef JSFunctionRef::code() const {
CodeDataContainer code = object()->code(kAcquireLoad);
CodeRef JSFunctionRef::code() const {
Code code = object()->code(kAcquireLoad);
return MakeRefAssumeMemoryFence(broker(), code);
}
@ -2303,17 +2303,16 @@ unsigned InstructionStreamRef::GetInlinedBytecodeSize() const {
return GetInlinedBytecodeSizeImpl(*object());
}
unsigned CodeDataContainerRef::GetInlinedBytecodeSize() const {
CodeDataContainer code_data_container = *object();
if (code_data_container.is_off_heap_trampoline()) {
unsigned CodeRef::GetInlinedBytecodeSize() const {
Code code = *object();
if (code.is_off_heap_trampoline()) {
return 0;
}
// Safe to do a relaxed conversion to InstructionStream here since
// CodeDataContainer::code field is modified only by GC and the
// CodeDataContainer was acquire-loaded.
InstructionStream code = code_data_container.instruction_stream(kRelaxedLoad);
return GetInlinedBytecodeSizeImpl(code);
// Code::instruction_stream field is modified only by GC and the
// Code was acquire-loaded.
return GetInlinedBytecodeSizeImpl(code.instruction_stream(kRelaxedLoad));
}
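
The safety argument in that comment is the standard acquire/relaxed pairing, restated here in plain C++ atomics with no V8 types:

    #include <atomic>

    struct Published { int inlined_bytecode_size; };
    std::atomic<Published*> slot;

    // Writer (here: the GC) release-stores a fully initialized object.
    void Publish(Published* p) { slot.store(p, std::memory_order_release); }

    // Reader: the acquire load synchronizes with the release store, so
    // subsequent plain or relaxed reads through the pointer are safe.
    int Read() {
      Published* p = slot.load(std::memory_order_acquire);
      return p->inlined_bytecode_size;
    }
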
#undef BIMODAL_ACCESSOR

View File

@ -112,7 +112,7 @@ enum class RefSerializationKind {
NEVER_SERIALIZED(CallHandlerInfo) \
NEVER_SERIALIZED(Cell) \
NEVER_SERIALIZED(InstructionStream) \
NEVER_SERIALIZED(CodeDataContainer) \
NEVER_SERIALIZED(Code) \
NEVER_SERIALIZED(Context) \
NEVER_SERIALIZED(DescriptorArray) \
NEVER_SERIALIZED(FeedbackCell) \
@ -459,7 +459,7 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
CodeDataContainerRef code() const;
CodeRef code() const;
bool has_initial_map(CompilationDependencies* dependencies) const;
bool PrototypeRequiresRuntimeLookup(
@ -1019,11 +1019,11 @@ class InstructionStreamRef : public HeapObjectRef {
unsigned GetInlinedBytecodeSize() const;
};
class CodeDataContainerRef : public HeapObjectRef {
class CodeRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(CodeDataContainer, HeapObjectRef)
DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef)
Handle<CodeDataContainer> object() const;
Handle<Code> object() const;
unsigned GetInlinedBytecodeSize() const;
};

View File

@ -586,7 +586,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
CodeDataContainerRef code = MakeRef(broker_, *callable.code());
CodeRef code = MakeRef(broker_, *callable.code());
return AddNode<JSFunction>(graph()->NewNode(
javascript()->CreateClosure(shared, code), HeapConstant(feedback_cell),
context, effect(), control()));
@ -6909,7 +6909,7 @@ Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
CodeDataContainerRef code = MakeRef(broker(), *callable.code());
CodeRef code = MakeRef(broker(), *callable.code());
return graph()->NewNode(javascript()->CreateClosure(shared, code),
jsgraph()->HeapConstant(feedback_cell), context,
effect, control);

View File

@ -1310,7 +1310,7 @@ const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
}
const Operator* JSOperatorBuilder::CreateClosure(
const SharedFunctionInfoRef& shared_info, const CodeDataContainerRef& code,
const SharedFunctionInfoRef& shared_info, const CodeRef& code,
AllocationType allocation) {
static constexpr int kFeedbackCell = 1;
static constexpr int kArity = kFeedbackCell;

View File

@ -676,21 +676,18 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
class CreateClosureParameters final {
public:
CreateClosureParameters(const SharedFunctionInfoRef& shared_info,
const CodeDataContainerRef& code,
AllocationType allocation)
const CodeRef& code, AllocationType allocation)
: shared_info_(shared_info), code_(code), allocation_(allocation) {}
SharedFunctionInfoRef shared_info(JSHeapBroker* broker) const {
return shared_info_.AsRef(broker);
}
CodeDataContainerRef code(JSHeapBroker* broker) const {
return code_.AsRef(broker);
}
CodeRef code(JSHeapBroker* broker) const { return code_.AsRef(broker); }
AllocationType allocation() const { return allocation_; }
private:
const SharedFunctionInfoTinyRef shared_info_;
const CodeDataContainerTinyRef code_;
const CodeTinyRef code_;
AllocationType const allocation_;
friend bool operator==(CreateClosureParameters const&,
@ -953,8 +950,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateCollectionIterator(CollectionKind, IterationKind);
const Operator* CreateBoundFunction(size_t arity, const MapRef& map);
const Operator* CreateClosure(
const SharedFunctionInfoRef& shared_info,
const CodeDataContainerRef& code,
const SharedFunctionInfoRef& shared_info, const CodeRef& code,
AllocationType allocation = AllocationType::kYoung);
const Operator* CreateIterResultObject();
const Operator* CreateStringIterator();

View File

@ -9,7 +9,7 @@
namespace v8::internal::compiler::turboshaft {
Handle<CodeDataContainer> BuiltinCodeHandle(Builtin builtin, Isolate* isolate) {
Handle<Code> BuiltinCodeHandle(Builtin builtin, Isolate* isolate) {
return isolate->builtins()->code_handle(builtin);
}

View File

@ -33,7 +33,7 @@ enum class Builtin : int32_t;
namespace v8::internal::compiler::turboshaft {
Handle<CodeDataContainer> BuiltinCodeHandle(Builtin builtin, Isolate* isolate);
Handle<Code> BuiltinCodeHandle(Builtin builtin, Isolate* isolate);
// Forward declarations
template <class Assembler>

View File

@ -369,7 +369,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case WITH_CONTEXT_TYPE:
case SCRIPT_TYPE:
case INSTRUCTION_STREAM_TYPE:
case CODE_DATA_CONTAINER_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
case SOURCE_TEXT_MODULE_TYPE:
case SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE:

View File

@ -3032,7 +3032,7 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kCodeOffset));
Node* call_target = gasm_->LoadFromObject(
MachineType::Pointer(), wrapper_code,
wasm::ObjectAccess::ToTagged(CodeDataContainer::kCodeEntryPointOffset));
wasm::ObjectAccess::ToTagged(Code::kCodeEntryPointOffset));
gasm_->Goto(&end_label, call_target);
}
@ -8412,9 +8412,8 @@ MaybeHandle<InstructionStream> CompileJSToJSWrapper(
return code;
}
Handle<CodeDataContainer> CompileCWasmEntry(Isolate* isolate,
const wasm::FunctionSig* sig,
const wasm::WasmModule* module) {
Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
const wasm::WasmModule* module) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
isolate->allocator(), ZONE_NAME, kCompressGraphZone);
Graph* graph = zone->New<Graph>(zone.get());
@ -8463,7 +8462,7 @@ Handle<CodeDataContainer> CompileCWasmEntry(Isolate* isolate,
CompilationJob::FAILED);
CHECK_NE(job->FinalizeJob(isolate), CompilationJob::FAILED);
return ToCodeDataContainer(job->compilation_info()->code(), isolate);
return ToCode(job->compilation_info()->code(), isolate);
}
namespace {

View File

@ -163,7 +163,7 @@ enum CWasmEntryParameters {
// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
// which knows how to feed it its parameters.
V8_EXPORT_PRIVATE Handle<CodeDataContainer> CompileCWasmEntry(
V8_EXPORT_PRIVATE Handle<Code> CompileCWasmEntry(
Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);
// Values from the instance object are cached between Wasm-level function calls.

View File

@ -1231,8 +1231,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
for (Builtin caller = Builtins::kFirst; caller <= Builtins::kLast; ++caller) {
DebugInfo::SideEffectState state = BuiltinGetSideEffectState(caller);
if (state != DebugInfo::kHasNoSideEffect) continue;
InstructionStream code =
FromCodeDataContainer(isolate->builtins()->code(caller));
InstructionStream code = FromCode(isolate->builtins()->code(caller));
int mode = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);

View File

@ -1636,8 +1636,7 @@ void Debug::InstallDebugBreakTrampoline() {
if (!needs_to_use_trampoline) return;
Handle<CodeDataContainer> trampoline =
BUILTIN_CODE(isolate_, DebugBreakTrampoline);
Handle<Code> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
std::vector<Handle<JSFunction>> needs_compile;
using AccessorPairWithContext =
std::pair<Handle<AccessorPair>, Handle<NativeContext>>;

View File

@ -229,7 +229,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
namespace {
class ActivationsFinder : public ThreadVisitor {
public:
ActivationsFinder(CodeDataContainer topmost_optimized_code,
ActivationsFinder(Code topmost_optimized_code,
bool safe_to_deopt_topmost_optimized_code) {
#ifdef DEBUG
topmost_ = topmost_optimized_code;
@ -243,8 +243,7 @@ class ActivationsFinder : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
if (it.frame()->is_optimized()) {
CodeDataContainer code =
it.frame()->LookupCodeDataContainer().ToCodeDataContainer();
Code code = it.frame()->LookupCode().ToCode();
if (CodeKindCanDeoptimize(code.kind()) &&
code.marked_for_deoptimization()) {
// Obtain the trampoline to the deoptimizer call.
@ -273,7 +272,7 @@ class ActivationsFinder : public ThreadVisitor {
private:
#ifdef DEBUG
CodeDataContainer topmost_;
Code topmost_;
bool safe_to_deopt_;
#endif
};
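
For orientation, DeoptimizeMarkedCode drives this visitor over every thread's stack; treat the archived-thread call below as a sketch of the API shape rather than an exact quote:

    ActivationsFinder visitor(topmost_optimized_code,
                              safe_to_deopt_topmost_optimized_code);
    visitor.VisitThread(isolate, isolate->thread_local_top());    // current thread
    isolate->thread_manager()->IterateArchivedThreads(&visitor);  // all others
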
@ -284,7 +283,7 @@ class ActivationsFinder : public ThreadVisitor {
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
DisallowGarbageCollection no_gc;
CodeDataContainer topmost_optimized_code;
Code topmost_optimized_code;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
// Make sure all activations of optimized code can deopt at their current PC.
@ -293,8 +292,7 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
it.Advance()) {
if (it.frame()->is_optimized()) {
CodeDataContainer code =
it.frame()->LookupCodeDataContainer().ToCodeDataContainer();
Code code = it.frame()->LookupCode().ToCode();
JSFunction function =
static_cast<OptimizedFrame*>(it.frame())->function();
TraceFoundActivation(isolate, function);
@ -350,8 +348,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
DeoptimizeMarkedCode(isolate);
}
void Deoptimizer::DeoptimizeFunction(JSFunction function,
CodeDataContainer code) {
void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
Isolate* isolate = function.GetIsolate();
RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
@ -940,7 +937,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const bool deopt_to_baseline =
shared.HasBaselineCode() && v8_flags.deopt_to_baseline;
const bool restart_frame = goto_catch_handler && is_restart_frame();
CodeDataContainer dispatch_builtin = builtins->code(
Code dispatch_builtin = builtins->code(
DispatchBuiltinFor(deopt_to_baseline, advance_bc, restart_frame));
if (verbose_tracing_enabled()) {
@ -1181,8 +1178,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
// Set the continuation for the topmost frame.
CodeDataContainer continuation =
builtins->code(Builtin::kNotifyDeoptimized);
Code continuation = builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@ -1262,8 +1258,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);
Builtins* builtins = isolate_->builtins();
CodeDataContainer construct_stub =
builtins->code(Builtin::kJSConstructStubGeneric);
Code construct_stub = builtins->code(Builtin::kJSConstructStubGeneric);
BytecodeOffset bytecode_offset = translated_frame->bytecode_offset();
const int parameters_count = translated_frame->height();
@ -1417,8 +1412,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
if (is_topmost) {
DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
CodeDataContainer continuation =
builtins->code(Builtin::kNotifyDeoptimized);
Code continuation = builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@ -1842,7 +1836,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// For JSToWasmBuiltinContinuations use ContinueToCodeStubBuiltin, and not
// ContinueToCodeStubBuiltinWithResult because we don't want to overwrite the
// return value that we have already set.
CodeDataContainer continue_to_builtin =
Code continue_to_builtin =
isolate()->builtins()->code(TrampolineForBuiltinContinuation(
mode, frame_info.frame_has_result_stack_slot() &&
!is_js_to_wasm_builtin_continuation));
@ -1859,8 +1853,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
static_cast<intptr_t>(continue_to_builtin.InstructionStart()));
}
CodeDataContainer continuation =
isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
Code continuation = isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}

View File

@ -79,8 +79,7 @@ class Deoptimizer : public Malloced {
// again and any activations of the optimized code will get deoptimized when
// execution returns. If {code} is specified then the given code is targeted
// instead of the function code (e.g. OSR code not installed on function).
static void DeoptimizeFunction(JSFunction function,
CodeDataContainer code = {});
static void DeoptimizeFunction(JSFunction function, Code code = {});
// Deoptimize all code in the given isolate.
V8_EXPORT_PRIVATE static void DeoptimizeAll(Isolate* isolate);

View File

@ -2216,9 +2216,8 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
previously_materialized_objects);
CHECK_EQ(frames_[0].kind(), TranslatedFrame::kUnoptimizedFunction);
CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
Deoptimizer::DeoptimizeFunction(
frame->function(),
frame->LookupCodeDataContainer().ToCodeDataContainer());
Deoptimizer::DeoptimizeFunction(frame->function(),
frame->LookupCode().ToCode());
}
}

View File

@ -257,10 +257,9 @@ static void PrintRelocInfo(std::ostringstream& out, Isolate* isolate,
out << " ;; external reference (" << reference_name << ")";
} else if (RelocInfo::IsCodeTargetMode(rmode)) {
out << " ;; code:";
CodeDataContainer code =
isolate->heap()
->GcSafeFindCodeForInnerPointer(relocinfo->target_address())
.ToCodeDataContainer();
Code code = isolate->heap()
->GcSafeFindCodeForInnerPointer(relocinfo->target_address())
.ToCode();
CodeKind kind = code.kind();
if (code.is_builtin()) {
out << " Builtin::" << Builtins::name(code.builtin_id());

View File

@ -284,8 +284,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
break;
case FILLER_TYPE:
break;
case CODE_DATA_CONTAINER_TYPE:
CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate);
case CODE_TYPE:
Code::cast(*this).CodeVerify(isolate);
break;
#define MAKE_TORQUE_CASE(Name, TYPE) \
@ -921,7 +921,7 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
VerifyPointer(isolate, raw_feedback_cell(isolate));
CHECK(raw_feedback_cell(isolate).IsFeedbackCell());
VerifyPointer(isolate, code(isolate));
CHECK(code(isolate).IsCodeDataContainer());
CHECK(code(isolate).IsCode());
CHECK(map(isolate).is_callable());
Handle<JSFunction> function(*this, isolate);
LookupIterator it(isolate, function, isolate->factory()->prototype_string(),
@ -1089,8 +1089,8 @@ void PropertyCell::PropertyCellVerify(Isolate* isolate) {
CheckDataIsCompatible(property_details(), value());
}
void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
CHECK(IsCodeDataContainer());
void Code::CodeVerify(Isolate* isolate) {
CHECK(IsCode());
if (raw_instruction_stream() != Smi::zero()) {
InstructionStream code = this->instruction_stream();
CHECK_EQ(code.kind(), kind());
@ -1101,10 +1101,10 @@ void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
// builtins.
CHECK_IMPLIES(isolate->embedded_blob_code() && is_off_heap_trampoline(),
builtin_id() == Builtin::kInterpreterEntryTrampoline);
CHECK_EQ(code.code_data_container(kAcquireLoad), *this);
CHECK_EQ(code.code(kAcquireLoad), *this);
// Ensure the cached code entry point corresponds to the InstructionStream
// object associated with this CodeDataContainer.
// object associated with this Code.
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
if (V8_SHORT_BUILTIN_CALLS_BOOL) {
if (code.InstructionStart() == code_entry_point()) {
@ -1114,11 +1114,11 @@ void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
// embedded code blob copy then the
// InstructionStream::InstructionStart() might return the address of the
// remapped builtin regardless of whether the builtins copy existed when
// the code_entry_point value was cached in the CodeDataContainer (see
// the code_entry_point value was cached in the Code (see
// InstructionStream::OffHeapInstructionStart()). So, do a reverse
// InstructionStream object lookup via code_entry_point value to ensure
// it corresponds to the same InstructionStream object associated with
// this CodeDataContainer.
// this Code.
CodeLookupResult lookup_result =
isolate->heap()->GcSafeFindCodeForInnerPointer(code_entry_point());
CHECK(lookup_result.IsFound());
@ -1150,7 +1150,7 @@ void InstructionStream::InstructionStreamVerify(Isolate* isolate) {
#endif // !defined(_MSC_VER) || defined(__clang__)
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(raw_instruction_start(), kCodeAlignment));
CHECK_EQ(*this, code_data_container(kAcquireLoad).instruction_stream());
CHECK_EQ(*this, code(kAcquireLoad).instruction_stream());
// TODO(delphick): Refactor Factory::CodeBuilder::BuildInternal, so that the
// following CHECK works for builtin trampolines. It currently fails because
// InstructionStreamVerify is called halfway through constructing the
@ -1549,9 +1549,9 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
Object latin1_bytecode = arr.get(JSRegExp::kIrregexpLatin1BytecodeIndex);
Object uc16_bytecode = arr.get(JSRegExp::kIrregexpUC16BytecodeIndex);
bool is_compiled = latin1_code.IsCodeDataContainer();
bool is_compiled = latin1_code.IsCode();
if (is_compiled) {
CHECK_EQ(CodeDataContainer::cast(latin1_code).builtin_id(),
CHECK_EQ(Code::cast(latin1_code).builtin_id(),
Builtin::kRegExpExperimentalTrampoline);
CHECK_EQ(uc16_code, latin1_code);
@ -1584,11 +1584,11 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
// interpreter.
CHECK((one_byte_data.IsSmi() &&
Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
one_byte_data.IsCodeDataContainer());
one_byte_data.IsCode());
Object uc16_data = arr.get(JSRegExp::kIrregexpUC16CodeIndex);
CHECK((uc16_data.IsSmi() &&
Smi::ToInt(uc16_data) == JSRegExp::kUninitializedValue) ||
uc16_data.IsCodeDataContainer());
uc16_data.IsCode());
Object one_byte_bytecode =
arr.get(JSRegExp::kIrregexpLatin1BytecodeIndex);
@ -1859,7 +1859,7 @@ void DataHandler::DataHandlerVerify(Isolate* isolate) {
CHECK(IsDataHandler());
VerifyPointer(isolate, smi_handler(isolate));
CHECK_IMPLIES(!smi_handler().IsSmi(),
IsStoreHandler() && smi_handler().IsCodeDataContainer());
IsStoreHandler() && smi_handler().IsCode());
VerifyPointer(isolate, validity_cell(isolate));
CHECK(validity_cell().IsSmi() || validity_cell().IsCell());
int data_count = data_field_count();

View File

@ -213,8 +213,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case INSTRUCTION_STREAM_TYPE:
InstructionStream::cast(*this).InstructionStreamPrint(os);
break;
case CODE_DATA_CONTAINER_TYPE:
CodeDataContainer::cast(*this).CodeDataContainerPrint(os);
case CODE_TYPE:
Code::cast(*this).CodePrint(os);
break;
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
@ -1794,8 +1794,7 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) {
void InstructionStream::InstructionStreamPrint(std::ostream& os) {
PrintHeader(os, "InstructionStream");
os << "\n - code_data_container: "
<< Brief(code_data_container(kAcquireLoad));
os << "\n - code: " << Brief(code(kAcquireLoad));
if (is_builtin()) {
os << "\n - builtin_id: " << Builtins::name(builtin_id());
}
@ -1805,8 +1804,8 @@ void InstructionStream::InstructionStreamPrint(std::ostream& os) {
#endif
}
void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {
PrintHeader(os, "CodeDataContainer");
void Code::CodePrint(std::ostream& os) {
PrintHeader(os, "Code");
os << "\n - kind: " << CodeKindToString(kind());
if (is_builtin()) {
os << "\n - builtin: " << Builtins::name(builtin_id());
@ -3047,17 +3046,16 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
#ifdef ENABLE_DISASSEMBLER
i::StdoutStream os;
if (lookup_result.IsCodeDataContainer()) {
i::CodeDataContainer code =
i::CodeDataContainer::cast(lookup_result.code_data_container());
if (lookup_result.IsCode()) {
i::Code code = i::Code::cast(lookup_result.code());
code.Disassemble(nullptr, os, isolate, address);
} else {
lookup_result.instruction_stream().Disassemble(nullptr, os, isolate,
address);
}
#else // ENABLE_DISASSEMBLER
if (lookup_result.IsCodeDataContainer()) {
lookup_result.code_data_container().Print();
if (lookup_result.IsCode()) {
lookup_result.code().Print();
} else {
lookup_result.instruction_stream().Print();
}

View File

@ -168,9 +168,8 @@ InvokeParams InvokeParams::SetUpForRunMicrotasks(
return params;
}
Handle<CodeDataContainer> JSEntry(Isolate* isolate,
Execution::Target execution_target,
bool is_construct) {
Handle<Code> JSEntry(Isolate* isolate, Execution::Target execution_target,
bool is_construct) {
if (is_construct) {
DCHECK_EQ(Execution::Target::kCallable, execution_target);
return BUILTIN_CODE(isolate, JSConstructEntry);
@ -398,7 +397,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
// Placeholder for return value.
Object value;
Handle<CodeDataContainer> code =
Handle<Code> code =
JSEntry(isolate, params.execution_target, params.is_construct);
{
// Save and restore context around invocation and block the
@ -612,8 +611,7 @@ static_assert(offsetof(StackHandlerMarker, padding) ==
static_assert(sizeof(StackHandlerMarker) == StackHandlerConstants::kSize);
#if V8_ENABLE_WEBASSEMBLY
void Execution::CallWasm(Isolate* isolate,
Handle<CodeDataContainer> wrapper_code,
void Execution::CallWasm(Isolate* isolate, Handle<Code> wrapper_code,
Address wasm_call_target, Handle<Object> object_ref,
Address packed_args) {
using WasmEntryStub = GeneratedCode<Address(

View File

@ -77,7 +77,7 @@ class Execution final : public AllStatic {
// Upon return, either isolate->has_pending_exception() is true, or
// the function's return values are in {packed_args}.
V8_EXPORT_PRIVATE static void CallWasm(Isolate* isolate,
Handle<CodeDataContainer> wrapper_code,
Handle<Code> wrapper_code,
Address wasm_call_target,
Handle<Object> object_ref,
Address packed_args);
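
How the two pieces connect, as a hypothetical call site; the variable names are invented, but both signatures appear elsewhere in this CL:

    Handle<Code> wrapper = compiler::CompileCWasmEntry(isolate, sig, module);
    Execution::CallWasm(isolate, wrapper, wasm_call_target, object_ref,
                        packed_args);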

View File

@ -205,8 +205,7 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
void TypedFrameWithJSLinkage::Iterate(RootVisitor* v) const {
IterateExpressions(v);
IteratePc(v, pc_address(), constant_pool_address(),
LookupCodeDataContainer());
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
// -------------------------------------------------------------------------
@ -567,7 +566,7 @@ CodeLookupResult GetContainingCode(Isolate* isolate, Address pc) {
}
} // namespace
CodeLookupResult StackFrame::LookupCodeDataContainer() const {
CodeLookupResult StackFrame::LookupCode() const {
CodeLookupResult result = GetContainingCode(isolate(), pc());
if (DEBUG_BOOL) {
CHECK(result.IsFound());
@ -576,7 +575,7 @@ CodeLookupResult StackFrame::LookupCodeDataContainer() const {
CHECK_GE(pc(), code.InstructionStart(isolate(), pc()));
CHECK_LT(pc(), code.InstructionEnd(isolate(), pc()));
} else {
CodeDataContainer code = result.code_data_container();
Code code = result.code();
CHECK_GE(pc(), code.InstructionStart(isolate(), pc()));
CHECK_LT(pc(), code.InstructionEnd(isolate(), pc()));
}
@ -587,10 +586,10 @@ CodeLookupResult StackFrame::LookupCodeDataContainer() const {
void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address,
CodeLookupResult lookup_result) const {
if (lookup_result.IsCodeDataContainer()) {
if (lookup_result.IsCode()) {
// The embedded builtins are immovable, so there's no need to update PCs on
// the stack, just visit the CodeDataContainer object.
Object code = lookup_result.code_data_container();
// the stack, just visit the Code object.
Object code = lookup_result.code();
v->VisitRunningCode(FullObjectSlot(&code));
return;
}
@ -619,8 +618,8 @@ void StackFrame::SetReturnAddressLocationResolver(
namespace {
template <typename CodeOrCodeDataContainer>
inline StackFrame::Type ComputeBuiltinFrameType(CodeOrCodeDataContainer code) {
template <typename CodeOrCode>
inline StackFrame::Type ComputeBuiltinFrameType(CodeOrCode code) {
if (code.is_interpreter_trampoline_builtin() ||
// Frames for baseline entry trampolines on the stack are still
// interpreted frames.
@ -703,11 +702,10 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
switch (lookup_result.kind()) {
case CodeKind::BUILTIN: {
if (StackFrame::IsTypeMarker(marker)) break;
// We can't use lookup_result.ToCodeDataContainer() because we might
// We can't use lookup_result.ToCode() because we might
// be in the middle of GC.
if (lookup_result.IsCodeDataContainer()) {
return ComputeBuiltinFrameType(
CodeDataContainer::cast(lookup_result.code_data_container()));
if (lookup_result.IsCode()) {
return ComputeBuiltinFrameType(Code::cast(lookup_result.code()));
}
return ComputeBuiltinFrameType(lookup_result.instruction_stream());
}
@ -850,8 +848,7 @@ void ExitFrame::ComputeCallerState(State* state) const {
void ExitFrame::Iterate(RootVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
IteratePc(v, pc_address(), constant_pool_address(),
LookupCodeDataContainer());
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
@ -927,7 +924,7 @@ void BuiltinExitFrame::Summarize(std::vector<FrameSummary>* frames) const {
DCHECK(frames->empty());
Handle<FixedArray> parameters = GetParameters();
DisallowGarbageCollection no_gc;
CodeLookupResult code = LookupCodeDataContainer();
CodeLookupResult code = LookupCode();
int code_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
FrameSummary::JavaScriptFrameSummary summary(
isolate(), receiver(), function(), code.ToAbstractCode(), code_offset,
@ -1039,7 +1036,7 @@ Object CommonFrame::context() const {
}
int CommonFrame::position() const {
CodeLookupResult code = LookupCodeDataContainer();
CodeLookupResult code = LookupCode();
int code_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
return code.ToAbstractCode().SourcePosition(isolate(), code_offset);
}
@ -1548,8 +1545,8 @@ bool CommonFrame::HasTaggedOutgoingParams(CodeLookupResult& code_lookup) const {
HeapObject TurbofanStubWithContextFrame::unchecked_code() const {
CodeLookupResult code_lookup = isolate()->FindCodeObject(pc());
if (code_lookup.IsCodeDataContainer()) {
return code_lookup.code_data_container();
if (code_lookup.IsCode()) {
return code_lookup.code();
}
if (code_lookup.IsInstructionStream()) {
return code_lookup.instruction_stream();
@ -1646,8 +1643,8 @@ void TurbofanFrame::Iterate(RootVisitor* v) const {
HeapObject StubFrame::unchecked_code() const {
CodeLookupResult code_lookup = isolate()->FindCodeObject(pc());
if (code_lookup.IsCodeDataContainer()) {
return code_lookup.code_data_container();
if (code_lookup.IsCode()) {
return code_lookup.code();
}
if (code_lookup.IsInstructionStream()) {
return code_lookup.instruction_stream();
@ -1656,10 +1653,10 @@ HeapObject StubFrame::unchecked_code() const {
}
int StubFrame::LookupExceptionHandlerInTable() {
CodeLookupResult code = LookupCodeDataContainer();
CodeLookupResult code = LookupCode();
DCHECK(code.is_turbofanned());
DCHECK_EQ(code.kind(), CodeKind::BUILTIN);
HandlerTable table(code.code_data_container());
HandlerTable table(code.code());
int pc_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
return table.LookupReturn(pc_offset);
}
@ -1683,7 +1680,7 @@ HeapObject CommonFrameWithJSLinkage::unchecked_code() const {
}
int TurbofanFrame::ComputeParametersCount() const {
CodeLookupResult code = LookupCodeDataContainer();
CodeLookupResult code = LookupCode();
if (code.kind() == CodeKind::BUILTIN) {
return static_cast<int>(
Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset)) -
@ -1721,7 +1718,7 @@ bool CommonFrameWithJSLinkage::IsConstructor() const {
void CommonFrameWithJSLinkage::Summarize(
std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
CodeLookupResult code = LookupCodeDataContainer();
CodeLookupResult code = LookupCode();
int offset = code.GetOffsetFromInstructionStart(isolate(), pc());
Handle<AbstractCode> abstract_code(code.ToAbstractCode(), isolate());
Handle<FixedArray> params = GetParameters();
@ -1760,7 +1757,7 @@ Script JavaScriptFrame::script() const {
int CommonFrameWithJSLinkage::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
if (DEBUG_BOOL) {
CodeLookupResult code_lookup_result = LookupCodeDataContainer();
CodeLookupResult code_lookup_result = LookupCode();
CHECK(!code_lookup_result.has_handler_table());
CHECK(!code_lookup_result.is_optimized_code() ||
code_lookup_result.kind() == CodeKind::BASELINE);
@ -1819,7 +1816,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
code_offset = baseline_frame->GetBytecodeOffset();
abstract_code = AbstractCode::cast(baseline_frame->GetBytecodeArray());
} else {
CodeLookupResult code = frame->LookupCodeDataContainer();
CodeLookupResult code = frame->LookupCode();
code_offset = code.GetOffsetFromInstructionStart(isolate, frame->pc());
}
PrintFunctionAndOffset(function, abstract_code, code_offset, file,
@ -2144,7 +2141,7 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
// Delegate to the JS frame in the absence of deoptimization info.
// TODO(turbofan): Revisit once we support deoptimization across the board.
CodeLookupResult code = LookupCodeDataContainer();
CodeLookupResult code = LookupCode();
if (code.kind() == CodeKind::BUILTIN) {
return JavaScriptFrame::Summarize(frames);
}
@ -2246,7 +2243,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
// to use FrameSummary to find the corresponding code offset in unoptimized
// code to perform prediction there.
DCHECK_NULL(prediction);
CodeDataContainer code = LookupCodeDataContainer().ToCodeDataContainer();
Code code = LookupCode().ToCode();
HandlerTable table(code);
if (table.NumberOfReturnEntries() == 0) return -1;
@ -2263,15 +2260,14 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
return table.LookupReturn(pc_offset);
}
int MaglevFrame::FindReturnPCForTrampoline(CodeDataContainer code,
int trampoline_pc) const {
int MaglevFrame::FindReturnPCForTrampoline(Code code, int trampoline_pc) const {
DCHECK_EQ(code.kind(), CodeKind::MAGLEV);
DCHECK(code.marked_for_deoptimization());
MaglevSafepointTable safepoints(isolate(), pc(), code);
return safepoints.find_return_pc(trampoline_pc);
}
int TurbofanFrame::FindReturnPCForTrampoline(CodeDataContainer code,
int TurbofanFrame::FindReturnPCForTrampoline(Code code,
int trampoline_pc) const {
DCHECK_EQ(code.kind(), CodeKind::TURBOFAN);
DCHECK(code.marked_for_deoptimization());
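Both overrides answer the same question: given the pc of the deoptimization trampoline that was patched over a return address, recover the return pc in the original optimized code so unwinding can continue. Only the safepoint-table class differs per tier. A consolidated sketch of the Maglev variant (member calls written as they appear inside MaglevFrame above):

    // Sketch: the safepoint table of the marked-for-deoptimization code
    // object records the trampoline-pc -> original-return-pc mapping.
    MaglevSafepointTable safepoints(isolate(), pc(), code);
    int return_pc = safepoints.find_return_pc(trampoline_pc);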
@ -2284,7 +2280,7 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
DCHECK(is_optimized());
JSFunction opt_function = function();
CodeDataContainer code = opt_function.code();
Code code = opt_function.code();
// The code object may have been replaced by lazy deoptimization. Fall
// back to a slow search in this case to find the original optimized
@ -2293,7 +2289,7 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
CodeLookupResult lookup_result =
isolate()->heap()->GcSafeFindCodeForInnerPointer(pc());
CHECK(lookup_result.IsFound());
code = lookup_result.ToCodeDataContainer();
code = lookup_result.ToCode();
}
DCHECK(!code.is_null());
DCHECK(CodeKindCanDeoptimize(code.kind()));
@ -2323,7 +2319,7 @@ void OptimizedFrame::GetFunctions(
// Delegate to the JS frame in the absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
CodeLookupResult code = LookupCodeDataContainer();
CodeLookupResult code = LookupCode();
if (code.kind() == CodeKind::BUILTIN) {
return JavaScriptFrame::GetFunctions(functions);
}
@ -2451,12 +2447,12 @@ void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
}
int BaselineFrame::GetBytecodeOffset() const {
InstructionStream code = LookupCodeDataContainer().instruction_stream();
InstructionStream code = LookupCode().instruction_stream();
return code.GetBytecodeOffsetForBaselinePC(this->pc(), GetBytecodeArray());
}
intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
InstructionStream code = LookupCodeDataContainer().instruction_stream();
InstructionStream code = LookupCode().instruction_stream();
return code.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
GetBytecodeArray());
}
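The two accessors above are near-inverses via the InstructionStream's baseline mapping. A hedged round-trip sketch (frame is a hypothetical BaselineFrame*):

    // Sketch: map the current pc to a bytecode offset and back. The
    // recovered pc is the baseline start pc for that offset, not
    // necessarily the original frame->pc(), since many machine pcs map
    // to one bytecode.
    int offset = frame->GetBytecodeOffset();
    intptr_t pc = frame->GetPCForBytecodeOffset(offset);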
@ -2906,8 +2902,7 @@ void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode,
}
void EntryFrame::Iterate(RootVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(),
LookupCodeDataContainer());
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
void CommonFrame::IterateExpressions(RootVisitor* v) const {
@ -2930,12 +2925,11 @@ void CommonFrame::IterateExpressions(RootVisitor* v) const {
void JavaScriptFrame::Iterate(RootVisitor* v) const {
IterateExpressions(v);
IteratePc(v, pc_address(), constant_pool_address(),
LookupCodeDataContainer());
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
void InternalFrame::Iterate(RootVisitor* v) const {
CodeLookupResult code = LookupCodeDataContainer();
CodeLookupResult code = LookupCode();
IteratePc(v, pc_address(), constant_pool_address(), code);
// Internal frames typically do not receive any arguments, hence their stack
// only contains tagged pointers.

@ -298,15 +298,14 @@ class StackFrame {
virtual Type type() const = 0;
// Get the code associated with this frame. The result might be an
// InstructionStream object, a CodeDataContainer object or an empty value.
// InstructionStream object, a Code object or an empty value.
// This method is used by Isolate::PushStackTraceAndDie() for collecting a
// stack trace on fatal error and thus it might be called in the middle of GC
// and should be as safe as possible.
virtual HeapObject unchecked_code() const = 0;
// Search for the code associated with this frame.
// TODO(v8:11880): rename to LookupCode()
V8_EXPORT_PRIVATE CodeLookupResult LookupCodeDataContainer() const;
V8_EXPORT_PRIVATE CodeLookupResult LookupCode() const;
virtual void Iterate(RootVisitor* v) const = 0;
void IteratePc(RootVisitor* v, Address* pc_address,
@ -857,8 +856,7 @@ class OptimizedFrame : public JavaScriptFrame {
int LookupExceptionHandlerInTable(
int* data, HandlerTable::CatchPrediction* prediction) override;
virtual int FindReturnPCForTrampoline(CodeDataContainer code,
int trampoline_pc) const = 0;
virtual int FindReturnPCForTrampoline(Code code, int trampoline_pc) const = 0;
protected:
inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
@ -969,8 +967,7 @@ class MaglevFrame : public OptimizedFrame {
void Iterate(RootVisitor* v) const override;
int FindReturnPCForTrampoline(CodeDataContainer code,
int trampoline_pc) const override;
int FindReturnPCForTrampoline(Code code, int trampoline_pc) const override;
BytecodeOffset GetBytecodeOffsetForOSR() const;
@ -991,8 +988,7 @@ class TurbofanFrame : public OptimizedFrame {
void Iterate(RootVisitor* v) const override;
int FindReturnPCForTrampoline(CodeDataContainer code,
int trampoline_pc) const override;
int FindReturnPCForTrampoline(Code code, int trampoline_pc) const override;
protected:
inline explicit TurbofanFrame(StackFrameIteratorBase* iterator);

@ -248,7 +248,7 @@ class IsolateData final {
// through kRootRegister.
Address builtin_entry_table_[Builtins::kBuiltinCount] = {};
// The entries in this array are tagged pointers to CodeDataContainer objects.
// The entries in this array are tagged pointers to Code objects.
Address builtin_table_[Builtins::kBuiltinCount] = {};
LinearAllocationArea new_allocation_info_;
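The table is normally reached through the Builtins accessor rather than indexed directly; a hedged sketch (the raw read is hypothetical, shown only to make the "tagged pointers" comment concrete):

    // Sketch: typical access goes through Builtins, as elsewhere in this
    // diff.
    Code code = isolate->builtins()->code(Builtin::kIllegal);
    // A raw read of the table would re-tag the stored Address:
    // Address raw = builtin_table_[static_cast<int>(Builtin::kIllegal)];
    // Code code = Code::cast(Object(raw));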

@ -441,42 +441,37 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
// Hash data sections of builtin code objects.
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
CodeDataContainer code_data_container = builtins()->code(builtin);
Code code = builtins()->code(builtin);
DCHECK(Internals::HasHeapObjectTag(code_data_container.ptr()));
uint8_t* const code_ptr =
reinterpret_cast<uint8_t*>(code_data_container.address());
DCHECK(Internals::HasHeapObjectTag(code.ptr()));
uint8_t* const code_ptr = reinterpret_cast<uint8_t*>(code.address());
// These static asserts ensure we don't miss relevant fields. We don't
// hash the code cage base and code entry point fields; the other data
// fields must remain the same.
static_assert(CodeDataContainer::kCodePointerFieldsStrongEndOffset ==
CodeDataContainer::kCodeEntryPointOffset);
static_assert(Code::kCodePointerFieldsStrongEndOffset ==
Code::kCodeEntryPointOffset);
static_assert(CodeDataContainer::kCodeEntryPointOffsetEnd + 1 ==
CodeDataContainer::kFlagsOffset);
static_assert(CodeDataContainer::kFlagsOffsetEnd + 1 ==
CodeDataContainer::kBuiltinIdOffset);
static_assert(CodeDataContainer::kBuiltinIdOffsetEnd + 1 ==
CodeDataContainer::kKindSpecificFlagsOffset);
static_assert(CodeDataContainer::kKindSpecificFlagsOffsetEnd + 1 ==
CodeDataContainer::kUnalignedSize);
constexpr int kStartOffset = CodeDataContainer::kFlagsOffset;
static_assert(Code::kCodeEntryPointOffsetEnd + 1 == Code::kFlagsOffset);
static_assert(Code::kFlagsOffsetEnd + 1 == Code::kBuiltinIdOffset);
static_assert(Code::kBuiltinIdOffsetEnd + 1 ==
Code::kKindSpecificFlagsOffset);
static_assert(Code::kKindSpecificFlagsOffsetEnd + 1 ==
Code::kUnalignedSize);
constexpr int kStartOffset = Code::kFlagsOffset;
// |is_off_heap_trampoline| is false during builtins compilation (since
// the builtins are not trampolines yet) but it's true for off-heap
// builtin trampolines. The rest of the data fields should be the same.
// So we temporarily set |is_off_heap_trampoline| to true during hash
// computation.
bool is_off_heap_trampoline_sav =
code_data_container.is_off_heap_trampoline();
code_data_container.set_is_off_heap_trampoline_for_hash(true);
bool is_off_heap_trampoline_sav = code.is_off_heap_trampoline();
code.set_is_off_heap_trampoline_for_hash(true);
for (int j = kStartOffset; j < CodeDataContainer::kUnalignedSize; j++) {
for (int j = kStartOffset; j < Code::kUnalignedSize; j++) {
hash = base::hash_combine(hash, size_t{code_ptr[j]});
}
code_data_container.set_is_off_heap_trampoline_for_hash(
is_off_heap_trampoline_sav);
code.set_is_off_heap_trampoline_for_hash(is_off_heap_trampoline_sav);
}
// The builtins constants table is also tightly tied to embedded builtins.
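Consolidated, the idiom applied above is save/normalize/restore: a field that legitimately differs between build stages is forced to a canonical value for the duration of the byte scan so it cannot perturb the hash. A sketch using only names from this hunk:

    // Sketch: normalize |is_off_heap_trampoline| before hashing the data
    // fields, then restore the caller-visible value.
    bool saved = code.is_off_heap_trampoline();
    code.set_is_off_heap_trampoline_for_hash(true);
    for (int j = Code::kFlagsOffset; j < Code::kUnalignedSize; j++) {
      hash = base::hash_combine(hash, size_t{code_ptr[j]});
    }
    code.set_is_off_heap_trampoline_for_hash(saved);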
@ -775,7 +770,7 @@ class CallSiteBuilder {
Handle<Object> receiver(combinator->native_context().promise_function(),
isolate_);
Handle<CodeDataContainer> code(combinator->code(), isolate_);
Handle<Code> code(combinator->code(), isolate_);
// TODO(mmarchini) save Promises list from the Promise combinator
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
@ -2001,8 +1996,7 @@ Object Isolate::UnwindAndFindHandler() {
CHECK(frame->is_java_script());
if (frame->is_turbofan()) {
InstructionStream code =
frame->LookupCodeDataContainer().instruction_stream();
InstructionStream code = frame->LookupCode().instruction_stream();
// The debugger triggers lazy deopt for the "to-be-restarted" frame
// immediately when the CDP event arrives while paused.
CHECK(code.marked_for_deoptimization());
@ -2025,7 +2019,7 @@ Object Isolate::UnwindAndFindHandler() {
DCHECK(!frame->is_maglev());
debug()->clear_restart_frame();
CodeDataContainer code = *BUILTIN_CODE(this, RestartFrameTrampoline);
Code code = *BUILTIN_CODE(this, RestartFrameTrampoline);
return FoundHandler(Context(), code.InstructionStart(), 0,
code.constant_pool(), kNullAddress, frame->fp(),
visited_frames);
@ -2041,8 +2035,7 @@ Object Isolate::UnwindAndFindHandler() {
thread_local_top()->handler_ = handler->next_address();
// Gather information from the handler.
CodeDataContainer code =
frame->LookupCodeDataContainer().code_data_container();
Code code = frame->LookupCode().code();
HandlerTable table(code);
return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
table.LookupReturn(0), code.constant_pool(),
@ -2054,8 +2047,7 @@ Object Isolate::UnwindAndFindHandler() {
case StackFrame::C_WASM_ENTRY: {
StackHandler* handler = frame->top_handler();
thread_local_top()->handler_ = handler->next_address();
InstructionStream code =
frame->LookupCodeDataContainer().instruction_stream();
InstructionStream code = frame->LookupCode().instruction_stream();
HandlerTable table(code);
Address instruction_start = code.InstructionStart(this, frame->pc());
int return_offset = static_cast<int>(frame->pc() - instruction_start);
@ -2115,8 +2107,7 @@ Object Isolate::UnwindAndFindHandler() {
int offset = opt_frame->LookupExceptionHandlerInTable(nullptr, nullptr);
if (offset < 0) break;
// The code might be optimized code or a turbofanned builtin.
CodeDataContainer code =
frame->LookupCodeDataContainer().ToCodeDataContainer();
Code code = frame->LookupCode().ToCode();
// Compute the stack pointer from the frame pointer. This ensures
// that argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
@ -2150,8 +2141,7 @@ Object Isolate::UnwindAndFindHandler() {
// The code might be a dynamically generated stub or a turbofanned
// embedded builtin.
CodeDataContainer code =
stub_frame->LookupCodeDataContainer().ToCodeDataContainer();
Code code = stub_frame->LookupCode().ToCode();
if (code.kind() != CodeKind::BUILTIN || !code.is_turbofanned() ||
!code.has_handler_table()) {
break;
@ -2201,8 +2191,7 @@ Object Isolate::UnwindAndFindHandler() {
if (frame->is_baseline()) {
BaselineFrame* sp_frame = BaselineFrame::cast(js_frame);
InstructionStream code =
sp_frame->LookupCodeDataContainer().instruction_stream();
InstructionStream code = sp_frame->LookupCode().instruction_stream();
DCHECK(!code.is_off_heap_trampoline());
intptr_t pc_offset = sp_frame->GetPCForBytecodeOffset(offset);
// Patch the context register directly on the frame, so that we don't
@ -2215,8 +2204,7 @@ Object Isolate::UnwindAndFindHandler() {
InterpretedFrame::cast(js_frame)->PatchBytecodeOffset(
static_cast<int>(offset));
CodeDataContainer code =
*BUILTIN_CODE(this, InterpreterEnterAtBytecode);
Code code = *BUILTIN_CODE(this, InterpreterEnterAtBytecode);
// We subtract a frame from visited_frames because otherwise the
// shadow stack will drop the underlying interpreter entry trampoline
// in which the handler runs.
@ -2248,8 +2236,7 @@ Object Isolate::UnwindAndFindHandler() {
// Reconstruct the stack pointer from the frame pointer.
Address return_sp = js_frame->fp() - js_frame->GetSPToFPDelta();
CodeDataContainer code =
js_frame->LookupCodeDataContainer().code_data_container();
Code code = js_frame->LookupCode().code();
return FoundHandler(Context(), code.InstructionStart(), 0,
code.constant_pool(), return_sp, frame->fp(),
visited_frames);
@ -2266,9 +2253,8 @@ Object Isolate::UnwindAndFindHandler() {
USE(removed);
// If there were any materialized objects, the code should be
// marked for deopt.
DCHECK_IMPLIES(removed, frame->LookupCodeDataContainer()
.ToCodeDataContainer()
.marked_for_deoptimization());
DCHECK_IMPLIES(removed,
frame->LookupCode().ToCode().marked_for_deoptimization());
}
}
@ -2367,7 +2353,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
}
case StackFrame::STUB: {
CodeLookupResult code = frame->LookupCodeDataContainer();
CodeLookupResult code = frame->LookupCode();
if (code.kind() != CodeKind::BUILTIN || !code.has_handler_table() ||
!code.is_turbofanned()) {
break;
@ -2379,7 +2365,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
}
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
CodeLookupResult code = frame->LookupCodeDataContainer();
CodeLookupResult code = frame->LookupCode();
CatchType prediction = ToCatchType(code.GetBuiltinCatchPrediction());
if (prediction != NOT_CAUGHT) return prediction;
break;
@ -2846,7 +2832,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
if (frame->is_java_script()) {
catch_prediction = PredictException(JavaScriptFrame::cast(frame));
} else if (frame->type() == StackFrame::STUB) {
CodeLookupResult code = frame->LookupCodeDataContainer();
CodeLookupResult code = frame->LookupCode();
if (code.kind() != CodeKind::BUILTIN || !code.has_handler_table() ||
!code.is_turbofanned()) {
continue;
@ -3921,9 +3907,8 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
Address instruction_start = d.InstructionStartOfBuiltin(builtin);
Handle<CodeDataContainer> trampoline =
isolate->factory()->NewOffHeapTrampolineFor(
builtins->code_handle(builtin), instruction_start);
Handle<Code> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
builtins->code_handle(builtin), instruction_start);
// From this point onwards, the old builtin code object is unreachable and
// will be collected by the next GC.

@ -110,8 +110,8 @@ class GeneratedCode {
return GeneratedCode(isolate, reinterpret_cast<Signature*>(buffer));
}
template <typename CodeOrCodeDataContainer>
static GeneratedCode FromCode(CodeOrCodeDataContainer code) {
template <typename CodeOrCode>
static GeneratedCode FromCode(CodeOrCode code) {
return FromAddress(code.GetIsolate(), code.entry());
}
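FromCode simply resolves the code object's entry address and isolate; invocation then goes through the usual Call helper. A hedged usage sketch (the signature is chosen for illustration):

    // Sketch: wrap a Code object so its entry point can be called like a
    // plain C function (via the simulator on simulated architectures).
    auto fn = GeneratedCode<int32_t(int32_t, int32_t)>::FromCode(code);
    int32_t result = fn.Call(1, 2);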

@ -107,7 +107,7 @@ void FullEvacuationVerifier::VerifyPointers(MaybeObjectSlot start,
void FullEvacuationVerifier::VerifyCodePointer(CodeObjectSlot slot) {
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
// The slot might contain smi during CodeDataContainer creation, so skip it.
// The slot might contain a Smi during Code creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
@ -153,7 +153,7 @@ void YoungGenerationEvacuationVerifier::VerifyPointers(MaybeObjectSlot start,
void YoungGenerationEvacuationVerifier::VerifyCodePointer(CodeObjectSlot slot) {
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
// The slot might contain smi during CodeDataContainer creation, so skip it.
// The slot might contain a Smi during Code creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}

@ -74,13 +74,12 @@ Handle<AccessorPair> FactoryBase<Impl>::NewAccessorPair() {
}
template <typename Impl>
Handle<CodeDataContainer> FactoryBase<Impl>::NewCodeDataContainer(
int flags, AllocationType allocation) {
Map map = read_only_roots().code_data_container_map();
Handle<Code> FactoryBase<Impl>::NewCode(int flags, AllocationType allocation) {
Map map = read_only_roots().code_map();
int size = map.instance_size();
DCHECK_NE(allocation, AllocationType::kYoung);
CodeDataContainer data_container = CodeDataContainer::cast(
AllocateRawWithImmortalMap(size, allocation, map));
Code data_container =
Code::cast(AllocateRawWithImmortalMap(size, allocation, map));
DisallowGarbageCollection no_gc;
data_container.set_kind_specific_flags(flags, kRelaxedStore);
Isolate* isolate_for_sandbox = impl()->isolate_for_sandbox();
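A hedged call-site sketch for the renamed factory method (factory and flags are hypothetical locals; kOld follows from the DCHECK above, which rules out young allocation):

    // Sketch: allocate a Code object holding the given kind-specific flags.
    Handle<Code> code = factory->NewCode(flags, AllocationType::kOld);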

@ -98,9 +98,8 @@ class FactoryBase : public TorqueGeneratedFactory<Impl> {
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
// Creates a new CodeDataContainer for a InstructionStream object.
Handle<CodeDataContainer> NewCodeDataContainer(int flags,
AllocationType allocation);
// Creates a new Code for an InstructionStream object.
Handle<Code> NewCode(int flags, AllocationType allocation);
// Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(

Some files were not shown because too many files have changed in this diff.