[codet] Remove the CodeT type alias

.. now that it unconditionally refers to CodeDataContainer. All
previous references to 'CodeT' (the type and as part of names) are now
updated to 'CodeDataContainer', including 'codet', 'CODET', etc.

Bug: v8:13654
Change-Id: I7abbba040091eddf3ef09028a891aed460363929
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4165619
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85325}
parent dab4bb5ccc, commit dd38db94df
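The change below is mechanical: every spelling of the alias (and names derived
from it) is rewritten to the underlying type. As a minimal illustrative sketch
of the pattern, not a snippet taken from this diff (the alias definition shown
is a simplification and an assumption about its exact form):

    // Before this CL: CodeT was an alias that unconditionally referred
    // to CodeDataContainer, so the two names were interchangeable.
    using CodeT = CodeDataContainer;
    Handle<CodeT> code = BUILTIN_CODE(isolate, CompileLazy);

    // After this CL: the alias is removed and the underlying type is
    // spelled out at every use site.
    Handle<CodeDataContainer> code = BUILTIN_CODE(isolate, CompileLazy);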
@@ -83,7 +83,8 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
         InstantiateFunction(isolate,
                             Handle<FunctionTemplateInfo>::cast(getter)),
         Object);
-    Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
+    Handle<CodeDataContainer> trampoline =
+        BUILTIN_CODE(isolate, DebugBreakTrampoline);
     Handle<JSFunction>::cast(getter)->set_code(*trampoline);
   }
   if (setter->IsFunctionTemplateInfo() &&
@@ -93,7 +94,8 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
         InstantiateFunction(isolate,
                             Handle<FunctionTemplateInfo>::cast(setter)),
         Object);
-    Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
+    Handle<CodeDataContainer> trampoline =
+        BUILTIN_CODE(isolate, DebugBreakTrampoline);
     Handle<JSFunction>::cast(setter)->set_code(*trampoline);
   }
   RETURN_ON_EXCEPTION(
@@ -6642,7 +6642,8 @@ Local<Context> NewContext(
   // TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't
  // fail.
   // Sanity-check that the isolate is initialized and usable.
-  CHECK(i_isolate->builtins()->code(i::Builtin::kIllegal).IsCodeT());
+  CHECK(
+      i_isolate->builtins()->code(i::Builtin::kIllegal).IsCodeDataContainer());

   TRACE_EVENT_CALL_STATS_SCOPED(i_isolate, "v8", "V8.NewContext");
   API_RCS_SCOPE(i_isolate, Context, New);
@@ -9707,7 +9708,7 @@ JSEntryStubs Isolate::GetJSEntryStubs() {
       {i::Builtin::kJSRunMicrotasksEntry,
        &entry_stubs.js_run_microtasks_entry_stub}}};
   for (auto& pair : stubs) {
-    i::CodeT js_entry = i_isolate->builtins()->code(pair.first);
+    i::CodeDataContainer js_entry = i_isolate->builtins()->code(pair.first);
     pair.second->code.start =
         reinterpret_cast<const void*>(js_entry.InstructionStart());
     pair.second->code.length_in_bytes = js_entry.InstructionSize();
@@ -409,7 +409,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
  {
     ScratchRegisterScope temps(this);
     Register scratch = temps.AcquireScratch();
-    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
+    __ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result,
+                                                      scratch);
     __ b(eq, on_result);
     __ mov(scratch, __ ClearedValue());
     StoreTaggedFieldNoWriteBarrier(

@@ -464,7 +464,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   // Is it marked_for_deoptimization? If yes, clear the slot.
   {
     ScratchRegisterScope temps(this);
-    __ JumpIfCodeTIsMarkedForDeoptimization(
+    __ JumpIfCodeDataContainerIsMarkedForDeoptimization(
         scratch_and_result, temps.AcquireScratch(), &clear_slot);
     __ B(on_result);
   }

@@ -74,7 +74,8 @@ class BaselineCompilerTask {
       return;
     }

-    shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
+    shared_function_info_->set_baseline_code(ToCodeDataContainer(*code),
+                                             kReleaseStore);
     if (v8_flags.trace_baseline_concurrent_compilation) {
       CodeTracer::Scope scope(isolate->GetCodeTracer());
       std::stringstream ss;

@@ -387,7 +387,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   // Is it marked_for_deoptimization? If yes, clear the slot.
   {
     ScratchRegisterScope temps(this);
-    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result);
+    __ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result);
     __ j(equal, on_result, distance);
     __ mov(FieldOperand(feedback_vector,
                         FeedbackVector::OffsetOfElementAt(slot.ToInt())),

@@ -383,8 +383,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   {
     ScratchRegisterScope temps(this);
     Register scratch = temps.AcquireScratch();
-    __ TestCodeTIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch,
-                                                 eq, on_result);
+    __ TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
+        scratch_and_result, scratch, eq, on_result);
     __ li(scratch, __ ClearedValue());
     StoreTaggedFieldNoWriteBarrier(
         feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),

@@ -393,8 +393,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   {
     ScratchRegisterScope temps(this);
     Register scratch = temps.AcquireScratch();
-    __ TestCodeTIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch,
-                                                 eq, on_result);
+    __ TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
+        scratch_and_result, scratch, eq, on_result);
     __ li(scratch, __ ClearedValue());
     StoreTaggedFieldNoWriteBarrier(
         feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),

@@ -537,7 +537,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   {
     ScratchRegisterScope temps(this);
     Register scratch = temps.AcquireScratch();
-    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch, r0);
+    __ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result,
+                                                      scratch, r0);
     __ beq(on_result, cr0);
     __ mov(scratch, __ ClearedValue());
     StoreTaggedFieldNoWriteBarrier(

@@ -383,7 +383,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   // Is it marked_for_deoptimization? If yes, clear the slot.
   {
     ScratchRegisterScope temps(this);
-    __ JumpIfCodeTIsMarkedForDeoptimization(
+    __ JumpIfCodeDataContainerIsMarkedForDeoptimization(
         scratch_and_result, temps.AcquireScratch(), &clear_slot);
     Jump(on_result);
   }

@@ -550,7 +550,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
   {
     ScratchRegisterScope temps(this);
     Register scratch = temps.AcquireScratch();
-    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
+    __ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result,
+                                                      scratch);
     __ beq(on_result);
     __ mov(scratch, __ ClearedValue());
     StoreTaggedFieldNoWriteBarrier(

@@ -410,7 +410,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,

   // Is it marked_for_deoptimization? If yes, clear the slot.
   {
-    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result);
+    __ TestCodeDataContainerIsMarkedForDeoptimization(scratch_and_result);
     __ j(equal, on_result, distance);
     __ StoreTaggedField(
         FieldOperand(feedback_vector,
@@ -311,8 +311,8 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
   Generate_JSBuiltinsConstructStubHelper(masm);
 }

-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
-                                  Register scratch) {
+static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
+                                              Register code, Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
   __ ldr(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
@@ -327,11 +327,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                     Label* is_baseline) {
   ASM_CODE_COMMENT(masm);
   Label done;
-  __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+  __ CompareObjectType(sfi_data, scratch1, scratch1, CODE_DATA_CONTAINER_TYPE);
   if (v8_flags.debug_code) {
     Label not_baseline;
     __ b(ne, &not_baseline);
-    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
     __ b(eq, is_baseline);
     __ bind(&not_baseline);
   } else {
@@ -631,7 +631,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   //
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
-  Handle<CodeT> trampoline_code =
+  Handle<CodeDataContainer> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
   USE(pushed_stack_space);
@@ -769,9 +769,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     }

     // Invoke the code.
-    Handle<CodeT> builtin = is_construct
-                                ? BUILTIN_CODE(masm->isolate(), Construct)
-                                : masm->isolate()->builtins()->Call();
+    Handle<CodeDataContainer> builtin =
+        is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+                     : masm->isolate()->builtins()->Call();
     __ Call(builtin, RelocInfo::CODE_TARGET);

     // Exit the JS frame and remove the parameters (except function), and
@@ -1458,7 +1458,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(

     // Tail call to the array construct stub (still in the caller
     // context at this point).
-    Handle<CodeT> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+    Handle<CodeDataContainer> code =
+        BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
     __ Jump(code, RelocInfo::CODE_TARGET);
   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
     // Call the constructor with r0, r1, and r3 unmodified.
@@ -2001,7 +2002,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 // static
 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeT> code) {
+                                               Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- r1 : target
   //  -- r0 : number of parameters on the stack
@@ -2067,9 +2068,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }

 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<CodeT> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(
+    MacroAssembler* masm, CallOrConstructMode mode,
+    Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- r0 : the number of arguments
   //  -- r3 : the new.target (for [[Construct]] calls)
@@ -3567,7 +3568,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // always have baseline code.
   if (!is_osr) {
     Label start_with_baseline;
-    __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
+    __ CompareObjectType(code_obj, r3, r3, CODE_DATA_CONTAINER_TYPE);
     __ b(eq, &start_with_baseline);

     // Start with bytecode as there is no baseline code.
@@ -3580,12 +3581,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     // Start with baseline code.
     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
-    __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
+    __ CompareObjectType(code_obj, r3, r3, CODE_DATA_CONTAINER_TYPE);
     __ Assert(eq, AbortReason::kExpectedBaselineData);
   }

   if (v8_flags.debug_code) {
-    AssertCodeTIsBaseline(masm, code_obj, r3);
+    AssertCodeDataContainerIsBaseline(masm, code_obj, r3);
   }
   __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
@@ -388,19 +388,20 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
   __ Unreachable();
 }

-static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
-                                              Register code, Register scratch) {
+static void AssertCodeDataContainerIsBaselineAllowClobber(MacroAssembler* masm,
+                                                          Register code,
+                                                          Register scratch) {
   // Verify that the code kind is baseline code via the CodeKind.
-  __ Ldr(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
-  __ DecodeField<CodeT::KindField>(scratch);
+  __ Ldr(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
+  __ DecodeField<CodeDataContainer::KindField>(scratch);
   __ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
   __ Assert(eq, AbortReason::kExpectedBaselineData);
 }

-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
-                                  Register scratch) {
+static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
+                                              Register code, Register scratch) {
   DCHECK(!AreAliased(code, scratch));
-  return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
+  return AssertCodeDataContainerIsBaselineAllowClobber(masm, code, scratch);
 }

 // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
@@ -411,11 +412,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                     Label* is_baseline) {
   ASM_CODE_COMMENT(masm);
   Label done;
-  __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+  __ CompareObjectType(sfi_data, scratch1, scratch1, CODE_DATA_CONTAINER_TYPE);
   if (v8_flags.debug_code) {
     Label not_baseline;
     __ B(ne, &not_baseline);
-    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
     __ B(eq, is_baseline);
     __ Bind(&not_baseline);
   } else {
@@ -758,7 +759,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   //
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
-  Handle<CodeT> trampoline_code =
+  Handle<CodeDataContainer> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);

@@ -932,9 +933,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // x28 : pointer cage base register (kPtrComprCageBaseRegister).
   // x29 : frame pointer (fp).

-  Handle<CodeT> builtin = is_construct
-                              ? BUILTIN_CODE(masm->isolate(), Construct)
-                              : masm->isolate()->builtins()->Call();
+  Handle<CodeDataContainer> builtin =
+      is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+                   : masm->isolate()->builtins()->Call();
   __ Call(builtin, RelocInfo::CODE_TARGET);

   // Exit the JS internal frame and remove the parameters (except function),
@@ -2334,7 +2335,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
 // static
 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeT> code) {
+                                               Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- x1 : target
   //  -- x0 : number of parameters on the stack
@@ -2407,9 +2408,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }

 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<CodeT> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(
+    MacroAssembler* masm, CallOrConstructMode mode,
+    Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- x0 : the number of arguments
   //  -- x3 : the new.target (for [[Construct]] calls)
@@ -5705,7 +5706,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // always have baseline code.
   if (!is_osr) {
     Label start_with_baseline;
-    __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
+    __ CompareObjectType(code_obj, x3, x3, CODE_DATA_CONTAINER_TYPE);
     __ B(eq, &start_with_baseline);

     // Start with bytecode as there is no baseline code.
@@ -5718,12 +5719,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     // Start with baseline code.
     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
-    __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
+    __ CompareObjectType(code_obj, x3, x3, CODE_DATA_CONTAINER_TYPE);
     __ Assert(eq, AbortReason::kExpectedBaselineData);
   }

   if (v8_flags.debug_code) {
-    AssertCodeTIsBaseline(masm, code_obj, x3);
+    AssertCodeDataContainerIsBaseline(masm, code_obj, x3);
   }
   __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
@@ -1808,7 +1808,7 @@ TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
 void ArrayBuiltinsAssembler::TailCallArrayConstructorStub(
     const Callable& callable, TNode<Context> context, TNode<JSFunction> target,
     TNode<HeapObject> allocation_site_or_undefined, TNode<Int32T> argc) {
-  TNode<CodeT> code = HeapConstant(callable.code());
+  TNode<CodeDataContainer> code = HeapConstant(callable.code());

   // We are going to call here ArrayNoArgumentsConstructor or
   // ArraySingleArgumentsConstructor which in addition to the register arguments

@@ -180,7 +180,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
   // which almost doubles the size of `await` builtins (unnecessarily).
   TNode<Smi> builtin_id = LoadObjectField<Smi>(
       shared_info, SharedFunctionInfo::kFunctionDataOffset);
-  TNode<CodeT> code = LoadBuiltin(builtin_id);
+  TNode<CodeDataContainer> code = LoadBuiltin(builtin_id);
   StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
 }

@@ -252,7 +252,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
                                  shared_function_info);
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
-  TNode<CodeT> lazy_builtin =
+  TNode<CodeDataContainer> lazy_builtin =
       HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
   StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
   Return(result);

@@ -104,7 +104,7 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {

   BIND(&tailcall_to_shared);
   // Tail call into code object on the SharedFunctionInfo.
-  TNode<CodeT> code = GetSharedFunctionInfoCode(shared);
+  TNode<CodeDataContainer> code = GetSharedFunctionInfoCode(shared);
   TailCallJSCode(code, context, function, new_target, arg_count);
 }

@@ -1230,7 +1230,7 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
       Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));

   const bool builtin_exit_frame = true;
-  TNode<CodeT> code = HeapConstant(
+  TNode<CodeDataContainer> code = HeapConstant(
       CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame));

   // Unconditionally push argc, target and new target as extra stack arguments.
@@ -1568,7 +1568,7 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
   // On failure, tail call back to regular JavaScript by re-calling the given
   // function which has been reset to the compile lazy builtin.

-  TNode<CodeT> code = LoadJSFunctionCode(function);
+  TNode<CodeDataContainer> code = LoadJSFunctionCode(function);
   TailCallJSCode(code, context, function, new_target, arg_count);
 }
@@ -15,7 +15,7 @@ namespace v8 {
 namespace internal {

 void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
-    TNode<CodeT> code, TNode<JSFunction> function) {
+    TNode<CodeDataContainer> code, TNode<JSFunction> function) {
   auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
   auto context = Parameter<Context>(Descriptor::kContext);
   auto new_target = Parameter<Object>(Descriptor::kNewTarget);
@@ -25,7 +25,8 @@ void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
 void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
     Runtime::FunctionId function_id, TNode<JSFunction> function) {
   auto context = Parameter<Context>(Descriptor::kContext);
-  TNode<CodeT> code = CAST(CallRuntime(function_id, context, function));
+  TNode<CodeDataContainer> code =
+      CAST(CallRuntime(function_id, context, function));
   GenerateTailCallToJSCode(code, function);
 }

@@ -63,8 +64,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
   TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
       feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset);

-  // Optimized code slot is a weak reference to CodeT object.
-  TNode<CodeT> optimized_code = CAST(GetHeapObjectAssumeWeak(
+  // Optimized code slot is a weak reference to CodeDataContainer object.
+  TNode<CodeDataContainer> optimized_code = CAST(GetHeapObjectAssumeWeak(
       maybe_optimized_code_entry, &heal_optimized_code_slot));

   // Check if the optimized code is marked for deopt. If it is, call the
@@ -100,7 +101,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
   TNode<SharedFunctionInfo> shared =
       CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
   TVARIABLE(Uint16T, sfi_data_type);
-  TNode<CodeT> sfi_code =
+  TNode<CodeDataContainer> sfi_code =
       GetSharedFunctionInfoCode(shared, &sfi_data_type, &compile_function);

   TNode<HeapObject> feedback_cell_value = LoadFeedbackCellValue(function);
@@ -129,17 +130,18 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
   // be the InterpreterEntryTrampoline to start executing existing bytecode.
   BIND(&maybe_use_sfi_code);
   Label tailcall_code(this), baseline(this);
-  TVARIABLE(CodeT, code);
+  TVARIABLE(CodeDataContainer, code);

   // Check if we have baseline code.
-  GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODET_TYPE), &baseline);
+  GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODE_DATA_CONTAINER_TYPE),
+         &baseline);

   code = sfi_code;
   Goto(&tailcall_code);

   BIND(&baseline);
   // Ensure we have a feedback vector.
-  code = Select<CodeT>(
+  code = Select<CodeDataContainer>(
       IsFeedbackVector(feedback_cell_value), [=]() { return sfi_code; },
       [=]() {
         return CAST(CallRuntime(Runtime::kInstallBaselineCode,
@@ -164,7 +166,8 @@ TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
 TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
   auto function = Parameter<JSFunction>(Descriptor::kTarget);

-  TNode<CodeT> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+  TNode<CodeDataContainer> code =
+      HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
   // Set the code slot inside the JSFunction to CompileLazy.
   StoreObjectField(function, JSFunction::kCodeOffset, code);
   GenerateTailCallToJSCode(code, function);

@@ -17,7 +17,8 @@ class LazyBuiltinsAssembler : public CodeStubAssembler {
   explicit LazyBuiltinsAssembler(compiler::CodeAssemblerState* state)
       : CodeStubAssembler(state) {}

-  void GenerateTailCallToJSCode(TNode<CodeT> code, TNode<JSFunction> function);
+  void GenerateTailCallToJSCode(TNode<CodeDataContainer> code,
+                                TNode<JSFunction> function);

   void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
                                       TNode<JSFunction> function);
@@ -45,12 +45,6 @@ TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
   return IntPtrConstant(0);
 }

-// If code is a builtin, return the address to the (possibly embedded) builtin
-// code entry, otherwise return the entry of the code object itself.
-TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<CodeT> code) {
-  return GetCodeEntry(code);
-}
-
 // -----------------------------------------------------------------------------
 // ES6 section 21.2 RegExp Objects

@@ -522,7 +516,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
 #endif

   GotoIf(TaggedIsSmi(var_code.value()), &runtime);
-  TNode<CodeT> code = CAST(var_code.value());
+  TNode<CodeDataContainer> code = CAST(var_code.value());

   Label if_success(this), if_exception(this, Label::kDeferred);
   {
@@ -586,7 +580,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
     MachineType arg8_type = type_tagged;
     TNode<JSRegExp> arg8 = regexp;

-    TNode<RawPtrT> code_entry = LoadCodeObjectEntry(code);
+    TNode<RawPtrT> code_entry = GetCodeEntry(code);

     // AIX uses function descriptors on CFunction calls. code_entry in this case
     // may also point to a Regex interpreter entry trampoline which does not

@@ -21,8 +21,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
   TNode<Smi> SmiZero();
   TNode<IntPtrT> IntPtrZero();

-  TNode<RawPtrT> LoadCodeObjectEntry(TNode<CodeT> code);
-
   // Allocate either a JSRegExpResult or a JSRegExpResultWithIndices (depending
   // on has_indices) with the given length (the number of captures, including
   // the match itself), index (the index where the match starts), and input
@@ -121,7 +121,7 @@ const char* Builtins::Lookup(Address pc) {
   return nullptr;
 }

-Handle<CodeT> Builtins::CallFunction(ConvertReceiverMode mode) {
+Handle<CodeDataContainer> Builtins::CallFunction(ConvertReceiverMode mode) {
   switch (mode) {
     case ConvertReceiverMode::kNullOrUndefined:
       return code_handle(Builtin::kCallFunction_ReceiverIsNullOrUndefined);
@@ -133,7 +133,7 @@ Handle<CodeT> Builtins::CallFunction(ConvertReceiverMode mode) {
   UNREACHABLE();
 }

-Handle<CodeT> Builtins::Call(ConvertReceiverMode mode) {
+Handle<CodeDataContainer> Builtins::Call(ConvertReceiverMode mode) {
   switch (mode) {
     case ConvertReceiverMode::kNullOrUndefined:
       return code_handle(Builtin::kCall_ReceiverIsNullOrUndefined);
@@ -145,7 +145,8 @@ Handle<CodeT> Builtins::Call(ConvertReceiverMode mode) {
   UNREACHABLE();
 }

-Handle<CodeT> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
+Handle<CodeDataContainer> Builtins::NonPrimitiveToPrimitive(
+    ToPrimitiveHint hint) {
   switch (hint) {
     case ToPrimitiveHint::kDefault:
       return code_handle(Builtin::kNonPrimitiveToPrimitive_Default);
@@ -157,7 +158,8 @@ Handle<CodeT> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
   UNREACHABLE();
 }

-Handle<CodeT> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
+Handle<CodeDataContainer> Builtins::OrdinaryToPrimitive(
+    OrdinaryToPrimitiveHint hint) {
   switch (hint) {
     case OrdinaryToPrimitiveHint::kNumber:
       return code_handle(Builtin::kOrdinaryToPrimitive_Number);
@@ -179,21 +181,21 @@ FullObjectSlot Builtins::builtin_tier0_slot(Builtin builtin) {
   return FullObjectSlot(location);
 }

-void Builtins::set_code(Builtin builtin, CodeT code) {
+void Builtins::set_code(Builtin builtin, CodeDataContainer code) {
   DCHECK_EQ(builtin, code.builtin_id());
   DCHECK(Internals::HasHeapObjectTag(code.ptr()));
   // The given builtin may be uninitialized thus we cannot check its type here.
   isolate_->builtin_table()[Builtins::ToInt(builtin)] = code.ptr();
 }

-CodeT Builtins::code(Builtin builtin) {
+CodeDataContainer Builtins::code(Builtin builtin) {
   Address ptr = isolate_->builtin_table()[Builtins::ToInt(builtin)];
-  return CodeT::cast(Object(ptr));
+  return CodeDataContainer::cast(Object(ptr));
 }

-Handle<CodeT> Builtins::code_handle(Builtin builtin) {
+Handle<CodeDataContainer> Builtins::code_handle(Builtin builtin) {
   Address* location = &isolate_->builtin_table()[Builtins::ToInt(builtin)];
-  return Handle<CodeT>(location);
+  return Handle<CodeDataContainer>(location);
 }

 // static
@@ -229,7 +231,7 @@ CallInterfaceDescriptor Builtins::CallInterfaceDescriptorFor(Builtin builtin) {

 // static
 Callable Builtins::CallableFor(Isolate* isolate, Builtin builtin) {
-  Handle<CodeT> code = isolate->builtins()->code_handle(builtin);
+  Handle<CodeDataContainer> code = isolate->builtins()->code_handle(builtin);
   return Callable{code, CallInterfaceDescriptorFor(builtin)};
 }

@@ -256,7 +258,7 @@ void Builtins::PrintBuiltinCode() {
                      base::CStrVector(v8_flags.print_builtin_code_filter))) {
       CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
       OFStream os(trace_scope.file());
-      CodeT builtin_code = code(builtin);
+      CodeDataContainer builtin_code = code(builtin);
       builtin_code.Disassemble(builtin_name, os, isolate_);
       os << "\n";
     }
@@ -270,7 +272,7 @@ void Builtins::PrintBuiltinSize() {
        ++builtin) {
     const char* builtin_name = name(builtin);
     const char* kind = KindNameOf(builtin);
-    CodeT code = Builtins::code(builtin);
+    CodeDataContainer code = Builtins::code(builtin);
     PrintF(stdout, "%s Builtin, %s, %d\n", kind, builtin_name,
            code.InstructionSize());
   }
@@ -331,7 +333,7 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
   int i = 0;
   HandleScope scope(isolate);
   for (; i < ToInt(Builtin::kFirstBytecodeHandler); i++) {
-    Handle<CodeT> builtin_code(&builtins[i]);
+    Handle<CodeDataContainer> builtin_code(&builtins[i]);
     Handle<AbstractCode> code = ToAbstractCode(builtin_code, isolate);
     PROFILE(isolate, CodeCreateEvent(LogEventListener::CodeTag::kBuiltin, code,
                                      Builtins::name(FromInt(i))));
@@ -339,7 +341,7 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {

   static_assert(kLastBytecodeHandlerPlusOne == kBuiltinCount);
   for (; i < kBuiltinCount; i++) {
-    Handle<CodeT> builtin_code(&builtins[i]);
+    Handle<CodeDataContainer> builtin_code(&builtins[i]);
     Handle<AbstractCode> code = ToAbstractCode(builtin_code, isolate);
     interpreter::Bytecode bytecode =
         builtin_metadata[i].data.bytecode_and_scale.bytecode;
@@ -138,17 +138,19 @@ class Builtins {
   }

   // Convenience wrappers.
-  Handle<CodeT> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
-  Handle<CodeT> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
-  Handle<CodeT> NonPrimitiveToPrimitive(
+  Handle<CodeDataContainer> CallFunction(
+      ConvertReceiverMode = ConvertReceiverMode::kAny);
+  Handle<CodeDataContainer> Call(
+      ConvertReceiverMode = ConvertReceiverMode::kAny);
+  Handle<CodeDataContainer> NonPrimitiveToPrimitive(
       ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
-  Handle<CodeT> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
+  Handle<CodeDataContainer> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);

   // Used by CreateOffHeapTrampolines in isolate.cc.
-  void set_code(Builtin builtin, CodeT code);
+  void set_code(Builtin builtin, CodeDataContainer code);

-  V8_EXPORT_PRIVATE CodeT code(Builtin builtin);
-  V8_EXPORT_PRIVATE Handle<CodeT> code_handle(Builtin builtin);
+  V8_EXPORT_PRIVATE CodeDataContainer code(Builtin builtin);
+  V8_EXPORT_PRIVATE Handle<CodeDataContainer> code_handle(Builtin builtin);

   static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin);
   V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate,
@@ -192,8 +194,8 @@ class Builtins {
   }

   // True, iff the given code object is a builtin with off-heap embedded code.
-  template <typename CodeOrCodeT>
-  static bool IsIsolateIndependentBuiltin(CodeOrCodeT code) {
+  template <typename CodeOrCodeDataContainer>
+  static bool IsIsolateIndependentBuiltin(CodeOrCodeDataContainer code) {
     Builtin builtin = code.builtin_id();
     return Builtins::IsBuiltinId(builtin) &&
            Builtins::IsIsolateIndependent(builtin);
@@ -287,10 +289,10 @@ class Builtins {

   enum class CallOrConstructMode { kCall, kConstruct };
   static void Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                              Handle<CodeT> code);
-  static void Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                     CallOrConstructMode mode,
-                                                     Handle<CodeT> code);
+                                              Handle<CodeDataContainer> code);
+  static void Generate_CallOrConstructForwardVarargs(
+      MacroAssembler* masm, CallOrConstructMode mode,
+      Handle<CodeDataContainer> code);

   enum class InterpreterEntryTrampolineMode {
     // The version of InterpreterEntryTrampoline used by default.
@@ -414,7 +414,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,

   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
-  Handle<CodeT> trampoline_code =
+  Handle<CodeDataContainer> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);

@@ -513,9 +513,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     __ mov(edi, Operand(scratch2, EntryFrameConstants::kFunctionArgOffset));

     // Invoke the code.
-    Handle<CodeT> builtin = is_construct
-                                ? BUILTIN_CODE(masm->isolate(), Construct)
-                                : masm->isolate()->builtins()->Call();
+    Handle<CodeDataContainer> builtin =
+        is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+                     : masm->isolate()->builtins()->Call();
     __ Call(builtin, RelocInfo::CODE_TARGET);

     // Exit the internal frame. Notice that this also removes the empty.
@@ -555,12 +555,12 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
   __ bind(&done);
 }

-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
-                                  Register scratch) {
+static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
+                                              Register code, Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
-  __ mov(scratch, FieldOperand(code, CodeT::kFlagsOffset));
-  __ DecodeField<CodeT::KindField>(scratch);
+  __ mov(scratch, FieldOperand(code, CodeDataContainer::kFlagsOffset));
+  __ DecodeField<CodeDataContainer::KindField>(scratch);
   __ cmp(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
   __ Assert(equal, AbortReason::kExpectedBaselineData);
 }
@@ -573,11 +573,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   Label done;
   __ LoadMap(scratch1, sfi_data);

-  __ CmpInstanceType(scratch1, CODET_TYPE);
+  __ CmpInstanceType(scratch1, CODE_DATA_CONTAINER_TYPE);
   if (v8_flags.debug_code) {
     Label not_baseline;
     __ j(not_equal, &not_baseline);
-    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
     __ j(equal, is_baseline);
     __ bind(&not_baseline);
   } else {
@@ -689,7 +689,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

   __ bind(&is_baseline);
   __ Pop(eax);
-  __ CmpObjectType(ecx, CODET_TYPE, ecx);
+  __ CmpObjectType(ecx, CODE_DATA_CONTAINER_TYPE, ecx);
   __ Assert(equal, AbortReason::kMissingBytecodeArray);

   __ bind(&ok);
@@ -2054,7 +2054,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 // static
 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeT> code) {
+                                               Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- edi : target
   //  -- esi : context for the Call / Construct builtin
@@ -2147,9 +2147,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }

 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<CodeT> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(
+    MacroAssembler* masm, CallOrConstructMode mode,
+    Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments
   //  -- edi : the target to call (can be any Object)
@@ -4208,7 +4208,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // always have baseline code.
   if (!is_osr) {
     Label start_with_baseline;
-    __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
+    __ CmpObjectType(code_obj, CODE_DATA_CONTAINER_TYPE,
+                     kInterpreterBytecodeOffsetRegister);
     __ j(equal, &start_with_baseline);

     // Start with bytecode as there is no baseline code.
@@ -4221,12 +4222,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
-    __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
+    __ CmpObjectType(code_obj, CODE_DATA_CONTAINER_TYPE,
+                     kInterpreterBytecodeOffsetRegister);
     __ Assert(equal, AbortReason::kExpectedBaselineData);
   }

   if (v8_flags.debug_code) {
-    AssertCodeTIsBaseline(masm, code_obj, ecx);
+    AssertCodeDataContainerIsBaseline(masm, code_obj, ecx);
   }
   __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
@@ -299,12 +299,12 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
   Generate_JSBuiltinsConstructStubHelper(masm);
 }

-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
-                                  Register scratch) {
+static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
+                                              Register code, Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
-  __ Ld_d(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
-  __ DecodeField<CodeT::KindField>(scratch);
+  __ Ld_d(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
+  __ DecodeField<CodeDataContainer::KindField>(scratch);
   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
             Operand(static_cast<int>(CodeKind::BASELINE)));
 }
@@ -320,12 +320,12 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   __ GetObjectType(sfi_data, scratch1, scratch1);
   if (v8_flags.debug_code) {
     Label not_baseline;
-    __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
-    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+    __ Branch(&not_baseline, ne, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
+    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
     __ Branch(is_baseline);
     __ bind(&not_baseline);
   } else {
-    __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+    __ Branch(is_baseline, eq, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
   }
   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
   __ Ld_d(sfi_data,
@@ -648,7 +648,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.

-  Handle<CodeT> trampoline_code =
+  Handle<CodeDataContainer> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);

@@ -754,9 +754,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // s7 is cp. Do not init.

   // Invoke the code.
-  Handle<CodeT> builtin = is_construct
-                              ? BUILTIN_CODE(masm->isolate(), Construct)
-                              : masm->isolate()->builtins()->Call();
+  Handle<CodeDataContainer> builtin =
+      is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+                   : masm->isolate()->builtins()->Call();
   __ Call(builtin, RelocInfo::CODE_TARGET);

   // Leave internal frame.
@@ -2012,7 +2012,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(

 // static
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeT> code) {
+                                               Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- a1 : target
   //  -- a0 : number of parameters on the stack
@@ -2081,9 +2081,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }

 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<CodeT> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(
+    MacroAssembler* masm, CallOrConstructMode mode,
+    Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments
   //  -- a3 : the new.target (for [[Construct]] calls)
@@ -3586,7 +3586,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (!is_osr) {
     Label start_with_baseline;
     __ GetObjectType(code_obj, t2, t2);
-    __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
+    __ Branch(&start_with_baseline, eq, t2, Operand(CODE_DATA_CONTAINER_TYPE));

     // Start with bytecode as there is no baseline code.
     Builtin builtin_id = next_bytecode
@@ -3599,11 +3599,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
     __ GetObjectType(code_obj, t2, t2);
-    __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
+    __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
+              Operand(CODE_DATA_CONTAINER_TYPE));
   }

   if (v8_flags.debug_code) {
-    AssertCodeTIsBaseline(masm, code_obj, t2);
+    AssertCodeDataContainerIsBaseline(masm, code_obj, t2);
   }

   __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);

@@ -299,12 +299,12 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
   Generate_JSBuiltinsConstructStubHelper(masm);
 }

-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
-                                  Register scratch) {
+static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
+                                              Register code, Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
-  __ Ld(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
-  __ DecodeField<CodeT::KindField>(scratch);
+  __ Ld(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
+  __ DecodeField<CodeDataContainer::KindField>(scratch);
   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
             Operand(static_cast<int>(CodeKind::BASELINE)));
 }
@@ -320,12 +320,12 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   __ GetObjectType(sfi_data, scratch1, scratch1);
   if (v8_flags.debug_code) {
     Label not_baseline;
-    __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
-    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+    __ Branch(&not_baseline, ne, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
+    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
     __ Branch(is_baseline);
     __ bind(&not_baseline);
   } else {
-    __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+    __ Branch(is_baseline, eq, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));
   }
   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
   __ Ld(sfi_data,
@@ -649,7 +649,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.

-  Handle<CodeT> trampoline_code =
+  Handle<CodeDataContainer> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);

@@ -755,9 +755,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // s7 is cp. Do not init.

   // Invoke the code.
-  Handle<CodeT> builtin = is_construct
-                              ? BUILTIN_CODE(masm->isolate(), Construct)
-                              : masm->isolate()->builtins()->Call();
+  Handle<CodeDataContainer> builtin =
+      is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+                   : masm->isolate()->builtins()->Call();
   __ Call(builtin, RelocInfo::CODE_TARGET);

   // Leave internal frame.
@@ -2005,7 +2005,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(

 // static
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeT> code) {
+                                               Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- a1 : target
   //  -- a0 : number of parameters on the stack
@@ -2074,9 +2074,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }

 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<CodeT> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(
+    MacroAssembler* masm, CallOrConstructMode mode,
+    Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments
   //  -- a3 : the new.target (for [[Construct]] calls)
@@ -3610,7 +3610,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (!is_osr) {
     Label start_with_baseline;
     __ GetObjectType(code_obj, t2, t2);
-    __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
+    __ Branch(&start_with_baseline, eq, t2, Operand(CODE_DATA_CONTAINER_TYPE));

     // Start with bytecode as there is no baseline code.
     Builtin builtin_id = next_bytecode
@@ -3623,11 +3623,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
     __ GetObjectType(code_obj, t2, t2);
-    __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
+    __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
+              Operand(CODE_DATA_CONTAINER_TYPE));
   }

   if (v8_flags.debug_code) {
-    AssertCodeTIsBaseline(masm, code_obj, t2);
+    AssertCodeDataContainerIsBaseline(masm, code_obj, t2);
   }

   __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
@@ -35,8 +35,8 @@ namespace internal {
 #define __ ACCESS_MASM(masm)
 namespace {

-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
-                                  Register scratch) {
+static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
+                                              Register code, Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
   __ LoadU32(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
@@ -52,11 +52,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   USE(GetSharedFunctionInfoBytecodeOrBaseline);
   ASM_CODE_COMMENT(masm);
   Label done;
-  __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+  __ CompareObjectType(sfi_data, scratch1, scratch1, CODE_DATA_CONTAINER_TYPE);
   if (v8_flags.debug_code) {
     Label not_baseline;
     __ b(ne, &not_baseline);
-    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
     __ beq(is_baseline);
     __ bind(&not_baseline);
   } else {
@@ -131,7 +131,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // always have baseline code.
   if (!is_osr) {
     Label start_with_baseline;
-    __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
+    __ CompareObjectType(code_obj, r6, r6, CODE_DATA_CONTAINER_TYPE);
     __ b(eq, &start_with_baseline);

     // Start with bytecode as there is no baseline code.
@@ -144,12 +144,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     // Start with baseline code.
     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
-    __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
+    __ CompareObjectType(code_obj, r6, r6, CODE_DATA_CONTAINER_TYPE);
     __ Assert(eq, AbortReason::kExpectedBaselineData);
   }

   if (v8_flags.debug_code) {
-    AssertCodeTIsBaseline(masm, code_obj, r6);
+    AssertCodeDataContainerIsBaseline(masm, code_obj, r6);
   }
   __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);

@@ -940,7 +940,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,

   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
-  Handle<CodeT> trampoline_code =
+  Handle<CodeDataContainer> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);

@@ -1056,9 +1056,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ mr(r17, r7);

   // Invoke the code.
-  Handle<CodeT> builtin = is_construct
-                              ? BUILTIN_CODE(masm->isolate(), Construct)
-                              : masm->isolate()->builtins()->Call();
+  Handle<CodeDataContainer> builtin =
+      is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+                   : masm->isolate()->builtins()->Call();
   __ Call(builtin, RelocInfo::CODE_TARGET);

   // Exit the JS frame and remove the parameters (except function), and
@@ -1734,7 +1734,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(

     // Tail call to the array construct stub (still in the caller
     // context at this point).
-    Handle<CodeT> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+    Handle<CodeDataContainer> code =
+        BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
     __ Jump(code, RelocInfo::CODE_TARGET);
   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
     // Call the constructor with r3, r4, and r6 unmodified.
@@ -2218,7 +2219,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 // static
 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeT> code) {
+                                               Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- r4 : target
   //  -- r3 : number of parameters on the stack
@@ -2289,9 +2290,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }

 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<CodeT> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(
+    MacroAssembler* masm, CallOrConstructMode mode,
+    Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- r3 : the number of arguments
   //  -- r6 : the new.target (for [[Construct]] calls)
@@ -341,7 +341,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   Label done;

   __ GetObjectType(sfi_data, scratch1, scratch1);
-  __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+  __ Branch(is_baseline, eq, scratch1, Operand(CODE_DATA_CONTAINER_TYPE));

   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
             Label::Distance::kNear);
@@ -3664,7 +3664,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
     __ GetObjectType(code_obj, scratch, scratch);
-    __ Branch(&start_with_baseline, eq, scratch, Operand(CODET_TYPE));
+    __ Branch(&start_with_baseline, eq, scratch,
+              Operand(CODE_DATA_CONTAINER_TYPE));

     // Start with bytecode as there is no baseline code.
     Builtin builtin_id = next_bytecode
@@ -3680,7 +3681,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     Register scratch = temps.Acquire();
     __ GetObjectType(code_obj, scratch, scratch);
     __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
-              Operand(CODET_TYPE));
+              Operand(CODE_DATA_CONTAINER_TYPE));
   }
   if (v8_flags.debug_code) {
     UseScratchRegisterScope temps(masm);
@@ -36,8 +36,8 @@ namespace internal {

 namespace {

-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
-                                  Register scratch) {
+static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
+                                              Register code, Register scratch) {
   DCHECK(!AreAliased(code, scratch));
   // Verify that the code kind is baseline code via the CodeKind.
   __ LoadU32(scratch, FieldMemOperand(code, CodeDataContainer::kFlagsOffset));
@@ -53,11 +53,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   USE(GetSharedFunctionInfoBytecodeOrBaseline);
   ASM_CODE_COMMENT(masm);
   Label done;
-  __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+  __ CompareObjectType(sfi_data, scratch1, scratch1, CODE_DATA_CONTAINER_TYPE);
   if (v8_flags.debug_code) {
     Label not_baseline;
     __ b(ne, &not_baseline);
-    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
     __ beq(is_baseline);
     __ bind(&not_baseline);
   } else {
@@ -131,7 +131,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // always have baseline code.
   if (!is_osr) {
     Label start_with_baseline;
-    __ CompareObjectType(code_obj, r5, r5, CODET_TYPE);
+    __ CompareObjectType(code_obj, r5, r5, CODE_DATA_CONTAINER_TYPE);
     __ b(eq, &start_with_baseline);

     // Start with bytecode as there is no baseline code.
@@ -144,12 +144,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     // Start with baseline code.
     __ bind(&start_with_baseline);
   } else if (v8_flags.debug_code) {
-    __ CompareObjectType(code_obj, r5, r5, CODET_TYPE);
+    __ CompareObjectType(code_obj, r5, r5, CODE_DATA_CONTAINER_TYPE);
     __ Assert(eq, AbortReason::kExpectedBaselineData);
   }

   if (v8_flags.debug_code) {
-    AssertCodeTIsBaseline(masm, code_obj, r5);
+    AssertCodeDataContainerIsBaseline(masm, code_obj, r5);
   }
   __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);

@@ -937,7 +937,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,

   // Invoke the function by calling through JS entry trampoline builtin and
   // pop the faked function when we return.
-  Handle<CodeT> trampoline_code =
+  Handle<CodeDataContainer> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
   USE(pushed_stack_space);
   DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
@@ -1087,9 +1087,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ mov(r9, r6);

   // Invoke the code.
-  Handle<CodeT> builtin = is_construct
-                              ? BUILTIN_CODE(masm->isolate(), Construct)
-                              : masm->isolate()->builtins()->Call();
+  Handle<CodeDataContainer> builtin =
+      is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
+                   : masm->isolate()->builtins()->Call();
   __ Call(builtin, RelocInfo::CODE_TARGET);

   // Exit the JS frame and remove the parameters (except function), and
@@ -1756,7 +1756,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(

     // Tail call to the array construct stub (still in the caller
     // context at this point).
-    Handle<CodeT> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+    Handle<CodeDataContainer> code =
+        BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
     __ Jump(code, RelocInfo::CODE_TARGET);
   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
     // Call the constructor with r2, r3, and r5 unmodified.
@@ -2221,7 +2222,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 // static
 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<CodeT> code) {
+                                               Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- r3 : target
   //  -- r2 : number of parameters on the stack
@@ -2293,9 +2294,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 }

 // static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<CodeT> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(
+    MacroAssembler* masm, CallOrConstructMode mode,
+    Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
   //  -- r2 : the number of arguments
   //  -- r5 : the new.target (for [[Construct]] calls)
@ -211,7 +211,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin,
                                      Code code) {
  DCHECK_EQ(builtin, code.builtin_id());
  builtins->set_code(builtin, ToCodeT(code));
  builtins->set_code(builtin, ToCodeDataContainer(code));
}

// static
|
@ -242,7 +242,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
  PtrComprCageBase cage_base(isolate);
  for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
       ++builtin) {
    Code code = FromCodeT(builtins->code(builtin));
    Code code = FromCodeDataContainer(builtins->code(builtin));
    isolate->heap()->UnprotectAndRegisterMemoryChunk(
        code, UnprotectMemoryOrigin::kMainThread);
    bool flush_icache = false;
|
@ -253,16 +253,16 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
        DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
                       Builtins::IsIsolateIndependent(target.builtin_id()));
        if (!target.is_builtin()) continue;
        CodeT new_target = builtins->code(target.builtin_id());
        CodeDataContainer new_target = builtins->code(target.builtin_id());
        rinfo->set_target_address(new_target.raw_instruction_start(),
                                  UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
      } else {
        DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
        Object object = rinfo->target_object(cage_base);
        if (!object.IsCodeT(cage_base)) continue;
        CodeT target = CodeT::cast(object);
        if (!object.IsCodeDataContainer(cage_base)) continue;
        CodeDataContainer target = CodeDataContainer::cast(object);
        if (!target.is_builtin()) continue;
        CodeT new_target = builtins->code(target.builtin_id());
        CodeDataContainer new_target = builtins->code(target.builtin_id());
        rinfo->set_target_object(isolate->heap(), new_target,
                                 UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
      }
|
@ -464,7 +464,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,

  // Invoke the function by calling through JS entry trampoline builtin and
  // pop the faked function when we return.
  Handle<CodeT> trampoline_code =
  Handle<CodeDataContainer> trampoline_code =
      masm->isolate()->builtins()->code_handle(entry_trampoline);
  __ Call(trampoline_code, RelocInfo::CODE_TARGET);

|
@ -637,9 +637,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
    __ Push(r9);

    // Invoke the builtin code.
    Handle<CodeT> builtin = is_construct
                                ? BUILTIN_CODE(masm->isolate(), Construct)
                                : masm->isolate()->builtins()->Call();
    Handle<CodeDataContainer> builtin =
        is_construct ? BUILTIN_CODE(masm->isolate(), Construct)
                     : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);

    // Exit the internal frame. Notice that this also removes the empty
|
@ -664,19 +664,20 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
  __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}

static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
                                              Register code, Register scratch) {
static void AssertCodeDataContainerIsBaselineAllowClobber(MacroAssembler* masm,
                                                          Register code,
                                                          Register scratch) {
  // Verify that the code kind is baseline code via the CodeKind.
  __ movl(scratch, FieldOperand(code, CodeT::kFlagsOffset));
  __ DecodeField<CodeT::KindField>(scratch);
  __ movl(scratch, FieldOperand(code, CodeDataContainer::kFlagsOffset));
  __ DecodeField<CodeDataContainer::KindField>(scratch);
  __ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
  __ Assert(equal, AbortReason::kExpectedBaselineData);
}

static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
                                  Register scratch) {
static void AssertCodeDataContainerIsBaseline(MacroAssembler* masm,
                                              Register code, Register scratch) {
  DCHECK(!AreAliased(code, scratch));
  return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
  return AssertCodeDataContainerIsBaselineAllowClobber(masm, code, scratch);
}

static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
|
@ -687,11 +688,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
  Label done;
  __ LoadMap(scratch1, sfi_data);

  __ CmpInstanceType(scratch1, CODET_TYPE);
  __ CmpInstanceType(scratch1, CODE_DATA_CONTAINER_TYPE);
  if (v8_flags.debug_code) {
    Label not_baseline;
    __ j(not_equal, &not_baseline);
    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
    AssertCodeDataContainerIsBaseline(masm, sfi_data, scratch1);
    __ j(equal, is_baseline);
    __ bind(&not_baseline);
  } else {
|
@ -807,7 +808,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
    __ jmp(&ok);

    __ bind(&is_baseline);
    __ CmpObjectType(rcx, CODET_TYPE, rcx);
    __ CmpObjectType(rcx, CODE_DATA_CONTAINER_TYPE, rcx);
    __ Assert(equal, AbortReason::kMissingBytecodeArray);

    __ bind(&ok);
|
@ -2047,7 +2048,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                               Handle<CodeT> code) {
                                               Handle<CodeDataContainer> code) {
  // ----------- S t a t e -------------
  //  -- rdi : target
  //  -- rax : number of parameters on the stack
|
@ -2116,9 +2117,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}

// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                      CallOrConstructMode mode,
                                                      Handle<CodeT> code) {
void Builtins::Generate_CallOrConstructForwardVarargs(
    MacroAssembler* masm, CallOrConstructMode mode,
    Handle<CodeDataContainer> code) {
  // ----------- S t a t e -------------
  //  -- rax : the number of arguments
  //  -- rdx : the new target (for [[Construct]] calls)
|
@ -5343,7 +5344,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  // always have baseline code.
  if (!is_osr) {
    Label start_with_baseline;
    __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
    __ CmpObjectType(code_obj, CODE_DATA_CONTAINER_TYPE, kScratchRegister);
    __ j(equal, &start_with_baseline);

    // Start with bytecode as there is no baseline code.
|
@ -5356,12 +5357,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
    // Start with baseline code.
    __ bind(&start_with_baseline);
  } else if (v8_flags.debug_code) {
    __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
    __ CmpObjectType(code_obj, CODE_DATA_CONTAINER_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kExpectedBaselineData);
  }

  if (v8_flags.debug_code) {
    AssertCodeTIsBaseline(masm, code_obj, r11);
    AssertCodeDataContainerIsBaseline(masm, code_obj, r11);
  }
  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);

|
@ -156,7 +156,7 @@ void RelocInfo::WipeOut() {
  }
}

Handle<CodeT> Assembler::relative_code_target_object_handle_at(
Handle<CodeDataContainer> Assembler::relative_code_target_object_handle_at(
    Address pc) const {
  Instruction* branch = Instruction::At(pc);
  int code_target_index = branch->GetBranchOffset() / kInstrSize;
|
@ -1190,7 +1190,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
  void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);

  // Get the code target object for a pc-relative call or jump.
  V8_INLINE Handle<CodeT> relative_code_target_object_handle_at(
  V8_INLINE Handle<CodeDataContainer> relative_code_target_object_handle_at(
      Address pc_) const;

 protected:
|
@ -161,7 +161,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
  Jump(static_cast<intptr_t>(target), rmode, cond);
}

void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  DCHECK_IMPLIES(options().isolate_independent_code,
|
@ -225,7 +225,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
  }
}

void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
                          Condition cond, TargetAddressStorageMode mode,
                          bool check_constant_pool) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
|
@ -294,7 +294,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
      break;
    case BuiltinCallJumpMode::kForMksnapshot: {
      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
        Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
        Handle<CodeDataContainer> code =
            isolate()->builtins()->code_handle(builtin);
        int32_t code_target_index = AddCodeTarget(code);
        bl(code_target_index * kInstrSize, cond,
           RelocInfo::RELATIVE_CODE_TARGET);
|
@ -326,7 +327,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
      break;
    case BuiltinCallJumpMode::kForMksnapshot: {
      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
        Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
        Handle<CodeDataContainer> code =
            isolate()->builtins()->code_handle(builtin);
        int32_t code_target_index = AddCodeTarget(code);
        b(code_target_index * kInstrSize, cond,
          RelocInfo::RELATIVE_CODE_TARGET);
|
@ -402,10 +404,10 @@ void TurboAssembler::Drop(Register count, Condition cond) {
  add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
                                                        Register scratch) {
  ldr(scratch,
      FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
    Register code_data_container, Register scratch) {
  ldr(scratch, FieldMemOperand(code_data_container,
                               CodeDataContainer::kKindSpecificFlagsOffset));
  tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
}

|
@ -1930,8 +1932,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
  // runtime to clear it.
  {
    UseScratchRegisterScope temps(masm);
    __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry,
                                          temps.Acquire());
    __ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry,
                                                      temps.Acquire());
    __ b(ne, &heal_optimized_code_slot);
  }

|
@ -2059,7 +2061,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
  // smarter.
  mov(r0, Operand(num_arguments));
  Move(r1, ExternalReference::Create(f));
  Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
  Handle<CodeDataContainer> code =
      CodeFactory::CEntry(isolate(), f->result_size);
  Call(code, RelocInfo::CODE_TARGET);
}

|
@ -2084,7 +2087,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
  DCHECK_EQ(builtin.address() & 1, 1);
#endif
  Move(r1, builtin);
  Handle<CodeT> code =
  Handle<CodeDataContainer> code =
      CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}
|
@ -308,8 +308,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al,
  void Call(Handle<CodeDataContainer> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Label* target);
|
@ -440,7 +440,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                      Register exclusion3 = no_reg);
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
            Condition cond = al);
  void Jump(const ExternalReference& reference);

  // Perform a floating-point min or max operation with the
|
@ -891,7 +892,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
    DecodeField<Field>(reg, reg);
  }

  void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
  void TestCodeDataContainerIsMarkedForDeoptimization(
      Register code_data_container, Register scratch);
  Operand ClearedValue() const;

 private:
|
@ -485,15 +485,15 @@ Tagged_t Assembler::target_compressed_address_at(Address pc,
  return Memory<Tagged_t>(target_pointer_address_at(pc));
}

Handle<CodeT> Assembler::code_target_object_handle_at(Address pc) {
Handle<CodeDataContainer> Assembler::code_target_object_handle_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Handle<CodeT>(reinterpret_cast<Address*>(
    return Handle<CodeDataContainer>(reinterpret_cast<Address*>(
        Assembler::target_address_at(pc, 0 /* unused */)));
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
    return Handle<CodeT>::cast(
    return Handle<CodeDataContainer>::cast(
        GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
  }
}
|
@ -4383,7 +4383,7 @@ void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
void Assembler::near_call(HeapNumberRequest request) {
  BlockPoolsScope no_pool_before_bl_instr(this);
  RequestHeapNumber(request);
  EmbeddedObjectIndex index = AddEmbeddedObject(Handle<CodeT>());
  EmbeddedObjectIndex index = AddEmbeddedObject(Handle<CodeDataContainer>());
  RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
  DCHECK(is_int32(index));
  bl(static_cast<int>(index));
|
@ -262,7 +262,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {

  // Returns the handle for the code object called at 'pc'.
  // This might need to be temporarily encoded as an offset into code_targets_.
  inline Handle<CodeT> code_target_object_handle_at(Address pc);
  inline Handle<CodeDataContainer> code_target_object_handle_at(Address pc);
  inline EmbeddedObjectIndex embedded_object_index_referenced_from(Address pc);
  inline void set_embedded_object_index_referenced_from(
      Address p, EmbeddedObjectIndex index);
|
@ -1413,9 +1413,9 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,

  // Check if the optimized code is marked for deopt. If it is, call the
  // runtime to clear it.
  __ AssertCodeT(optimized_code_entry);
  __ JumpIfCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch,
                                          &heal_optimized_code_slot);
  __ AssertCodeDataContainer(optimized_code_entry);
  __ JumpIfCodeDataContainerIsMarkedForDeoptimization(
      optimized_code_entry, scratch, &heal_optimized_code_slot);

  // Optimized code is good, get it into the closure and link the closure into
  // the optimized functions list, then tail call the optimized code.
|
@ -1447,7 +1447,7 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
  ASM_CODE_COMMENT(this);
  DCHECK(!AreAliased(optimized_code, closure));
  // Store code entry in the closure.
  AssertCodeT(optimized_code);
  AssertCodeDataContainer(optimized_code);
  StoreTaggedField(optimized_code,
                   FieldMemOperand(closure, JSFunction::kCodeOffset));
  RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
|
@ -1596,16 +1596,16 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
  Check(ls, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
}

void MacroAssembler::AssertCodeT(Register object) {
void MacroAssembler::AssertCodeDataContainer(Register object) {
  if (!v8_flags.debug_code) return;
  ASM_CODE_COMMENT(this);
  AssertNotSmi(object, AbortReason::kOperandIsNotACodeT);
  AssertNotSmi(object, AbortReason::kOperandIsNotACodeDataContainer);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  CompareObjectType(object, temp, temp, CODET_TYPE);
  Check(eq, AbortReason::kOperandIsNotACodeT);
  CompareObjectType(object, temp, temp, CODE_DATA_CONTAINER_TYPE);
  Check(eq, AbortReason::kOperandIsNotACodeDataContainer);
}

void MacroAssembler::AssertConstructor(Register object) {
|
@ -1913,7 +1913,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
  Mov(x0, num_arguments);
  Mov(x1, ExternalReference::Create(f));

  Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
  Handle<CodeDataContainer> code =
      CodeFactory::CEntry(isolate(), f->result_size);
  Call(code, RelocInfo::CODE_TARGET);
}

|
@ -1921,7 +1922,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                             bool builtin_exit_frame) {
  ASM_CODE_COMMENT(this);
  Mov(x1, builtin);
  Handle<CodeT> code =
  Handle<CodeDataContainer> code =
      CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}
|
@ -2146,7 +2147,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
  JumpHelper(offset, rmode, cond);
}

void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  DCHECK_IMPLIES(options().isolate_independent_code,
|
@ -2190,7 +2191,8 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
  }
}

void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode) {
void TurboAssembler::Call(Handle<CodeDataContainer> code,
                          RelocInfo::Mode rmode) {
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code));
  BlockPoolsScope scope(this);
|
@ -2201,7 +2203,7 @@ void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode) {
    return;
  }

  DCHECK(FromCodeT(*code).IsExecutable());
  DCHECK(FromCodeDataContainer(*code).IsExecutable());
  DCHECK(RelocInfo::IsCodeTarget(rmode));

  if (CanUseNearCallOrJump(rmode)) {
|
@ -2283,7 +2285,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
    }
    case BuiltinCallJumpMode::kForMksnapshot: {
      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
        Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
        Handle<CodeDataContainer> code =
            isolate()->builtins()->code_handle(builtin);
        EmbeddedObjectIndex index = AddEmbeddedObject(code);
        DCHECK(is_int32(index));
        near_call(static_cast<int32_t>(index), RelocInfo::CODE_TARGET);
|
@ -2336,7 +2339,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
    }
    case BuiltinCallJumpMode::kForMksnapshot: {
      if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
        Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
        Handle<CodeDataContainer> code =
            isolate()->builtins()->code_handle(builtin);
        EmbeddedObjectIndex index = AddEmbeddedObject(code);
        DCHECK(is_int32(index));
        JumpHelper(static_cast<int64_t>(index), RelocInfo::CODE_TARGET, cond);
|
@ -2681,10 +2685,12 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
  Bind(&done);
}

void MacroAssembler::JumpIfCodeTIsMarkedForDeoptimization(
    Register codet, Register scratch, Label* if_marked_for_deoptimization) {
void MacroAssembler::JumpIfCodeDataContainerIsMarkedForDeoptimization(
    Register code_data_container, Register scratch,
    Label* if_marked_for_deoptimization) {
  Ldr(scratch.W(),
      FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
      FieldMemOperand(code_data_container,
                      CodeDataContainer::kKindSpecificFlagsOffset));
  Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
       if_marked_for_deoptimization);
}
|
@ -974,12 +974,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
            Condition cond = al);
  void Jump(const ExternalReference& reference);

  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
  void Call(Handle<CodeDataContainer> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
  void Call(ExternalReference target);

  // Generate an indirect call (for when a direct call's range is not adequate).
|
@ -1897,8 +1899,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
    DecodeField<Field>(reg, reg);
  }

  void JumpIfCodeTIsMarkedForDeoptimization(
      Register codet, Register scratch, Label* if_marked_for_deoptimization);
  void JumpIfCodeDataContainerIsMarkedForDeoptimization(
      Register code_data_container, Register scratch,
      Label* if_marked_for_deoptimization);
  Operand ClearedValue() const;

  Operand ReceiverOperand(const Register arg_count);
|
@ -1907,8 +1910,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {

  inline void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is not a CodeT, enabled via --debug-code.
  void AssertCodeT(Register object) NOOP_UNLESS_DEBUG_CODE
  // Abort execution if argument is not a CodeDataContainer, enabled via
  // --debug-code.
  void AssertCodeDataContainer(Register object) NOOP_UNLESS_DEBUG_CODE

  // Abort execution if argument is not a Constructor, enabled via
  // --debug-code.
|
@ -264,7 +264,7 @@ void AssemblerBase::RequestHeapNumber(HeapNumberRequest request) {
  heap_number_requests_.push_front(request);
}

int AssemblerBase::AddCodeTarget(Handle<CodeT> target) {
int AssemblerBase::AddCodeTarget(Handle<CodeDataContainer> target) {
  int current = static_cast<int>(code_targets_.size());
  if (current > 0 && !target.is_null() &&
      code_targets_.back().address() == target.address()) {
|
@ -276,7 +276,8 @@ int AssemblerBase::AddCodeTarget(Handle<CodeT> target) {
  }
}

Handle<CodeT> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
Handle<CodeDataContainer> AssemblerBase::GetCodeTarget(
    intptr_t code_target_index) const {
  DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
  return code_targets_[code_target_index];
}
|
@ -355,8 +355,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 protected:
  // Add 'target' to the {code_targets_} vector, if necessary, and return the
  // offset at which it is stored.
  int AddCodeTarget(Handle<CodeT> target);
  Handle<CodeT> GetCodeTarget(intptr_t code_target_index) const;
  int AddCodeTarget(Handle<CodeDataContainer> target);
  Handle<CodeDataContainer> GetCodeTarget(intptr_t code_target_index) const;

  // Add 'object' to the {embedded_objects_} vector and return the index at
  // which it is stored.
|
@ -412,7 +412,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
  // guaranteed to fit in the instruction's offset field. We keep track of the
  // code handles we encounter in calls in this vector, and encode the index of
  // the code handle in the vector instead.
  std::vector<Handle<CodeT>> code_targets_;
  std::vector<Handle<CodeDataContainer>> code_targets_;

  // If an assembler needs a small number to refer to a heap object handle
  // (for example, because there are only 32bit available on a 64bit arch), the
|
@ -54,7 +54,7 @@ namespace internal {
  V(kOperandIsNotAFunction, "Operand is not a function")                       \
  V(kOperandIsNotACallableFunction, "Operand is not a callable function")      \
  V(kOperandIsNotAGeneratorObject, "Operand is not a generator object")        \
  V(kOperandIsNotACodeT, "Operand is not a CodeT")                             \
  V(kOperandIsNotACodeDataContainer, "Operand is not a CodeDataContainer")     \
  V(kOperandIsNotASmi, "Operand is not a smi")                                 \
  V(kPromiseAlreadySettled, "Promise already settled")                         \
  V(kReceivedInvalidReturnAddress, "Received invalid return address")          \
|
@ -16,14 +16,14 @@ class Code;
// Associates a body of code with an interface descriptor.
class Callable final {
 public:
  Callable(Handle<CodeT> code, CallInterfaceDescriptor descriptor)
  Callable(Handle<CodeDataContainer> code, CallInterfaceDescriptor descriptor)
      : code_(code), descriptor_(descriptor) {}

  Handle<CodeT> code() const { return code_; }
  Handle<CodeDataContainer> code() const { return code_; }
  CallInterfaceDescriptor descriptor() const { return descriptor_; }

 private:
  const Handle<CodeT> code_;
  const Handle<CodeDataContainer> code_;
  const CallInterfaceDescriptor descriptor_;
};

|
@ -14,13 +14,15 @@ namespace v8 {
namespace internal {

// static
Handle<CodeT> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
Handle<CodeDataContainer> CodeFactory::RuntimeCEntry(Isolate* isolate,
                                                     int result_size) {
  return CodeFactory::CEntry(isolate, result_size);
}

// static
Handle<CodeT> CodeFactory::CEntry(Isolate* isolate, int result_size,
                                  ArgvMode argv_mode, bool builtin_exit_frame) {
Handle<CodeDataContainer> CodeFactory::CEntry(Isolate* isolate, int result_size,
                                              ArgvMode argv_mode,
                                              bool builtin_exit_frame) {
  // Aliases for readability below.
  const int rs = result_size;
  const ArgvMode am = argv_mode;
|
@ -254,7 +256,7 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(

// static
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
  Handle<CodeT> code =
  Handle<CodeDataContainer> code =
      CodeFactory::CEntry(isolate, result_size, ArgvMode::kRegister);
  if (result_size == 1) {
    return Callable(code, InterpreterCEntry1Descriptor{});
|
@ -26,11 +26,12 @@ class V8_EXPORT_PRIVATE CodeFactory final {
  // stack and the arguments count is passed via register) which currently
  // can't be expressed in CallInterfaceDescriptor. Therefore only the code
  // is exported here.
  static Handle<CodeT> RuntimeCEntry(Isolate* isolate, int result_size = 1);
  static Handle<CodeDataContainer> RuntimeCEntry(Isolate* isolate,
                                                 int result_size = 1);

  static Handle<CodeT> CEntry(Isolate* isolate, int result_size = 1,
                              ArgvMode argv_mode = ArgvMode::kStack,
                              bool builtin_exit_frame = false);
  static Handle<CodeDataContainer> CEntry(Isolate* isolate, int result_size = 1,
                                          ArgvMode argv_mode = ArgvMode::kStack,
                                          bool builtin_exit_frame = false);

  // Initial states for ICs.
  static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
|
@ -18,9 +18,9 @@ namespace internal {

namespace {

template <typename CodeOrCodeT>
struct CodeOrCodeTOps {
  Handle<CodeOrCodeT> code;
template <typename CodeOrCodeDataContainer>
struct CodeOrCodeDataContainerOps {
  Handle<CodeOrCodeDataContainer> code;

  Address constant_pool() const { return code->constant_pool(); }
  Address instruction_start() const { return code->InstructionStart(); }
|
@ -33,8 +33,8 @@ struct CodeOrCodeTOps {
  int code_comments_size() const { return code->code_comments_size(); }
};

using CodeOps = CodeOrCodeTOps<Code>;
using CodeTOps = CodeOrCodeTOps<CodeT>;
using CodeOps = CodeOrCodeDataContainerOps<Code>;
using CodeDataContainerOps = CodeOrCodeDataContainerOps<CodeDataContainer>;

#if V8_ENABLE_WEBASSEMBLY
struct WasmCodeOps {
|
@ -92,21 +92,21 @@ struct CodeDescOps {
#define HANDLE_WASM(...) UNREACHABLE()
#endif

#define DISPATCH(ret, method)                                   \
  ret CodeReference::method() const {                           \
    DCHECK(!is_null());                                         \
    switch (kind_) {                                            \
      case Kind::CODE:                                          \
        return CodeOps{code_}.method();                         \
      case Kind::CODET:                                         \
        return CodeTOps{codet_}.method();                       \
      case Kind::WASM_CODE:                                     \
        HANDLE_WASM(return WasmCodeOps{wasm_code_}.method());   \
      case Kind::CODE_DESC:                                     \
        return CodeDescOps{code_desc_}.method();                \
      default:                                                  \
        UNREACHABLE();                                          \
    }                                                           \
#define DISPATCH(ret, method)                                       \
  ret CodeReference::method() const {                               \
    DCHECK(!is_null());                                             \
    switch (kind_) {                                                \
      case Kind::CODE:                                              \
        return CodeOps{code_}.method();                             \
      case Kind::CODE_DATA_CONTAINER:                               \
        return CodeDataContainerOps{code_data_container_}.method(); \
      case Kind::WASM_CODE:                                         \
        HANDLE_WASM(return WasmCodeOps{wasm_code_}.method());       \
      case Kind::CODE_DESC:                                         \
        return CodeDescOps{code_desc_}.method();                    \
      default:                                                      \
        UNREACHABLE();                                              \
    }                                                               \
  }

DISPATCH(Address, constant_pool)
|
@ -28,8 +28,9 @@ class CodeReference {
  explicit CodeReference(const CodeDesc* code_desc)
      : kind_(Kind::CODE_DESC), code_desc_(code_desc) {}
  explicit CodeReference(Handle<Code> code) : kind_(Kind::CODE), code_(code) {}
  explicit CodeReference(Handle<CodeT> codet)
      : kind_(Kind::CODET), codet_(codet) {}
  explicit CodeReference(Handle<CodeDataContainer> code_data_container)
      : kind_(Kind::CODE_DATA_CONTAINER),
        code_data_container_(code_data_container) {}

  Address constant_pool() const;
  Address instruction_start() const;
|
@ -43,7 +44,9 @@ class CodeReference {

  bool is_null() const { return kind_ == Kind::NONE; }
  bool is_code() const { return kind_ == Kind::CODE; }
  bool is_codet() const { return kind_ == Kind::CODET; }
  bool is_code_data_container() const {
    return kind_ == Kind::CODE_DATA_CONTAINER;
  }
  bool is_wasm_code() const { return kind_ == Kind::WASM_CODE; }

  Handle<Code> as_code() const {
|
@ -51,9 +54,9 @@ class CodeReference {
    return code_;
  }

  Handle<CodeT> as_codet() const {
    DCHECK_EQ(Kind::CODET, kind_);
    return codet_;
  Handle<CodeDataContainer> as_code_data_container() const {
    DCHECK_EQ(Kind::CODE_DATA_CONTAINER, kind_);
    return code_data_container_;
  }

  const wasm::WasmCode* as_wasm_code() const {
|
@ -62,13 +65,19 @@ class CodeReference {
  }

 private:
  enum class Kind { NONE, CODE, CODET, WASM_CODE, CODE_DESC } kind_;
  enum class Kind {
    NONE,
    CODE,
    CODE_DATA_CONTAINER,
    WASM_CODE,
    CODE_DESC
  } kind_;
  union {
    std::nullptr_t null_;
    const wasm::WasmCode* wasm_code_;
    const CodeDesc* code_desc_;
    Handle<Code> code_;
    Handle<CodeT> codet_;
    Handle<CodeDataContainer> code_data_container_;
  };

  DISALLOW_NEW_AND_DELETE()
|
@ -3145,19 +3145,19 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
  Label check_for_interpreter_data(this, &var_result);
  Label done(this, &var_result);

  GotoIfNot(HasInstanceType(var_result.value(), CODET_TYPE),
  GotoIfNot(HasInstanceType(var_result.value(), CODE_DATA_CONTAINER_TYPE),
            &check_for_interpreter_data);
  {
    TNode<CodeT> code = CAST(var_result.value());
    TNode<CodeDataContainer> code = CAST(var_result.value());
#ifdef DEBUG
    TNode<Int32T> code_flags =
        LoadObjectField<Int32T>(code, CodeT::kFlagsOffset);
    CSA_DCHECK(
        this, Word32Equal(DecodeWord32<CodeT::KindField>(code_flags),
                          Int32Constant(static_cast<int>(CodeKind::BASELINE))));
        LoadObjectField<Int32T>(code, CodeDataContainer::kFlagsOffset);
    CSA_DCHECK(this, Word32Equal(
                         DecodeWord32<CodeDataContainer::KindField>(code_flags),
                         Int32Constant(static_cast<int>(CodeKind::BASELINE))));
#endif  // DEBUG
    TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
        FromCodeTNonBuiltin(code),
        FromCodeDataContainerNonBuiltin(code),
        Code::kDeoptimizationDataOrInterpreterDataOffset);
    var_result = baseline_data;
  }
|
@ -15463,7 +15463,7 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
  return Word32NotEqual(flags, Int32Constant(0));
}

TNode<CodeT> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
TNode<CodeDataContainer> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
  CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));

  TNode<IntPtrT> offset =
|
@ -15475,13 +15475,13 @@ TNode<CodeT> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
  return CAST(BitcastWordToTagged(Load<RawPtrT>(table, offset)));
}

TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<CodeDataContainer> CodeStubAssembler::GetSharedFunctionInfoCode(
    TNode<SharedFunctionInfo> shared_info, TVariable<Uint16T>* data_type_out,
    Label* if_compile_lazy) {
  TNode<Object> sfi_data =
      LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);

  TVARIABLE(CodeT, sfi_code);
  TVARIABLE(CodeDataContainer, sfi_code);

  Label done(this);
  Label check_instance_type(this);
|
@ -15507,7 +15507,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(

  int32_t case_values[] = {
      BYTECODE_ARRAY_TYPE,
      CODET_TYPE,
      CODE_DATA_CONTAINER_TYPE,
      UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
      UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
      UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE,
|
@ -15557,7 +15557,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
  // IsBaselineData: Execute baseline code
  BIND(&check_is_baseline_data);
  {
    TNode<CodeT> baseline_code = CAST(sfi_data);
    TNode<CodeDataContainer> baseline_code = CAST(sfi_data);
    sfi_code = baseline_code;
    Goto(&done);
  }
|
@ -15579,7 +15579,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
  CSA_DCHECK(this,
             Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE)));
  {
    TNode<CodeT> trampoline =
    TNode<CodeDataContainer> trampoline =
        LoadInterpreterDataInterpreterTrampoline(CAST(sfi_data));
    sfi_code = trampoline;
  }
|
@ -15607,21 +15607,22 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
  return sfi_code.value();
}

TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeT> code) {
TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeDataContainer> code) {
  return LoadObjectField<RawPtrT>(
      code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset));
}

TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(TNode<CodeT> codet) {
TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(
    TNode<CodeDataContainer> code_data_container) {
  return IsSetWord32<Code::MarkedForDeoptimizationField>(
      LoadObjectField<Int32T>(codet,
      LoadObjectField<Int32T>(code_data_container,
                              CodeDataContainer::kKindSpecificFlagsOffset));
}

TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
    TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
    TNode<Context> context) {
  const TNode<CodeT> code = GetSharedFunctionInfoCode(shared_info);
  const TNode<CodeDataContainer> code = GetSharedFunctionInfoCode(shared_info);

  // TODO(ishell): All the callers of this function pass map loaded from
  // Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
|
@ -834,16 +834,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler

  void FastCheck(TNode<BoolT> condition);

  TNode<BoolT> IsCodeTMap(TNode<Map> map) {
    return IsCodeDataContainerMap(map);
  }
  TNode<BoolT> IsCodeT(TNode<HeapObject> object) {
    return IsCodeTMap(LoadMap(object));
  }

  // TODO(v8:11880): remove once Code::bytecode_or_interpreter_data field
  // is cached in or moved to CodeT.
  TNode<Code> FromCodeTNonBuiltin(TNode<CodeT> code) {
  // is cached in or moved to CodeDataContainer.
  TNode<Code> FromCodeDataContainerNonBuiltin(TNode<CodeDataContainer> code) {
    // Compute the Code object pointer from the code entry point.
    TNode<RawPtrT> code_entry = Load<RawPtrT>(
        code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset -
|
@ -853,18 +846,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
    return CAST(o);
  }

  TNode<CodeT> ToCodeT(TNode<Code> code) {
  TNode<CodeDataContainer> ToCodeDataContainer(TNode<Code> code) {
    return LoadObjectField<CodeDataContainer>(code,
                                              Code::kCodeDataContainerOffset);
  }

  TNode<CodeT> ToCodeT(TNode<Code> code,
                       TNode<CodeDataContainer> code_data_container) {
    return code_data_container;
  }

  TNode<RawPtrT> GetCodeEntry(TNode<CodeT> code);
  TNode<BoolT> IsMarkedForDeoptimization(TNode<CodeT> codet);
  TNode<RawPtrT> GetCodeEntry(TNode<CodeDataContainer> code);
  TNode<BoolT> IsMarkedForDeoptimization(
      TNode<CodeDataContainer> code_data_container);

  // The following Call wrappers call an object according to the semantics that
  // one finds in the EcmaScript spec, operating on an Callable (e.g. a
|
@ -3862,7 +3851,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
      ElementsKind kind = HOLEY_ELEMENTS);

  // Load a builtin's code from the builtin array in the isolate.
  TNode<CodeT> LoadBuiltin(TNode<Smi> builtin_id);
  TNode<CodeDataContainer> LoadBuiltin(TNode<Smi> builtin_id);

  // Figure out the SFI's code object using its data field.
  // If |data_type_out| is provided, the instance type of the function data will
|
@ -3870,7 +3859,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  // data_type_out will be set to 0.
  // If |if_compile_lazy| is provided then the execution will go to the given
  // label in case of an CompileLazy code object.
  TNode<CodeT> GetSharedFunctionInfoCode(
  TNode<CodeDataContainer> GetSharedFunctionInfoCode(
      TNode<SharedFunctionInfo> shared_info,
      TVariable<Uint16T>* data_type_out = nullptr,
      Label* if_compile_lazy = nullptr);
|
@ -652,7 +652,7 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate,
          INTERPRETER_DATA_TYPE, AllocationType::kOld));

  interpreter_data->set_bytecode_array(*bytecode_array);
  interpreter_data->set_interpreter_trampoline(ToCodeT(*code));
  interpreter_data->set_interpreter_trampoline(ToCodeDataContainer(*code));

  shared_info->set_interpreter_data(*interpreter_data);

|
@ -922,7 +922,7 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
// A wrapper to access the optimized code cache slots on the feedback vector.
class OptimizedCodeCache : public AllStatic {
 public:
  static V8_WARN_UNUSED_RESULT MaybeHandle<CodeT> Get(
  static V8_WARN_UNUSED_RESULT MaybeHandle<CodeDataContainer> Get(
      Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
      CodeKind code_kind) {
    if (!CodeKindIsStoredInOptimizedCodeCache(code_kind)) return {};
|
@ -932,13 +932,13 @@ class OptimizedCodeCache : public AllStatic {
    SharedFunctionInfo shared = function->shared();
    RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);

    CodeT code;
    CodeDataContainer code;
    FeedbackVector feedback_vector = function->feedback_vector();
    if (IsOSR(osr_offset)) {
      Handle<BytecodeArray> bytecode(shared.GetBytecodeArray(isolate), isolate);
      interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt());
      DCHECK_EQ(it.current_bytecode(), interpreter::Bytecode::kJumpLoop);
      base::Optional<CodeT> maybe_code =
      base::Optional<CodeDataContainer> maybe_code =
          feedback_vector.GetOptimizedOsrCode(isolate, it.GetSlotOperand(2));
      if (maybe_code.has_value()) code = maybe_code.value();
    } else {
|
@ -961,7 +961,7 @@ class OptimizedCodeCache : public AllStatic {
  }

  static void Insert(Isolate* isolate, JSFunction function,
                     BytecodeOffset osr_offset, CodeT code,
                     BytecodeOffset osr_offset, CodeDataContainer code,
                     bool is_function_context_specializing) {
    const CodeKind kind = code.kind();
    if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
|
@ -1052,7 +1052,7 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate,
  DCHECK(!isolate->has_pending_exception());
  OptimizedCodeCache::Insert(isolate, *compilation_info->closure(),
                             compilation_info->osr_offset(),
                             ToCodeT(*compilation_info->code()),
                             ToCodeDataContainer(*compilation_info->code()),
                             compilation_info->function_context_specializing());
  job->RecordFunctionCompilation(LogEventListener::CodeTag::kFunction, isolate);
  return true;
|
@ -1128,12 +1128,10 @@ bool ShouldOptimize(CodeKind code_kind, Handle<SharedFunctionInfo> shared) {
  }
}

MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
                                   Handle<JSFunction> function,
                                   Handle<SharedFunctionInfo> shared,
                                   ConcurrencyMode mode,
                                   BytecodeOffset osr_offset,
                                   CompileResultBehavior result_behavior) {
MaybeHandle<CodeDataContainer> CompileTurbofan(
    Isolate* isolate, Handle<JSFunction> function,
    Handle<SharedFunctionInfo> shared, ConcurrencyMode mode,
    BytecodeOffset osr_offset, CompileResultBehavior result_behavior) {
  VMState<COMPILER> state(isolate);
  TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
  RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
|
@ -1165,7 +1163,7 @@ MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
  } else {
    DCHECK(IsSynchronous(mode));
    if (CompileTurbofan_NotConcurrent(isolate, job.get())) {
      return ToCodeT(job->compilation_info()->code(), isolate);
      return ToCodeDataContainer(job->compilation_info()->code(), isolate);
    }
  }

|
@ -1178,10 +1176,11 @@ MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
void RecordMaglevFunctionCompilation(Isolate* isolate,
                                     Handle<JSFunction> function) {
  PtrComprCageBase cage_base(isolate);
  // TODO(v8:13261): We should be able to pass a CodeT AbstractCode in here, but
  // LinuxPerfJitLogger only supports Code AbstractCode.
  // TODO(v8:13261): We should be able to pass a CodeDataContainer AbstractCode
  // in here, but LinuxPerfJitLogger only supports Code AbstractCode.
  Handle<AbstractCode> abstract_code(
      AbstractCode::cast(FromCodeT(function->code(cage_base))), isolate);
      AbstractCode::cast(FromCodeDataContainer(function->code(cage_base))),
      isolate);
  Handle<SharedFunctionInfo> shared(function->shared(cage_base), isolate);
  Handle<Script> script(Script::cast(shared->script(cage_base)), isolate);
  Handle<FeedbackVector> feedback_vector(function->feedback_vector(cage_base),
|
@ -1197,10 +1196,9 @@ void RecordMaglevFunctionCompilation(Isolate* isolate,
}
#endif  // V8_ENABLE_MAGLEV

MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
                                 ConcurrencyMode mode,
                                 BytecodeOffset osr_offset,
                                 CompileResultBehavior result_behavior) {
MaybeHandle<CodeDataContainer> CompileMaglev(
    Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
    BytecodeOffset osr_offset, CompileResultBehavior result_behavior) {
#ifdef V8_ENABLE_MAGLEV
  DCHECK(v8_flags.maglev);
  // TODO(v8:7700): Add missing support.
|
@ -1268,7 +1266,7 @@ MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
#endif  // V8_ENABLE_MAGLEV
}

MaybeHandle<CodeT> GetOrCompileOptimized(
MaybeHandle<CodeDataContainer> GetOrCompileOptimized(
    Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
    CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
    CompileResultBehavior result_behavior = CompileResultBehavior::kDefault) {
|
@ -1298,7 +1296,7 @@ MaybeHandle<CodeT> GetOrCompileOptimized(
  // turbo_filter.
  if (!ShouldOptimize(code_kind, shared)) return {};

  Handle<CodeT> cached_code;
  Handle<CodeDataContainer> cached_code;
  if (OptimizedCodeCache::Get(isolate, function, osr_offset, code_kind)
          .ToHandle(&cached_code)) {
    return cached_code;
|
@ -2566,7 +2564,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
  }

  DCHECK(is_compiled_scope->is_compiled());
  Handle<CodeT> code = handle(shared_info->GetCode(), isolate);
  Handle<CodeDataContainer> code = handle(shared_info->GetCode(), isolate);

  // Initialize the feedback cell for this JSFunction and reset the interrupt
  // budget for feedback vector allocation even if there is a closure feedback
|
@ -2595,7 +2593,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
                                        concurrency_mode, code_kind);
  }

  Handle<CodeT> maybe_code;
  Handle<CodeDataContainer> maybe_code;
  if (GetOrCompileOptimized(isolate, function, concurrency_mode, code_kind)
          .ToHandle(&maybe_code)) {
    code = maybe_code;
|
@ -2648,7 +2646,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
      // report these somehow, or silently ignore them?
      return false;
    }
    shared->set_baseline_code(ToCodeT(*code), kReleaseStore);
    shared->set_baseline_code(ToCodeDataContainer(*code), kReleaseStore);
  }
  double time_taken_ms = time_taken.InMillisecondsF();

|
@ -2676,7 +2674,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
  // Baseline code needs a feedback vector.
  JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);

  CodeT baseline_code = shared->baseline_code(kAcquireLoad);
  CodeDataContainer baseline_code = shared->baseline_code(kAcquireLoad);
  DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
  function->set_code(baseline_code);
  return true;
|
@ -2720,7 +2718,7 @@ void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
                                        code_kind);
  }

  Handle<CodeT> code;
  Handle<CodeDataContainer> code;
  if (GetOrCompileOptimized(isolate, function, mode, code_kind)
          .ToHandle(&code)) {
    function->set_code(*code, kReleaseStore);
|
@ -3854,10 +3852,9 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
    FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);

// static
MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
                                                 Handle<JSFunction> function,
                                                 BytecodeOffset osr_offset,
                                                 ConcurrencyMode mode) {
MaybeHandle<CodeDataContainer> Compiler::CompileOptimizedOSR(
    Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
    ConcurrencyMode mode) {
  DCHECK(IsOSR(osr_offset));

  if (V8_UNLIKELY(isolate->serializer_enabled())) return {};
|
@ -3880,7 +3877,7 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
  function->feedback_vector().reset_osr_urgency();

  CompilerTracer::TraceOptimizeOSRStarted(isolate, function, osr_offset, mode);
  MaybeHandle<CodeT> result = GetOrCompileOptimized(
  MaybeHandle<CodeDataContainer> result = GetOrCompileOptimized(
      isolate, function, mode, CodeKind::TURBOFAN, osr_offset);

  if (result.is_null()) {
|
@ -3944,7 +3941,8 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
        ResetTieringState(*function, osr_offset);
        OptimizedCodeCache::Insert(
            isolate, *compilation_info->closure(),
            compilation_info->osr_offset(), ToCodeT(*compilation_info->code()),
            compilation_info->osr_offset(),
            ToCodeDataContainer(*compilation_info->code()),
            compilation_info->function_context_specializing());
        CompilerTracer::TraceCompletedJob(isolate, compilation_info);
        if (IsOSR(osr_offset)) {
|
@ -4034,7 +4032,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
      // deoptimized code just before installing it on the funciton.
      function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
          *shared, "new function from shared function info");
      CodeT code = function->feedback_vector().optimized_code();
      CodeDataContainer code = function->feedback_vector().optimized_code();
      if (!code.is_null()) {
        // Caching of optimized code enabled and optimized code found.
        DCHECK(!code.marked_for_deoptimization());
|
@ -95,9 +95,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
  // Generate and return optimized code for OSR. The empty handle is returned
  // either on failure, or after spawning a concurrent OSR task (in which case
  // a future OSR request will pick up the resulting code object).
  V8_WARN_UNUSED_RESULT static MaybeHandle<CodeT> CompileOptimizedOSR(
      Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
      ConcurrencyMode mode);
  V8_WARN_UNUSED_RESULT static MaybeHandle<CodeDataContainer>
  CompileOptimizedOSR(Isolate* isolate, Handle<JSFunction> function,
                      BytecodeOffset osr_offset, ConcurrencyMode mode);

  V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
  CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
|
@ -166,7 +166,7 @@ void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
emit(x);
}

void Assembler::emit(Handle<CodeT> code, RelocInfo::Mode rmode) {
void Assembler::emit(Handle<CodeDataContainer> code, RelocInfo::Mode rmode) {
emit(code.address(), rmode);
}

@ -1639,7 +1639,7 @@ void Assembler::call(Operand adr) {
emit_operand(edx, adr);
}

void Assembler::call(Handle<CodeT> code, RelocInfo::Mode rmode) {
void Assembler::call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
EMIT(0xE8);

@ -1709,7 +1709,7 @@ void Assembler::jmp(Operand adr) {
emit_operand(esp, adr);
}

void Assembler::jmp(Handle<CodeT> code, RelocInfo::Mode rmode) {
void Assembler::jmp(Handle<CodeDataContainer> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
EMIT(0xE9);

@ -1769,7 +1769,8 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
emit(entry - (pc_ + sizeof(int32_t)), rmode);
}

void Assembler::j(Condition cc, Handle<CodeT> code, RelocInfo::Mode rmode) {
void Assembler::j(Condition cc, Handle<CodeDataContainer> code,
RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
// 0000 1111 1000 tttn #32-bit disp
EMIT(0x0F);

@ -746,7 +746,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void call(Address entry, RelocInfo::Mode rmode);
void call(Register reg) { call(Operand(reg)); }
void call(Operand adr);
void call(Handle<CodeT> code, RelocInfo::Mode rmode);
void call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode);
void wasm_call(Address address, RelocInfo::Mode rmode);

// Jumps

@ -755,7 +755,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void jmp(Address entry, RelocInfo::Mode rmode);
void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(Operand adr);
void jmp(Handle<CodeT> code, RelocInfo::Mode rmode);
void jmp(Handle<CodeDataContainer> code, RelocInfo::Mode rmode);
// Unconditional jump relative to the current address. Low-level routine,
// use with caution!
void jmp_rel(int offset);

@ -763,7 +763,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional jumps
void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<CodeT> code,
void j(Condition cc, Handle<CodeDataContainer> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);

// Floating-point operations

@ -1688,7 +1688,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void emit(uint32_t x);
inline void emit(Handle<HeapObject> handle);
inline void emit(uint32_t x, RelocInfo::Mode rmode);
inline void emit(Handle<CodeT> code, RelocInfo::Mode rmode);
inline void emit(Handle<CodeDataContainer> code, RelocInfo::Mode rmode);
inline void emit(const Immediate& x);
inline void emit_b(Immediate x);
inline void emit_w(const Immediate& x);

@ -706,8 +706,10 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
CompareRange(instance_type_out, lower_limit, higher_limit, scratch);
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet) {
test(FieldOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset),
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container) {
test(FieldOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
}

@ -740,7 +742,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,

// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry);
__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry);
__ j(not_zero, &heal_optimized_code_slot);

// Optimized code is good, get it into the closure and link the closure

@ -1282,7 +1284,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Move(kRuntimeCallArgCountRegister, Immediate(num_arguments));
Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}

@ -1314,7 +1317,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
Move(kRuntimeCallFunctionRegister, Immediate(ext));
Handle<CodeT> code =
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}

@ -1964,7 +1967,8 @@ void TurboAssembler::PushPC() {
bind(&get_pc);
}

void TurboAssembler::Call(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
void TurboAssembler::Call(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode) {
ASM_CODE_COMMENT(this);
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));

@ -2012,7 +2016,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
call(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}

@ -2033,7 +2038,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
jmp(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
jmp(code, RelocInfo::CODE_TARGET);
break;
}

@ -2089,7 +2095,8 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
isolate(), reference)));
}

void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
void TurboAssembler::Jump(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;

@ -149,7 +149,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Call(Register reg) { call(reg); }
void Call(Operand op) { call(op); }
void Call(Label* target) { call(target); }
void Call(Handle<CodeT> code_object, RelocInfo::Mode rmode);
void Call(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode);

// Load the builtin given by the Smi in |builtin_index| into the same
// register.

@ -172,7 +172,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
JumpMode jump_mode = JumpMode::kJump);

void Jump(const ExternalReference& reference);
void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode);
void Jump(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode);

void LoadMap(Register destination, Register object);

@ -559,7 +559,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
and_(reg, Immediate(mask));
}

void TestCodeTIsMarkedForDeoptimization(Register codet);
void TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container);
Immediate ClearedValue() const;

// Tiering support.
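All of the ia32 deoptimization-check hunks rename a helper that still emits a single test of the container's kind-specific flags word. A standalone model of the predicate that test(FieldOperand(..., kKindSpecificFlagsOffset), Immediate(1 << Code::kMarkedForDeoptimizationBit)) computes (the bit index below is illustrative, not V8's real constant):

    #include <cstdint>

    constexpr int kMarkedForDeoptimizationBit = 0;  // illustrative, not V8's value

    // The emitted `test` sets the CPU zero flag iff this returns false, which
    // is why the caller follows up with j(not_zero, &heal_optimized_code_slot).
    bool IsMarkedForDeoptimization(uint32_t kind_specific_flags) {
      return (kind_specific_flags & (1u << kMarkedForDeoptimizationBit)) != 0;
    }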
@ -137,7 +137,7 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}

Handle<CodeT> Assembler::relative_code_target_object_handle_at(
Handle<CodeDataContainer> Assembler::relative_code_target_object_handle_at(
Address pc) const {
Instr instr = Assembler::instr_at(pc);
int32_t code_target_index = instr & kImm26Mask;

@ -824,7 +824,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void CheckTrampolinePool();

// Get the code target object for a pc-relative call or jump.
V8_INLINE Handle<CodeT> relative_code_target_object_handle_at(
V8_INLINE Handle<CodeDataContainer> relative_code_target_object_handle_at(
Address pc_) const;

inline int UnboundLabelsCount() { return unbound_labels_count_; }

@ -2584,7 +2584,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
}

void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond, Register rj, const Operand& rk) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
BlockTrampolinePoolScope block_trampoline_pool(this);

@ -2659,7 +2659,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
bind(&skip);
}

void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond, Register rj, const Operand& rk) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Builtin builtin = Builtin::kNoBuiltinId;

@ -2718,7 +2718,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
bl(code_target_index);

@ -2754,7 +2755,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
b(code_target_index);

@ -3003,12 +3005,11 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
Branch(stack_overflow, le, scratch1, Operand(scratch2));
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
Register scratch,
Condition cond,
Label* target) {
Ld_wu(scratch,
FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
Register code_data_container, Register scratch, Condition cond,
Label* target) {
Ld_wu(scratch, FieldMemOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset));
And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
Branch(target, cond, scratch, Operand(zero_reg));
}

@ -3373,7 +3374,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference::Create(f));
Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}

@ -3390,7 +3392,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
Handle<CodeT> code =
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}

@ -4195,8 +4197,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,

// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
__ TestCodeTIsMarkedForDeoptimizationAndJump(optimized_code_entry, a6, ne,
&heal_optimized_code_slot);
__ TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
optimized_code_entry, a6, ne, &heal_optimized_code_slot);

// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.

@ -188,12 +188,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// it to register use ld_d, it can be used in wasm jump table for concurrent
// patching.
void PatchAndJump(Address target);
void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode, COND_ARGS);
void Jump(const ExternalReference& reference);
void Call(Register target, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Handle<CodeDataContainer> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS);
void Call(Label* target);

// Load the builtin given by the Smi in |builtin_index| into the same

@ -832,9 +832,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);

void TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
Register scratch,
Condition cond, Label* target);
void TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
Register code_data_container, Register scratch, Condition cond,
Label* target);
Operand ClearedValue() const;

void PushRoot(RootIndex index) {
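In the loong64 kForMksnapshot branches, the handle type changes but the pc-relative emission sequence stays the same. Condensed, the path is (a sketch reusing lines from the hunks above; V8-internal context assumed):

    Handle<CodeDataContainer> code =
        isolate()->builtins()->code_handle(builtin);
    int32_t code_target_index = AddCodeTarget(code);   // index into the code-target table
    RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);  // resolved when the snapshot is built
    bl(code_target_index);                             // b(...) in the tail-call variant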
@ -4326,7 +4326,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}

void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(RelocInfo::IsCodeTarget(rmode));

@ -4399,7 +4399,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Call(t9, cond, rs, rt, bd);
}

void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);

@ -4454,7 +4454,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
break;
}
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
IndirectLoadConstant(temp, code);
CallCodeDataContainerObject(temp);
break;

@ -4482,7 +4483,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
break;
}
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
IndirectLoadConstant(temp, code);
JumpCodeDataContainerObject(temp);
break;

@ -4919,12 +4921,11 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
Branch(stack_overflow, le, scratch1, Operand(scratch2));
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
Register scratch,
Condition cond,
Label* target) {
Lwu(scratch,
FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
Register code_data_container, Register scratch, Condition cond,
Label* target) {
Lwu(scratch, FieldMemOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset));
And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
Branch(target, cond, scratch, Operand(zero_reg));
}

@ -5277,7 +5278,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference::Create(f));
Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}

@ -5295,7 +5297,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
Handle<CodeT> code =
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}

@ -6242,8 +6244,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,

// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
__ TestCodeTIsMarkedForDeoptimizationAndJump(optimized_code_entry, scratch1,
ne, &heal_optimized_code_slot);
__ TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
optimized_code_entry, scratch1, ne, &heal_optimized_code_slot);

// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.

@ -244,12 +244,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// it to register use ld, it can be used in wasm jump table for concurrent
// patching.
void PatchAndJump(Address target);
void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode, COND_ARGS);
void Jump(const ExternalReference& reference);
void Call(Register target, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Handle<CodeDataContainer> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS);
void Call(Label* target);
void LoadAddress(Register dst, Label* target);

@ -1005,9 +1005,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);

void TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
Register scratch,
Condition cond, Label* target);
void TestCodeDataContainerIsMarkedForDeoptimizationAndJump(
Register code_data_container, Register scratch, Condition cond,
Label* target);
Operand ClearedValue() const;

void PushRoot(RootIndex index) {
@ -189,7 +189,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Jump(static_cast<intptr_t>(target), rmode, cond, cr);
}

void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,

@ -252,7 +252,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
bctrl();
}

void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));

@ -293,7 +293,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
Call(static_cast<Address>(code_target_index), RelocInfo::CODE_TARGET,
cond);

@ -336,7 +337,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
Jump(static_cast<intptr_t>(code_target_index), RelocInfo::CODE_TARGET,
cond, cr);

@ -363,11 +365,11 @@ void TurboAssembler::Drop(Register count, Register scratch) {
add(sp, sp, scratch);
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
Register scratch1,
Register scratch2) {
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch1, Register scratch2) {
LoadS32(scratch1,
FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset),
FieldMemOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset),
scratch2);
TestBit(scratch1, Code::kMarkedForDeoptimizationBit, scratch2);
}

@ -2044,8 +2046,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// runtime to clear it.
{
UseScratchRegisterScope temps(masm);
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire(),
scratch);
__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry,
temps.Acquire(), scratch);
__ bne(&heal_optimized_code_slot, cr0);
}

@ -2185,9 +2187,10 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
mov(r3, Operand(num_arguments));
Move(r4, ExternalReference::Create(f));
#if V8_TARGET_ARCH_PPC64
Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
#else
Handle<CodeT> code = CodeFactory::CEntry(isolate(), 1);
Handle<CodeDataContainer> code = CodeFactory::CEntry(isolate(), 1);
#endif
Call(code, RelocInfo::CODE_TARGET);
}

@ -2204,7 +2207,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r4, builtin);
Handle<CodeT> code =
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}

@ -716,14 +716,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond = al, CRegister cr = cr7);
void Jump(const ExternalReference& reference);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
void Call(Handle<CodeDataContainer> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Call(Label* target);

@ -1723,8 +1724,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg, rc);
}

void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch1,
Register scratch2);
void TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch1, Register scratch2);
Operand ClearedValue() const;

private:
@ -114,8 +114,8 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,

// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
__ JumpIfCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch1,
&heal_optimized_code_slot);
__ JumpIfCodeDataContainerIsMarkedForDeoptimization(
optimized_code_entry, scratch1, &heal_optimized_code_slot);

// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.

@ -4448,7 +4448,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
EmbeddedObjectIndex index = AddEmbeddedObject(code);
DCHECK(is_int32(index));
RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET,

@ -5733,10 +5734,12 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
Branch(smi_label, eq, scratch, Operand(zero_reg));
}

void MacroAssembler::JumpIfCodeTIsMarkedForDeoptimization(
Register codet, Register scratch, Label* if_marked_for_deoptimization) {
void MacroAssembler::JumpIfCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch,
Label* if_marked_for_deoptimization) {
LoadTaggedPointerField(
scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
scratch,
FieldMemOperand(code_data_container, Code::kCodeDataContainerOffset));
Lw(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));

@ -1475,8 +1475,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
ArgumentsCountType type,
ArgumentsCountMode mode,
Register scratch = no_reg);
void JumpIfCodeTIsMarkedForDeoptimization(
Register codet, Register scratch, Label* if_marked_for_deoptimization);
void JumpIfCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch,
Label* if_marked_for_deoptimization);
Operand ClearedValue() const;

// Jump if the register contains a non-smi.
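Note the extra indirection in the riscv helper above: it first loads the CodeDataContainer out of the Code object via Code::kCodeDataContainerOffset, then reads the flags word from the container. A standalone model of that two-step load (struct layout and bit index are illustrative only):

    #include <cstdint>

    struct ContainerModel { uint32_t kind_specific_flags; };
    struct CodeModel { const ContainerModel* code_data_container; };

    bool MarkedForDeoptimization(const CodeModel& code) {
      // Step 1: LoadTaggedPointerField(..., Code::kCodeDataContainerOffset)
      const ContainerModel* container = code.code_data_container;
      // Step 2: Lw(..., kKindSpecificFlagsOffset) + And(...) against the deopt bit
      return (container->kind_specific_flags & (1u << 0)) != 0;  // bit 0 illustrative
    }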
@ -689,7 +689,7 @@ void Assembler::EnsureSpaceFor(int space_needed) {
}
}

void Assembler::call(Handle<CodeT> target, RelocInfo::Mode rmode) {
void Assembler::call(Handle<CodeDataContainer> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);

@ -698,7 +698,7 @@ void Assembler::call(Handle<CodeT> target, RelocInfo::Mode rmode) {
brasl(r14, Operand(target_index));
}

void Assembler::jump(Handle<CodeT> target, RelocInfo::Mode rmode,
void Assembler::jump(Handle<CodeDataContainer> target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
EnsureSpace ensure_space(this);

@ -1073,8 +1073,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
basr(r14, r1);
}

void call(Handle<CodeT> target, RelocInfo::Mode rmode);
void jump(Handle<CodeT> target, RelocInfo::Mode rmode, Condition cond);
void call(Handle<CodeDataContainer> target, RelocInfo::Mode rmode);
void jump(Handle<CodeDataContainer> target, RelocInfo::Mode rmode,
Condition cond);

// S390 instruction generation
#define DECLARE_VRR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \

@ -416,7 +416,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(target), rmode, cond);
}

void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,

@ -469,7 +469,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
basr(r14, ip);
}

void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);

@ -502,7 +502,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
Call(ip);
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}

@ -528,7 +529,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
break;
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
} else {
LoadU64(ip, EntryFromBuiltinAsOperand(builtin));

@ -557,10 +559,11 @@ void TurboAssembler::Drop(Register count, Register scratch) {
AddS64(sp, sp, scratch);
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
Register scratch) {
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch) {
LoadS32(scratch,
FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
FieldMemOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset));
TestBit(scratch, Code::kMarkedForDeoptimizationBit, scratch);
}

@ -2043,7 +2046,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
{
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch);
__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry,
scratch);
__ bne(&heal_optimized_code_slot);
}

@ -2182,9 +2186,10 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
mov(r2, Operand(num_arguments));
Move(r3, ExternalReference::Create(f));
#if V8_TARGET_ARCH_S390X
Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
#else
Handle<CodeT> code = CodeFactory::CEntry(isolate(), 1);
Handle<CodeDataContainer> code = CodeFactory::CEntry(isolate(), 1);
#endif

Call(code, RelocInfo::CODE_TARGET);

@ -2202,7 +2207,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r3, builtin);
Handle<CodeT> code =
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}

@ -96,7 +96,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<CodeDataContainer> code, RelocInfo::Mode rmode,
Condition cond = al);
void Jump(const ExternalReference& reference);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {

@ -110,7 +111,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
void Call(Handle<CodeDataContainer> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }

@ -1799,7 +1801,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
SmiCheck smi_check = SmiCheck::kInline);

void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
void TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch);
Operand ClearedValue() const;

private:
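Like ppc, the s390 CallRuntime hunk preserves the word-size split: 64-bit builds forward the runtime function's result_size to CodeFactory::CEntry, while 32-bit builds always request 1. A one-line model of that selection (the function name, and the rationale that multi-word results only fit on 64-bit targets, are assumptions on my part, not stated in the patch):

    int CEntryResultSize(bool is_64bit_target, int runtime_result_size) {
      return is_64bit_target ? runtime_result_size : 1;  // assumed rationale
    }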
@ -226,7 +226,7 @@ int Assembler::deserialization_special_target_size(
return kSpecialTargetSize;
}

Handle<CodeT> Assembler::code_target_object_handle_at(Address pc) {
Handle<CodeDataContainer> Assembler::code_target_object_handle_at(Address pc) {
return GetCodeTarget(ReadUnalignedValue<int32_t>(pc));
}

@ -1024,9 +1024,9 @@ void Assembler::call(Label* L) {
}
}

void Assembler::call(Handle<CodeT> target, RelocInfo::Mode rmode) {
void Assembler::call(Handle<CodeDataContainer> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK(FromCodeT(*target).IsExecutable());
DCHECK(FromCodeDataContainer(*target).IsExecutable());
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);

@ -1437,7 +1437,8 @@ void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
emitl(static_cast<int32_t>(entry));
}

void Assembler::j(Condition cc, Handle<CodeT> target, RelocInfo::Mode rmode) {
void Assembler::j(Condition cc, Handle<CodeDataContainer> target,
RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(is_uint4(cc));
// 0000 1111 1000 tttn #32-bit disp.

@ -1516,7 +1517,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
}
}

void Assembler::jmp(Handle<CodeT> target, RelocInfo::Mode rmode) {
void Assembler::jmp(Handle<CodeDataContainer> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
// 1110 1001 #32-bit disp.

@ -478,7 +478,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);

inline Handle<CodeT> code_target_object_handle_at(Address pc);
inline Handle<CodeDataContainer> code_target_object_handle_at(Address pc);
inline Handle<HeapObject> compressed_embedded_object_handle_at(Address pc);

// Number of bytes taken up by the branch target in the code.

@ -827,7 +827,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void near_jmp(intptr_t disp, RelocInfo::Mode rmode);
void near_j(Condition cc, intptr_t disp, RelocInfo::Mode rmode);

void call(Handle<CodeT> target,
void call(Handle<CodeDataContainer> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);

// Call near absolute indirect, address in register

@ -838,7 +838,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Use a 32-bit signed displacement.
// Unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(Handle<CodeT> target, RelocInfo::Mode rmode);
void jmp(Handle<CodeDataContainer> target, RelocInfo::Mode rmode);

// Jump near absolute indirect (r64)
void jmp(Register adr);

@ -851,7 +851,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional jumps
void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, Address entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<CodeT> target, RelocInfo::Mode rmode);
void j(Condition cc, Handle<CodeDataContainer> target, RelocInfo::Mode rmode);

// Floating-point operations
void fld(int i);

@ -575,7 +575,8 @@ void TurboAssembler::CallTSANStoreStub(Register address, Register value,

if (isolate()) {
Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code_target =
isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY

@ -616,7 +617,8 @@ void TurboAssembler::CallTSANRelaxedLoadStub(Register address,

if (isolate()) {
Builtin builtin = CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size);
Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code_target =
isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY

@ -776,7 +778,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Move(rax, num_arguments);
LoadAddress(rbx, ExternalReference::Create(f));
Handle<CodeT> code = CodeFactory::CEntry(isolate(), f->result_size);
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}

@ -804,7 +807,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
Handle<CodeT> code =
Handle<CodeDataContainer> code =
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}

@ -834,8 +837,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,

// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
__ AssertCodeT(optimized_code_entry);
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry);
__ AssertCodeDataContainer(optimized_code_entry);
__ TestCodeDataContainerIsMarkedForDeoptimization(optimized_code_entry);
__ j(not_zero, &heal_optimized_code_slot);

// Optimized code is good, get it into the closure and link the closure into

@ -903,7 +906,7 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
DCHECK_EQ(closure, kJSFunctionRegister);
// Store the optimized code in the closure.
AssertCodeT(optimized_code);
AssertCodeDataContainer(optimized_code);
StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
optimized_code);
// Write barrier clobbers scratch1 below.

@ -2144,7 +2147,8 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode,
bind(&skip);
}

void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
void TurboAssembler::Jump(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;

@ -2156,8 +2160,8 @@ void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}

void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
Condition cc) {
void TurboAssembler::Jump(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode, Condition cc) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;

@ -2193,7 +2197,8 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
call(kScratchRegister);
}

void TurboAssembler::Call(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
void TurboAssembler::Call(Handle<CodeDataContainer> code_object,
RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;

@ -2244,7 +2249,8 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
Call(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}

@ -2265,7 +2271,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
Jump(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
jmp(code, RelocInfo::CODE_TARGET);
break;
}

@ -2286,7 +2293,8 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) {
Jump(EntryFromBuiltinAsOperand(builtin), cc);
break;
case BuiltinCallJumpMode::kForMksnapshot: {
Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
Handle<CodeDataContainer> code =
isolate()->builtins()->code_handle(builtin);
j(cc, code, RelocInfo::CODE_TARGET);
break;
}

@ -2598,8 +2606,10 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
CompareRange(instance_type_out, lower_limit, higher_limit);
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet) {
testl(FieldOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset),
void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container) {
testl(FieldOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
}

@ -2647,16 +2657,16 @@ void TurboAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) {
Check(zero, AbortReason::kSignedBitOfSmiIsNotZero);
}

void TurboAssembler::AssertCodeT(Register object) {
void TurboAssembler::AssertCodeDataContainer(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsNotACodeT);
Check(not_equal, AbortReason::kOperandIsNotACodeDataContainer);
Push(object);
LoadMap(object, object);
CmpInstanceType(object, CODET_TYPE);
CmpInstanceType(object, CODE_DATA_CONTAINER_TYPE);
popq(object);
Check(equal, AbortReason::kOperandIsNotACodeT);
Check(equal, AbortReason::kOperandIsNotACodeDataContainer);
}

void MacroAssembler::AssertConstructor(Register object) {

@ -388,7 +388,7 @@ class V8_EXPORT_PRIVATE TurboAssembler

void Call(Register reg) { call(reg); }
void Call(Operand op);
void Call(Handle<CodeT> code_object, RelocInfo::Mode rmode);
void Call(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }

@ -418,8 +418,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Jump(const ExternalReference& reference);
void Jump(Operand op);
void Jump(Operand op, Condition cc);
void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode);
void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode, Condition cc);
void Jump(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode);
void Jump(Handle<CodeDataContainer> code_object, RelocInfo::Mode rmode,
Condition cc);

void BailoutIfDeoptimized(Register scratch);
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,

@ -473,11 +474,12 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);

// Abort execution if argument is not a CodeT, enabled via --debug-code.
void AssertCodeT(Register object) NOOP_UNLESS_DEBUG_CODE
// Abort execution if argument is not a CodeDataContainer, enabled via
// --debug-code.
void AssertCodeDataContainer(Register object) NOOP_UNLESS_DEBUG_CODE

// Print a message to stdout and abort execution.
void Abort(AbortReason msg);

// Check that the stack is aligned.
void CheckStackAlignment();

@ -828,7 +830,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
andq(reg, Immediate(mask));
}

void TestCodeTIsMarkedForDeoptimization(Register codet);
void TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container);
Immediate ClearedValue() const;

// Tiering support.
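The renamed x64 assertion keeps its two-stage shape: reject Smis via the tag bit, then compare the map's instance type against CODE_DATA_CONTAINER_TYPE. A standalone model, with tag and type constants that are illustrative rather than V8's real encodings:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kSmiTagMask = 1;               // low bit set = heap object (illustrative)
    constexpr uint16_t kCodeDataContainerType = 0xA0;  // illustrative instance type

    struct MapModel { uint16_t instance_type; };

    void AssertCodeDataContainer(uintptr_t tagged_ptr, const MapModel& map) {
      assert((tagged_ptr & kSmiTagMask) != 0);  // the testb / Check(not_equal, ...) pair
      assert(map.instance_type == kCodeDataContainerType);  // CmpInstanceType / Check(equal, ...)
    }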
@ -235,11 +235,6 @@ const size_t kShortBuiltinCallsOldSpaceSizeThreshold = size_t{2} * GB;
#define V8_EXTERNAL_CODE_SPACE_BOOL false
#endif

// TODO(jgruber): Remove the CodeT alias, rename CodeDataContainer to Code and
// Code to CodeInstructions (or similar).
class CodeDataContainer;
using CodeT = CodeDataContainer;

// V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT controls how V8 sets permissions for
// executable pages.
// In particular,
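This hunk is the payload of the whole change: the deleted alias was a plain synonym for CodeDataContainer, which is exactly what made the repository-wide rename a safe textual substitution. Spelled out (the static_assert is mine, not part of the patch):

    #include <type_traits>

    class CodeDataContainer;
    using CodeT = CodeDataContainer;  // the alias removed above

    // This held unconditionally once CodeT no longer depended on build
    // configuration, so every CodeT spelling could be rewritten mechanically.
    static_assert(std::is_same_v<CodeT, CodeDataContainer>);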
@ -90,7 +90,7 @@ class InstructionOperandConverter {
return ToExternalReference(instr_->InputAt(index));
}

Handle<CodeT> InputCode(size_t index) {
Handle<CodeDataContainer> InputCode(size_t index) {
return ToCode(instr_->InputAt(index));
}

@ -172,7 +172,7 @@ class InstructionOperandConverter {
return ToConstant(op).ToExternalReference();
}

Handle<CodeT> ToCode(InstructionOperand* op) {
Handle<CodeDataContainer> ToCode(InstructionOperand* op) {
return ToConstant(op).ToCode();
}

@ -686,7 +686,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
InstructionOperand* op = instr->InputAt(0);
if (op->IsImmediate()) {
Handle<CodeT> code = i.InputCode(0);
Handle<CodeDataContainer> code = i.InputCode(0);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);

@ -739,7 +739,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<CodeT> code = i.InputCode(0);
Handle<CodeDataContainer> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);

@ -581,11 +581,11 @@ Handle<HeapObject> Constant::ToHeapObject() const {
return value;
}

Handle<CodeT> Constant::ToCode() const {
Handle<CodeDataContainer> Constant::ToCode() const {
DCHECK_EQ(kHeapObject, type());
Handle<CodeT> value(
Handle<CodeDataContainer> value(
reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
DCHECK(value->IsCodeT(GetPtrComprCageBaseSlow(*value)));
DCHECK(value->IsCodeDataContainer(GetPtrComprCageBaseSlow(*value)));
return value;
}

@ -1192,7 +1192,7 @@ class V8_EXPORT_PRIVATE Constant final {
}

Handle<HeapObject> ToHeapObject() const;
Handle<CodeT> ToCode() const;
Handle<CodeDataContainer> ToCode() const;

private:
Type type_;

@ -1263,7 +1263,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<CodeT> code = i.InputCode(0);
Handle<CodeDataContainer> code = i.InputCode(0);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);

@ -1323,7 +1323,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<CodeT> code = i.InputCode(0);
Handle<CodeDataContainer> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);

@ -2202,7 +2202,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
bytecode_iterator().GetFlag8Operand(2))
? AllocationType::kOld
: AllocationType::kYoung;
CodeTRef compile_lazy =
CodeDataContainerRef compile_lazy =
MakeRef(broker(), *BUILTIN_CODE(jsgraph()->isolate(), CompileLazy));
const Operator* op =
javascript()->CreateClosure(shared_info, compile_lazy, allocation);
@ -1000,7 +1000,7 @@ Node* CodeAssembler::CallRuntimeImpl(
|
||||
Runtime::FunctionId function, TNode<Object> context,
|
||||
std::initializer_list<TNode<Object>> args) {
|
||||
int result_size = Runtime::FunctionForId(function)->result_size;
|
||||
TNode<CodeT> centry =
|
||||
TNode<CodeDataContainer> centry =
|
||||
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
|
||||
constexpr size_t kMaxNumArgs = 6;
|
||||
DCHECK_GE(kMaxNumArgs, args.size());
|
||||
@ -1033,7 +1033,7 @@ void CodeAssembler::TailCallRuntimeImpl(
|
||||
Runtime::FunctionId function, TNode<Int32T> arity, TNode<Object> context,
|
||||
std::initializer_list<TNode<Object>> args) {
|
||||
int result_size = Runtime::FunctionForId(function)->result_size;
|
||||
TNode<CodeT> centry =
|
||||
TNode<CodeDataContainer> centry =
|
||||
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
|
||||
constexpr size_t kMaxNumArgs = 6;
|
||||
DCHECK_GE(kMaxNumArgs, args.size());
|
||||
@ -1089,7 +1089,8 @@ Node* CodeAssembler::CallStubN(StubCallMode call_mode,
|
||||
}
|
||||
|
||||
void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
|
||||
TNode<CodeT> target, TNode<Object> context,
|
||||
TNode<CodeDataContainer> target,
|
||||
TNode<Object> context,
|
||||
std::initializer_list<Node*> args) {
|
||||
constexpr size_t kMaxNumArgs = 11;
|
||||
DCHECK_GE(kMaxNumArgs, args.size());
|
||||
@ -1194,7 +1195,8 @@ template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch(
|
||||
TNode<Object>, TNode<IntPtrT>, TNode<BytecodeArray>,
|
||||
TNode<ExternalReference>);
|
||||
|
||||
void CodeAssembler::TailCallJSCode(TNode<CodeT> code, TNode<Context> context,
|
||||
void CodeAssembler::TailCallJSCode(TNode<CodeDataContainer> code,
|
||||
TNode<Context> context,
|
||||
TNode<JSFunction> function,
|
||||
TNode<Object> new_target,
|
||||
TNode<Int32T> arg_count) {
|
||||
|
@ -1170,13 +1170,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
|
||||
template <class T = Object, class... TArgs>
|
||||
TNode<T> CallStub(Callable const& callable, TNode<Object> context,
|
||||
TArgs... args) {
|
||||
TNode<CodeT> target = HeapConstant(callable.code());
|
||||
TNode<CodeDataContainer> target = HeapConstant(callable.code());
|
||||
return CallStub<T>(callable.descriptor(), target, context, args...);
|
||||
}
|
||||
|
||||
template <class T = Object, class... TArgs>
|
||||
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
|
||||
TNode<CodeT> target, TNode<Object> context, TArgs... args) {
|
||||
TNode<CodeDataContainer> target, TNode<Object> context,
|
||||
TArgs... args) {
|
||||
return UncheckedCast<T>(CallStubR(StubCallMode::kCallCodeObject, descriptor,
|
||||
target, context, args...));
|
||||
}
|
||||
@ -1192,13 +1193,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
|
||||
template <class... TArgs>
|
||||
void TailCallStub(Callable const& callable, TNode<Object> context,
|
||||
TArgs... args) {
|
||||
TNode<CodeT> target = HeapConstant(callable.code());
|
||||
TNode<CodeDataContainer> target = HeapConstant(callable.code());
|
||||
TailCallStub(callable.descriptor(), target, context, args...);
|
||||
}
|
||||
|
||||
template <class... TArgs>
|
||||
void TailCallStub(const CallInterfaceDescriptor& descriptor,
|
||||
TNode<CodeT> target, TNode<Object> context, TArgs... args) {
|
||||
TNode<CodeDataContainer> target, TNode<Object> context,
|
||||
TArgs... args) {
|
||||
TailCallStubImpl(descriptor, target, context, {args...});
|
||||
}
|
||||
|
||||
@ -1221,7 +1223,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Note that no arguments adaptation is going on here - all the JavaScript
// arguments are left on the stack unmodified. Therefore, this tail call can
// only be used after arguments adaptation has been performed already.
void TailCallJSCode(TNode<CodeT> code, TNode<Context> context,
void TailCallJSCode(TNode<CodeDataContainer> code, TNode<Context> context,
TNode<JSFunction> function, TNode<Object> new_target,
TNode<Int32T> arg_count);

@ -1230,7 +1232,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* receiver, TArgs... args) {
int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<CodeT> target = HeapConstant(callable.code());
TNode<CodeDataContainer> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
CAST(function), {}, arity, {receiver, args...}));
}
@ -1241,7 +1243,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
TNode<CodeT> target = HeapConstant(callable.code());
TNode<CodeDataContainer> target = HeapConstant(callable.code());
return CallJSStubImpl(callable.descriptor(), target, CAST(context),
CAST(function), CAST(new_target), arity,
{receiver, args...});
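Both CallJS-style helpers above derive argc with JSParameterCount before building the arity constant. Assuming, as in current V8, that the count includes one slot for the receiver, a minimal model:

#include <cassert>

// Assumption: the receiver occupies one argc slot (V8's kJSArgcReceiverSlots).
constexpr int kReceiverSlots = 1;

constexpr int JSParameterCountModel(int args_without_receiver) {
  return args_without_receiver + kReceiverSlots;
}

int main() {
  // Two explicit arguments plus the receiver give an arity of three.
  static_assert(JSParameterCountModel(2) == 3, "receiver counted in argc");
  assert(JSParameterCountModel(0) == 1);  // receiver-only call
  return 0;
}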
@ -1340,7 +1342,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
std::initializer_list<TNode<Object>> args);

void TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
TNode<CodeT> target, TNode<Object> context,
TNode<CodeDataContainer> target, TNode<Object> context,
std::initializer_list<Node*> args);

void TailCallStubThenBytecodeDispatchImpl(

@ -1048,10 +1048,6 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS

bool ObjectRef::IsCodeT() const { return IsCodeDataContainer(); }

CodeTRef ObjectRef::AsCodeT() const { return AsCodeDataContainer(); }

bool ObjectRef::IsSmi() const { return data()->is_smi(); }

int ObjectRef::AsSmi() const {
@ -2204,8 +2200,8 @@ BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C

CodeTRef JSFunctionRef::code() const {
CodeT code = object()->code(kAcquireLoad);
CodeDataContainerRef JSFunctionRef::code() const {
CodeDataContainer code = object()->code(kAcquireLoad);
return MakeRefAssumeMemoryFence(broker(), code);
}

@ -2307,14 +2303,14 @@ unsigned CodeRef::GetInlinedBytecodeSize() const {
}

unsigned CodeDataContainerRef::GetInlinedBytecodeSize() const {
CodeDataContainer codet = *object();
if (codet.is_off_heap_trampoline()) {
CodeDataContainer code_data_container = *object();
if (code_data_container.is_off_heap_trampoline()) {
return 0;
}

// Safe to do a relaxed conversion to Code here since CodeT::code field is
// modified only by GC and the CodeT was acquire-loaded.
Code code = codet.code(kRelaxedLoad);
// Safe to do a relaxed conversion to Code here since CodeDataContainer::code
// field is modified only by GC and the CodeDataContainer was acquire-loaded.
Code code = code_data_container.code(kRelaxedLoad);
return GetInlinedBytecodeSizeImpl(code);
}
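The rewritten comment above makes a subtle memory-ordering argument: because the CodeDataContainer was acquire-loaded, a later relaxed load of its code field still observes everything published before the matching release store. A standalone illustration of that acquire-then-relaxed pattern in portable C++ (not V8 code):

#include <atomic>
#include <cassert>
#include <thread>

struct Container {
  std::atomic<int> code{0};  // plays the role of the code field
};

std::atomic<Container*> slot{nullptr};

int main() {
  static Container c;
  std::thread writer([] {
    c.code.store(42, std::memory_order_relaxed);  // field write before publish
    slot.store(&c, std::memory_order_release);    // publish the container
  });
  Container* p = nullptr;
  while ((p = slot.load(std::memory_order_acquire)) == nullptr) {}  // acquire
  // The acquire load synchronizes with the release store, so this relaxed
  // load is guaranteed to observe 42.
  assert(p->code.load(std::memory_order_relaxed) == 42);
  writer.join();
  return 0;
}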
@ -208,9 +208,6 @@ class TinyRef {
HEAP_BROKER_OBJECT_LIST(V)
#undef V

using CodeTRef = CodeDataContainerRef;
using CodeTTinyRef = CodeDataContainerTinyRef;

class V8_EXPORT_PRIVATE ObjectRef {
public:
ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true)
@ -233,14 +230,6 @@ class V8_EXPORT_PRIVATE ObjectRef {
HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL

// CodeT is defined as an alias to either CodeDataContainer or Code, depending
// on the architecture. We can't put it in HEAP_BROKER_OBJECT_LIST, because
// this list already contains CodeDataContainer and Code. Still, defining
// IsCodeT and AsCodeT is useful to write code that is independent of
// V8_EXTERNAL_CODE_SPACE.
bool IsCodeT() const;
CodeTRef AsCodeT() const;

bool IsNull() const;
bool IsNullOrUndefined() const;
bool IsTheHole() const;
@ -470,7 +459,7 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
CodeTRef code() const;
CodeDataContainerRef code() const;

bool has_initial_map(CompilationDependencies* dependencies) const;
bool PrototypeRequiresRuntimeLookup(
@ -586,7 +586,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
CodeTRef code = MakeRef(broker_, *callable.code());
CodeDataContainerRef code = MakeRef(broker_, *callable.code());
return AddNode<JSFunction>(graph()->NewNode(
javascript()->CreateClosure(shared, code), HeapConstant(feedback_cell),
context, effect(), control()));
@ -6909,7 +6909,7 @@ Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
CodeTRef code = MakeRef(broker(), *callable.code());
CodeDataContainerRef code = MakeRef(broker(), *callable.code());
return graph()->NewNode(javascript()->CreateClosure(shared, code),
jsgraph()->HeapConstant(feedback_cell), context,
effect, control);

@ -1310,7 +1310,7 @@ const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
}

const Operator* JSOperatorBuilder::CreateClosure(
const SharedFunctionInfoRef& shared_info, const CodeTRef& code,
const SharedFunctionInfoRef& shared_info, const CodeDataContainerRef& code,
AllocationType allocation) {
static constexpr int kFeedbackCell = 1;
static constexpr int kArity = kFeedbackCell;
@ -676,18 +676,21 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
class CreateClosureParameters final {
public:
CreateClosureParameters(const SharedFunctionInfoRef& shared_info,
const CodeTRef& code, AllocationType allocation)
const CodeDataContainerRef& code,
AllocationType allocation)
: shared_info_(shared_info), code_(code), allocation_(allocation) {}

SharedFunctionInfoRef shared_info(JSHeapBroker* broker) const {
return shared_info_.AsRef(broker);
}
CodeTRef code(JSHeapBroker* broker) const { return code_.AsRef(broker); }
CodeDataContainerRef code(JSHeapBroker* broker) const {
return code_.AsRef(broker);
}
AllocationType allocation() const { return allocation_; }

private:
const SharedFunctionInfoTinyRef shared_info_;
const CodeTTinyRef code_;
const CodeDataContainerTinyRef code_;
AllocationType const allocation_;

friend bool operator==(CreateClosureParameters const&,
@ -950,7 +953,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateCollectionIterator(CollectionKind, IterationKind);
const Operator* CreateBoundFunction(size_t arity, const MapRef& map);
const Operator* CreateClosure(
const SharedFunctionInfoRef& shared_info, const CodeTRef& code,
const SharedFunctionInfoRef& shared_info,
const CodeDataContainerRef& code,
AllocationType allocation = AllocationType::kYoung);
const Operator* CreateIterResultObject();
const Operator* CreateStringIterator();
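CreateClosureParameters now stores a CodeDataContainerTinyRef and rehydrates a full ref through the broker on demand. A standalone model of that TinyRef/Ref split (illustrative types, not V8 API):

#include <cassert>

struct Broker { int id = 7; };  // stand-in for JSHeapBroker

struct FullRef {
  const int* object;
  Broker* broker;  // full refs are bound to a broker
};

struct TinyRef {
  const int* object;  // compact: object pointer only, no broker
  FullRef AsRef(Broker* broker) const { return FullRef{object, broker}; }
};

class Params {
 public:
  explicit Params(TinyRef code) : code_(code) {}
  // Mirrors code(JSHeapBroker*) above: rebind lazily, at lookup time.
  FullRef code(Broker* broker) const { return code_.AsRef(broker); }

 private:
  const TinyRef code_;
};

int main() {
  int obj = 42;
  Broker broker;
  Params params(TinyRef{&obj});
  assert(params.code(&broker).broker == &broker);
  return 0;
}

Storing the tiny form keeps the operator parameters broker-independent; only readers pay for the rebind.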
@ -9,7 +9,7 @@

namespace v8::internal::compiler::turboshaft {

Handle<CodeT> BuiltinCodeHandle(Builtin builtin, Isolate* isolate) {
Handle<CodeDataContainer> BuiltinCodeHandle(Builtin builtin, Isolate* isolate) {
return isolate->builtins()->code_handle(builtin);
}

@ -33,7 +33,7 @@ enum class Builtin : int32_t;

namespace v8::internal::compiler::turboshaft {

Handle<CodeT> BuiltinCodeHandle(Builtin builtin, Isolate* isolate);
Handle<CodeDataContainer> BuiltinCodeHandle(Builtin builtin, Isolate* isolate);

// Forward declarations
template <class Assembler>
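Given the declaration above, fetching a builtin's handle stays a one-liner. A hedged usage sketch (V8-internal context assumed, so not compilable in isolation; Builtin::kNotifyDeoptimized is borrowed from the deoptimizer hunks below):

// Inside v8::internal code (sketch only):
Handle<CodeDataContainer> entry = compiler::turboshaft::BuiltinCodeHandle(
    Builtin::kNotifyDeoptimized, isolate);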
@ -8414,8 +8414,9 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
return code;
}

Handle<CodeT> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
const wasm::WasmModule* module) {
Handle<CodeDataContainer> CompileCWasmEntry(Isolate* isolate,
const wasm::FunctionSig* sig,
const wasm::WasmModule* module) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
isolate->allocator(), ZONE_NAME, kCompressGraphZone);
Graph* graph = zone->New<Graph>(zone.get());
@ -8464,7 +8465,7 @@ Handle<CodeT> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
CompilationJob::FAILED);
CHECK_NE(job->FinalizeJob(isolate), CompilationJob::FAILED);

return ToCodeT(job->compilation_info()->code(), isolate);
return ToCodeDataContainer(job->compilation_info()->code(), isolate);
}

namespace {

@ -165,7 +165,7 @@ enum CWasmEntryParameters {

// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
// which knows how to feed it its parameters.
V8_EXPORT_PRIVATE Handle<CodeT> CompileCWasmEntry(
V8_EXPORT_PRIVATE Handle<CodeDataContainer> CompileCWasmEntry(
Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);

// Values from the instance object are cached between Wasm-level function calls.
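Per the comment above, the returned stub bridges C++ linkage into a Wasm entry point. A hedged usage sketch matching the declared signature (V8-internal context assumed, not standalone; sig and module would come from the module under test):

// Sketch only, not compilable in isolation:
Handle<CodeDataContainer> c_entry =
    compiler::CompileCWasmEntry(isolate, sig, module);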
@ -1231,7 +1231,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
for (Builtin caller = Builtins::kFirst; caller <= Builtins::kLast; ++caller) {
DebugInfo::SideEffectState state = BuiltinGetSideEffectState(caller);
if (state != DebugInfo::kHasNoSideEffect) continue;
Code code = FromCodeT(isolate->builtins()->code(caller));
Code code = FromCodeDataContainer(isolate->builtins()->code(caller));
int mode = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
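The mode mask built above is a plain bit-set: each RelocInfo mode contributes one bit, and later iteration filters on the OR of the interesting bits. A standalone model of that filter (illustrative enum, not V8's):

#include <cassert>

enum Mode { kCodeTarget, kRelativeCodeTarget, kEmbeddedObject };
constexpr int ModeMask(Mode mode) { return 1 << mode; }

int main() {
  const int mask = ModeMask(kCodeTarget) | ModeMask(kRelativeCodeTarget);
  assert((mask & ModeMask(kCodeTarget)) != 0);      // selected
  assert((mask & ModeMask(kEmbeddedObject)) == 0);  // filtered out
  return 0;
}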
@ -1636,7 +1636,8 @@ void Debug::InstallDebugBreakTrampoline() {

if (!needs_to_use_trampoline) return;

Handle<CodeT> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
Handle<CodeDataContainer> trampoline =
BUILTIN_CODE(isolate_, DebugBreakTrampoline);
std::vector<Handle<JSFunction>> needs_compile;
using AccessorPairWithContext =
std::pair<Handle<AccessorPair>, Handle<NativeContext>>;
@ -229,7 +229,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
namespace {
class ActivationsFinder : public ThreadVisitor {
public:
ActivationsFinder(CodeT topmost_optimized_code,
ActivationsFinder(CodeDataContainer topmost_optimized_code,
bool safe_to_deopt_topmost_optimized_code) {
#ifdef DEBUG
topmost_ = topmost_optimized_code;
@ -243,7 +243,8 @@ class ActivationsFinder : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
if (it.frame()->is_optimized()) {
CodeT code = it.frame()->LookupCodeT().ToCodeT();
CodeDataContainer code =
it.frame()->LookupCodeDataContainer().ToCodeDataContainer();
if (CodeKindCanDeoptimize(code.kind()) &&
code.marked_for_deoptimization()) {
// Obtain the trampoline to the deoptimizer call.
@ -272,7 +273,7 @@ class ActivationsFinder : public ThreadVisitor {

private:
#ifdef DEBUG
CodeT topmost_;
CodeDataContainer topmost_;
bool safe_to_deopt_;
#endif
};
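ActivationsFinder's VisitThread above walks every stack frame and reacts only to optimized frames whose code can deoptimize and is already marked. A standalone model of that scan (plain data structures, not V8's frame iterator):

#include <cassert>
#include <vector>

struct CodeObj {
  bool can_deopt = false;
  bool marked_for_deoptimization = false;
};

struct Frame {
  bool optimized = false;
  CodeObj code;
};

// Counts frames whose code must be patched, as VisitThread does above.
int CountDeoptTargets(const std::vector<Frame>& stack) {
  int n = 0;
  for (const Frame& f : stack) {
    if (f.optimized && f.code.can_deopt && f.code.marked_for_deoptimization) {
      ++n;  // the real code installs the deopt trampoline here
    }
  }
  return n;
}

int main() {
  std::vector<Frame> stack = {{true, {true, true}}, {true, {true, false}}};
  assert(CountDeoptTargets(stack) == 1);
  return 0;
}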
@ -283,7 +284,7 @@ class ActivationsFinder : public ThreadVisitor {
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
DisallowGarbageCollection no_gc;

CodeT topmost_optimized_code;
CodeDataContainer topmost_optimized_code;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
// Make sure all activations of optimized code can deopt at their current PC.
@ -292,7 +293,8 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
it.Advance()) {
if (it.frame()->is_optimized()) {
CodeT code = it.frame()->LookupCodeT().ToCodeT();
CodeDataContainer code =
it.frame()->LookupCodeDataContainer().ToCodeDataContainer();
JSFunction function =
static_cast<OptimizedFrame*>(it.frame())->function();
TraceFoundActivation(isolate, function);
@ -347,7 +349,8 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
DeoptimizeMarkedCode(isolate);
}

void Deoptimizer::DeoptimizeFunction(JSFunction function, CodeT code) {
void Deoptimizer::DeoptimizeFunction(JSFunction function,
CodeDataContainer code) {
Isolate* isolate = function.GetIsolate();
RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
@ -934,7 +937,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const bool deopt_to_baseline =
shared.HasBaselineCode() && v8_flags.deopt_to_baseline;
const bool restart_frame = goto_catch_handler && is_restart_frame();
CodeT dispatch_builtin = builtins->code(
CodeDataContainer dispatch_builtin = builtins->code(
DispatchBuiltinFor(deopt_to_baseline, advance_bc, restart_frame));

if (verbose_tracing_enabled()) {
@ -1175,7 +1178,8 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
// Set the continuation for the topmost frame.
CodeT continuation = builtins->code(Builtin::kNotifyDeoptimized);
CodeDataContainer continuation =
builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@ -1255,7 +1259,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);

Builtins* builtins = isolate_->builtins();
CodeT construct_stub = builtins->code(Builtin::kJSConstructStubGeneric);
CodeDataContainer construct_stub =
builtins->code(Builtin::kJSConstructStubGeneric);
BytecodeOffset bytecode_offset = translated_frame->bytecode_offset();

const int parameters_count = translated_frame->height();
@ -1409,7 +1414,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
if (is_topmost) {
DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
CodeT continuation = builtins->code(Builtin::kNotifyDeoptimized);
CodeDataContainer continuation =
builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@ -1833,7 +1839,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// For JSToWasmBuiltinContinuations use ContinueToCodeStubBuiltin, and not
// ContinueToCodeStubBuiltinWithResult because we don't want to overwrite the
// return value that we have already set.
CodeT continue_to_builtin =
CodeDataContainer continue_to_builtin =
isolate()->builtins()->code(TrampolineForBuiltinContinuation(
mode, frame_info.frame_has_result_stack_slot() &&
!is_js_to_wasm_builtin_continuation));
@ -1850,7 +1856,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
static_cast<intptr_t>(continue_to_builtin.InstructionStart()));
}

CodeT continuation = isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
CodeDataContainer continuation =
isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@ -79,7 +79,8 @@ class Deoptimizer : public Malloced {
// again and any activations of the optimized code will get deoptimized when
// execution returns. If {code} is specified then the given code is targeted
// instead of the function code (e.g. OSR code not installed on function).
static void DeoptimizeFunction(JSFunction function, CodeT code = {});
static void DeoptimizeFunction(JSFunction function,
CodeDataContainer code = {});

// Deoptimize all code in the given isolate.
V8_EXPORT_PRIVATE static void DeoptimizeAll(Isolate* isolate);
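The defaulted CodeDataContainer argument above keeps both call shapes working. A hedged usage sketch (V8-internal context assumed, not standalone; the second form mirrors the TranslatedState call in the next hunk):

// Deoptimize whatever code the function currently has installed:
Deoptimizer::DeoptimizeFunction(function);
// Or target a specific code object looked up from a frame, e.g. OSR code:
Deoptimizer::DeoptimizeFunction(
    function, frame->LookupCodeDataContainer().ToCodeDataContainer());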
@ -2216,8 +2216,9 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
previously_materialized_objects);
CHECK_EQ(frames_[0].kind(), TranslatedFrame::kUnoptimizedFunction);
CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
Deoptimizer::DeoptimizeFunction(frame->function(),
frame->LookupCodeT().ToCodeT());
Deoptimizer::DeoptimizeFunction(
frame->function(),
frame->LookupCodeDataContainer().ToCodeDataContainer());
}
}