Rename Code to InstructionStream
.. as part of the big Code/CodeDataContainer name shuffle. In the next
step, CodeDataContainer will be renamed to Code.

Bug: v8:13654
Change-Id: Ia80ac984d46dd6c2a108098055a5cd60e22a837c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4171628
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85337}
Commit: 2c7272fdde (parent: 503b56efdf)
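The change below is almost entirely mechanical. As a minimal sketch of the
pattern being applied — illustrative only; the Handle/MaybeHandle stand-ins
here are assumptions for a self-contained example, not V8's real declarations:

#include <optional>

// Stand-ins so the sketch compiles on its own (assumption: V8's actual
// Handle and MaybeHandle templates are much richer than this).
template <typename T>
struct Handle {
  T* location = nullptr;
};
template <typename T>
using MaybeHandle = std::optional<Handle<T>>;

// Before this CL: "Code" named the on-heap object holding machine
// instructions; "CodeDataContainer" held the metadata pointing at it.
struct Code {};
MaybeHandle<Code> GenerateBaselineCodeBefore() { return std::nullopt; }

// After this CL: the instructions object is named "InstructionStream".
// The follow-up CL renames CodeDataContainer to Code, completing the
// name shuffle described in the commit message.
struct InstructionStream {};
MaybeHandle<InstructionStream> GenerateBaselineCodeAfter() {
  return std::nullopt;
}

Call sites keep their shape; only the type name (and constants derived from
it, such as InstructionStream::kHeaderSize) changes, which is why the hunks
below touch signatures, comments, and offset computations but no logic.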
@@ -53,7 +53,7 @@ class BaselineCompilerTask {
     compiler.GenerateCode();
     maybe_code_ = local_isolate->heap()->NewPersistentMaybeHandle(
         compiler.Build(local_isolate));
-    Handle<Code> code;
+    Handle<InstructionStream> code;
     if (maybe_code_.ToHandle(&code)) {
       local_isolate->heap()->RegisterCodeObject(code);
     }
@@ -63,7 +63,7 @@ class BaselineCompilerTask {
   // Executed in the main thread.
   void Install(Isolate* isolate) {
     shared_function_info_->set_is_sparkplug_compiling(false);
-    Handle<Code> code;
+    Handle<InstructionStream> code;
     if (!maybe_code_.ToHandle(&code)) return;
     if (v8_flags.print_code) {
       code->Print();
@@ -97,7 +97,7 @@ class BaselineCompilerTask {
  private:
   Handle<SharedFunctionInfo> shared_function_info_;
   Handle<BytecodeArray> bytecode_;
-  MaybeHandle<Code> maybe_code_;
+  MaybeHandle<InstructionStream> maybe_code_;
   double time_taken_ms_;
 };

@@ -337,7 +337,8 @@ void BaselineCompiler::GenerateCode() {
   }
 }

-MaybeHandle<Code> BaselineCompiler::Build(LocalIsolate* local_isolate) {
+MaybeHandle<InstructionStream> BaselineCompiler::Build(
+    LocalIsolate* local_isolate) {
   CodeDesc desc;
   __ GetCode(local_isolate->GetMainThreadIsolateUnsafe(), &desc);

@@ -58,7 +58,7 @@ class BaselineCompiler {
                    Handle<BytecodeArray> bytecode);

   void GenerateCode();
-  MaybeHandle<Code> Build(LocalIsolate* local_isolate);
+  MaybeHandle<InstructionStream> Build(LocalIsolate* local_isolate);
   static int EstimateInstructionSize(BytecodeArray bytecode);

  private:
@@ -56,14 +56,14 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
   return true;
 }

-MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
-                                       Handle<SharedFunctionInfo> shared) {
+MaybeHandle<InstructionStream> GenerateBaselineCode(
+    Isolate* isolate, Handle<SharedFunctionInfo> shared) {
   RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline);
   Handle<BytecodeArray> bytecode(shared->GetBytecodeArray(isolate), isolate);
   LocalIsolate* local_isolate = isolate->main_thread_local_isolate();
   baseline::BaselineCompiler compiler(local_isolate, shared, bytecode);
   compiler.GenerateCode();
-  MaybeHandle<Code> code = compiler.Build(local_isolate);
+  MaybeHandle<InstructionStream> code = compiler.Build(local_isolate);
   if (v8_flags.print_code && !code.is_null()) {
     code.ToHandleChecked()->Print();
   }
@@ -86,8 +86,8 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
   return false;
 }

-MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
-                                       Handle<SharedFunctionInfo> shared) {
+MaybeHandle<InstructionStream> GenerateBaselineCode(
+    Isolate* isolate, Handle<SharedFunctionInfo> shared) {
   UNREACHABLE();
 }

@@ -10,14 +10,14 @@
 namespace v8 {
 namespace internal {

-class Code;
+class InstructionStream;
 class SharedFunctionInfo;
 class MacroAssembler;

 bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared);

-MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
-                                       Handle<SharedFunctionInfo> shared);
+MaybeHandle<InstructionStream> GenerateBaselineCode(
+    Isolate* isolate, Handle<SharedFunctionInfo> shared);

 void EmitReturnBaseline(MacroAssembler* masm);

@@ -1715,8 +1715,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is a InstructionStream object,
+    // it must NOT be marked_for_deoptimization (callers must ensure this).
     __ cmp(maybe_target_code, Operand(Smi::zero()));
     __ b(ne, &jump_to_optimized_code);
   }
@@ -1759,16 +1759,20 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ LeaveFrame(StackFrame::STUB);
   }

-  __ LoadCodeDataContainerCodeNonBuiltin(r0, r0);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(r0, r0);

   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ ldr(r1,
-         FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
+  __ ldr(
+      r1,
+      FieldMemOperand(
+          r0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));

   {
     ConstantPoolUnavailableScope constant_pool_unavailable(masm);
-    __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
+    __ add(r0, r0,
+           Operand(InstructionStream::kHeaderSize -
+                   kHeapObjectTag));  // InstructionStream start

     // Load the OSR entrypoint offset from the deoptimization data.
     // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2000,7 +2004,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 }  // namespace

 // static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
@@ -3262,8 +3267,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {

 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
-  // purpose Code object) to be able to call into C functions that may trigger
-  // GC and thus move the caller.
+  // purpose InstructionStream object) to be able to call into C functions that
+  // may trigger GC and thus move the caller.
   //
   // DirectCEntry places the return address on the stack (updated by the GC),
   // making the call GC safe. The irregexp backend relies on this.
@@ -3557,7 +3562,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register closure = r1;
   __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

-  // Get the Code object from the shared function info.
+  // Get the InstructionStream object from the shared function info.
   Register code_obj = r4;
   __ ldr(code_obj,
          FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3588,7 +3593,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     AssertCodeDataContainerIsBaseline(masm, code_obj, r3);
   }
-  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);

   // Load the feedback vector.
   Register feedback_vector = r2;
@@ -3663,9 +3668,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     UseScratchRegisterScope temps(masm);
     ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, temps.Acquire());
     Generate_OSREntry(masm, code_obj,
-                      Operand(Code::kHeaderSize - kHeapObjectTag));
+                      Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
   } else {
-    __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ add(code_obj, code_obj,
+           Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
     __ Jump(code_obj);
   }
   __ Trap();  // Unreachable.
@@ -1952,8 +1952,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is a InstructionStream object,
+    // it must NOT be marked_for_deoptimization (callers must ensure this).
     __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code);
   }

@@ -1994,13 +1994,14 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ LeaveFrame(StackFrame::STUB);
   }

-  __ LoadCodeDataContainerCodeNonBuiltin(x0, x0);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(x0, x0);

   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
   __ LoadTaggedPointerField(
       x1,
-      FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
+      FieldMemOperand(
+          x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));

   // Load the OSR entrypoint offset from the deoptimization data.
   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2011,7 +2012,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   // Compute the target address = code_obj + header_size + osr_offset
   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
   __ Add(x0, x0, x1);
-  Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
+  Generate_OSREntry(masm, x0, InstructionStream::kHeaderSize - kHeapObjectTag);
 }

 }  // namespace
@@ -2333,7 +2334,8 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
 }  // namespace

 // static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
@@ -5377,8 +5379,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {

 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
-  // purpose Code object) to be able to call into C functions that may trigger
-  // GC and thus move the caller.
+  // purpose InstructionStream object) to be able to call into C functions that
+  // may trigger GC and thus move the caller.
   //
   // DirectCEntry places the return address on the stack (updated by the GC),
   // making the call GC safe. The irregexp backend relies on this.
@@ -5693,7 +5695,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register closure = x1;
   __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

-  // Get the Code object from the shared function info.
+  // Get the InstructionStream object from the shared function info.
   Register code_obj = x22;
   __ LoadTaggedPointerField(
       code_obj,
@@ -5726,7 +5728,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     AssertCodeDataContainerIsBaseline(masm, code_obj, x3);
   }
-  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);

   // Load the feedback vector.
   Register feedback_vector = x2;
@@ -5799,9 +5801,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   if (is_osr) {
     ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
-    Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+    Generate_OSREntry(masm, code_obj,
+                      InstructionStream::kHeaderSize - kHeapObjectTag);
   } else {
-    __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+    __ Add(code_obj, code_obj, InstructionStream::kHeaderSize - kHeapObjectTag);
     __ Jump(code_obj);
   }
   __ Trap();  // Unreachable.
@@ -229,7 +229,7 @@ type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
 type RawPtr<To: type> extends RawPtr;
 type ExternalPointer
     generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
-extern class Code extends HeapObject;
+extern class InstructionStream extends HeapObject;
 type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';

 type Number = Smi|HeapNumber;
@@ -125,9 +125,10 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
   MaybeTailCallOptimizedCodeSlot(function, CAST(feedback_cell_value));
   Goto(&maybe_use_sfi_code);

-  // At this point we have a candidate Code object. It's *not* a cached
-  // optimized Code object (we'd have tail-called it above). A usual case would
-  // be the InterpreterEntryTrampoline to start executing existing bytecode.
+  // At this point we have a candidate InstructionStream object. It's *not* a
+  // cached optimized InstructionStream object (we'd have tail-called it above).
+  // A usual case would be the InterpreterEntryTrampoline to start executing
+  // existing bytecode.
   BIND(&maybe_use_sfi_code);
   Label tailcall_code(this), baseline(this);
   TVARIABLE(CodeDataContainer, code);
@@ -285,7 +285,7 @@ Address Builtins::CppEntryOf(Builtin builtin) {
 }

 // static
-bool Builtins::IsBuiltin(const Code code) {
+bool Builtins::IsBuiltin(const InstructionStream code) {
   return Builtins::IsBuiltinId(code.builtin_id());
 }

@@ -399,7 +399,7 @@ constexpr int OffHeapTrampolineGenerator::kBufferSize;
 }  // namespace

 // static
-Handle<Code> Builtins::GenerateOffHeapTrampolineFor(
+Handle<InstructionStream> Builtins::GenerateOffHeapTrampolineFor(
     Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
     bool generate_jump_to_instruction_stream) {
   DCHECK_NOT_NULL(isolate->embedded_blob_code());
@@ -429,14 +429,14 @@ Handle<ByteArray> Builtins::GenerateOffHeapTrampolineRelocInfo(

   Handle<ByteArray> reloc_info = isolate->factory()->NewByteArray(
       desc.reloc_size, AllocationType::kReadOnly);
-  Code::CopyRelocInfoToByteArray(*reloc_info, desc);
+  InstructionStream::CopyRelocInfoToByteArray(*reloc_info, desc);

   return reloc_info;
 }

 // static
-Handle<Code> Builtins::CreateInterpreterEntryTrampolineForProfiling(
-    Isolate* isolate) {
+Handle<InstructionStream>
+Builtins::CreateInterpreterEntryTrampolineForProfiling(Isolate* isolate) {
   DCHECK_NOT_NULL(isolate->embedded_blob_code());
   DCHECK_NE(0, isolate->embedded_blob_code_size());

@@ -525,18 +525,19 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
 // static
 bool Builtins::CodeObjectIsExecutable(Builtin builtin) {
   // If the runtime/optimized code always knows when executing a given builtin
-  // that it is a builtin, then that builtin does not need an executable Code
-  // object. Such Code objects can go in read_only_space (and can even be
-  // smaller with no branch instruction), thus saving memory.
+  // that it is a builtin, then that builtin does not need an executable
+  // InstructionStream object. Such InstructionStream objects can go in
+  // read_only_space (and can even be smaller with no branch instruction), thus
+  // saving memory.

-  // Builtins with JS linkage will always have executable Code objects since
-  // they can be called directly from jitted code with no way of determining
-  // that they are builtins at generation time. E.g.
+  // Builtins with JS linkage will always have executable InstructionStream
+  // objects since they can be called directly from jitted code with no way of
+  // determining that they are builtins at generation time. E.g.
   //  f = Array.of;
   //  f(1, 2, 3);
   // TODO(delphick): This is probably too loose but for now Wasm can call any JS
-  // linkage builtin via its Code object. Once Wasm is fixed this can either be
-  // tighted or removed completely.
+  // linkage builtin via its InstructionStream object. Once Wasm is fixed this
+  // can either be tighted or removed completely.
   if (Builtins::KindOf(builtin) != BCH && HasJSLinkage(builtin)) {
     return true;
   }
@@ -175,8 +175,8 @@ class Builtins {
   static bool IsCpp(Builtin builtin);

   // True, iff the given code object is a builtin. Note that this does not
-  // necessarily mean that its kind is Code::BUILTIN.
-  static bool IsBuiltin(const Code code);
+  // necessarily mean that its kind is InstructionStream::BUILTIN.
+  static bool IsBuiltin(const InstructionStream code);

   // As above, but safe to access off the main thread since the check is done
   // by handle location. Similar to Heap::IsRootHandle.
@@ -232,7 +232,7 @@ class Builtins {
   // function.
   // TODO(delphick): Come up with a better name since it may not generate an
   // executable trampoline.
-  static Handle<Code> GenerateOffHeapTrampolineFor(
+  static Handle<InstructionStream> GenerateOffHeapTrampolineFor(
       Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
       bool generate_jump_to_instruction_stream);

@@ -241,12 +241,12 @@ class Builtins {
   static Handle<ByteArray> GenerateOffHeapTrampolineRelocInfo(Isolate* isolate);

   // Creates a copy of InterpreterEntryTrampolineForProfiling in the code space.
-  static Handle<Code> CreateInterpreterEntryTrampolineForProfiling(
+  static Handle<InstructionStream> CreateInterpreterEntryTrampolineForProfiling(
       Isolate* isolate);

   // Only builtins with JS linkage should ever need to be called via their
-  // trampoline Code object. The remaining builtins have non-executable Code
-  // objects.
+  // trampoline InstructionStream object. The remaining builtins have
+  // non-executable InstructionStream objects.
   static bool CodeObjectIsExecutable(Builtin builtin);

   static bool IsJSEntryVariant(Builtin builtin) {
@@ -336,8 +336,8 @@ class Builtins {
 };

 V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
-  // Check for kNoBuiltinId first to abort early when the current Code object
-  // is not a builtin.
+  // Check for kNoBuiltinId first to abort early when the current
+  // InstructionStream object is not a builtin.
   return builtin_id != Builtin::kNoBuiltinId &&
          (builtin_id == Builtin::kInterpreterEntryTrampoline ||
           builtin_id == Builtin::kInterpreterEnterAtBytecode ||
@@ -345,8 +345,8 @@ V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
 }

 V8_INLINE constexpr bool IsBaselineTrampolineBuiltin(Builtin builtin_id) {
-  // Check for kNoBuiltinId first to abort early when the current Code object
-  // is not a builtin.
+  // Check for kNoBuiltinId first to abort early when the current
+  // InstructionStream object is not a builtin.
   return builtin_id != Builtin::kNoBuiltinId &&
          (builtin_id == Builtin::kBaselineOutOfLinePrologue ||
           builtin_id == Builtin::kBaselineOutOfLinePrologueDeopt ||
@@ -30,8 +30,8 @@ macro IsCell(o: HeapObject): bool {
 }

 @export
-macro IsCode(o: HeapObject): bool {
-  return Is<Code>(o);
+macro IsInstructionStream(o: HeapObject): bool {
+  return Is<InstructionStream>(o);
 }

 @export
@@ -43,7 +43,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {

   // All code objects should be loaded through the root register or use
   // pc-relative addressing.
-  DCHECK(!object->IsCode());
+  DCHECK(!object->IsInstructionStream());
 #endif

   auto find_result = map_.FindOrInsert(object);
@@ -73,7 +73,7 @@ void CheckPreconditionsForPatching(Isolate* isolate,
 }  // namespace

 void BuiltinsConstantsTableBuilder::PatchSelfReference(
-    Handle<Object> self_reference, Handle<Code> code_object) {
+    Handle<Object> self_reference, Handle<InstructionStream> code_object) {
   CheckPreconditionsForPatching(isolate_, code_object);
   DCHECK(self_reference->IsOddball());
   DCHECK(Oddball::cast(*self_reference).kind() ==
@@ -81,7 +81,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(

   uint32_t key;
   if (map_.Delete(self_reference, &key)) {
-    DCHECK(code_object->IsCode());
+    DCHECK(code_object->IsInstructionStream());
     map_.Insert(code_object, key);
   }
 }
@@ -115,12 +115,13 @@ void BuiltinsConstantsTableBuilder::Finalize() {
   for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
     uint32_t index = *it.entry();
     Object value = it.key();
-    if (value.IsCode() && Code::cast(value).kind() == CodeKind::BUILTIN) {
+    if (value.IsInstructionStream() &&
+        InstructionStream::cast(value).kind() == CodeKind::BUILTIN) {
       // Replace placeholder code objects with the real builtin.
       // See also: SetupIsolateDelegate::PopulateWithPlaceholders.
       // TODO(jgruber): Deduplicate placeholders and their corresponding
       // builtin.
-      value = builtins->code(Code::cast(value).builtin_id());
+      value = builtins->code(InstructionStream::cast(value).builtin_id());
     }
     DCHECK(value.IsHeapObject());
     table->set(index, value);
@@ -33,10 +33,10 @@ class BuiltinsConstantsTableBuilder final {
   uint32_t AddObject(Handle<Object> object);

   // Self-references during code generation start out by referencing a handle
-  // with a temporary dummy object. Once the final Code object exists, such
-  // entries in the constants map must be patched up.
+  // with a temporary dummy object. Once the final InstructionStream object
+  // exists, such entries in the constants map must be patched up.
   void PatchSelfReference(Handle<Object> self_reference,
-                          Handle<Code> code_object);
+                          Handle<InstructionStream> code_object);

   // References to the array that stores basic block usage counters start out as
   // references to a unique oddball. Once the actual array has been allocated,
@@ -2052,7 +2052,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 }  // namespace

 // static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
@@ -2683,8 +2684,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is a InstructionStream object,
+    // it must NOT be marked_for_deoptimization (callers must ensure this).
     __ cmp(maybe_target_code, Immediate(0));
     __ j(not_equal, &jump_to_optimized_code, Label::kNear);
   }
@@ -2727,11 +2728,13 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ leave();
   }

-  __ LoadCodeDataContainerCodeNonBuiltin(eax, eax);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(eax, eax);

   // Load deoptimization data from the code object.
-  __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOrInterpreterDataOffset -
-                               kHeapObjectTag));
+  __ mov(ecx,
+         Operand(eax,
+                 InstructionStream::kDeoptimizationDataOrInterpreterDataOffset -
+                     kHeapObjectTag));

   // Load the OSR entrypoint offset from the deoptimization data.
   __ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt(
@@ -2740,7 +2743,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   __ SmiUntag(ecx);

   // Compute the target address = code_obj + header_size + osr_offset
-  __ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
+  __ lea(eax, Operand(eax, ecx, times_1,
+                      InstructionStream::kHeaderSize - kHeapObjectTag));

   Generate_OSREntry(masm, eax);
 }
@@ -4035,7 +4039,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
   __ mov(Operand(esp, 0 * kSystemPointerSize), eax);  // Function.
   __ mov(Operand(esp, 1 * kSystemPointerSize),
          Immediate(static_cast<int>(deopt_kind)));
-  __ mov(Operand(esp, 2 * kSystemPointerSize), ecx);  // Code address or 0.
+  __ mov(Operand(esp, 2 * kSystemPointerSize),
+         ecx);  // InstructionStream address or 0.
   __ mov(Operand(esp, 3 * kSystemPointerSize), edx);  // Fp-to-sp delta.
   __ Move(Operand(esp, 4 * kSystemPointerSize),
           Immediate(ExternalReference::isolate_address(masm->isolate())));
@@ -4197,7 +4202,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register closure = eax;
   __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));

-  // Get the Code object from the shared function info.
+  // Get the InstructionStream object from the shared function info.
   Register code_obj = esi;
   __ mov(code_obj,
          FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -4230,7 +4235,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     AssertCodeDataContainerIsBaseline(masm, code_obj, ecx);
   }
-  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);

   // Load the feedback vector.
   Register feedback_vector = ecx;
@@ -4296,8 +4301,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                                  kInterpreterBytecodeArrayRegister);
     __ CallCFunction(get_baseline_pc, 3);
   }
-  __ lea(code_obj,
-         FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
+  __ lea(code_obj, FieldOperand(code_obj, kReturnRegister0, times_1,
+                                InstructionStream::kHeaderSize));
   __ pop(kInterpreterAccumulatorRegister);

   if (is_osr) {
@@ -52,8 +52,9 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, Builtin builtin) {
   // PC-relative call/jump instructions can be used for builtin to builtin
   // calls/tail calls. The embedded builtins blob generator also ensures that.
   // However, there are serializer tests, where we force isolate creation at
-  // runtime and at this point, Code space isn't restricted to a size s.t.
-  // PC-relative calls may be used. So, we fall back to an indirect mode.
+  // runtime and at this point, Code space isn't restricted to a
+  // size s.t. PC-relative calls may be used. So, we fall back to an indirect
+  // mode.
   options.use_pc_relative_calls_and_jumps_for_mksnapshot =
       pc_relative_calls_fit_in_code_range;

@@ -75,7 +76,7 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, Builtin builtin) {
 using MacroAssemblerGenerator = void (*)(MacroAssembler*);
 using CodeAssemblerGenerator = void (*)(compiler::CodeAssemblerState*);

-Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
+Handle<InstructionStream> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
   HandleScope scope(isolate);
   byte buffer[kBufferSize];
   MacroAssembler masm(isolate, CodeObjectRequired::kYes,
@@ -90,16 +91,17 @@ Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
   }
   CodeDesc desc;
   masm.GetCode(isolate, &desc);
-  Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
-                          .set_self_reference(masm.CodeObject())
-                          .set_builtin(builtin)
-                          .Build();
+  Handle<InstructionStream> code =
+      Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
+          .set_self_reference(masm.CodeObject())
+          .set_builtin(builtin)
+          .Build();
   return scope.CloseAndEscape(code);
 }

-Code BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
+InstructionStream BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
                                           MacroAssemblerGenerator generator,
                                           const char* s_name) {
   HandleScope scope(isolate);
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
@@ -130,18 +132,19 @@ Code BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
   masm.GetCode(isolate, &desc, MacroAssembler::kNoSafepointTable,
                handler_table_offset);

-  Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
-                          .set_self_reference(masm.CodeObject())
-                          .set_builtin(builtin)
-                          .Build();
+  Handle<InstructionStream> code =
+      Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
+          .set_self_reference(masm.CodeObject())
+          .set_builtin(builtin)
+          .Build();
 #if defined(V8_OS_WIN64)
   isolate->SetBuiltinUnwindData(builtin, masm.GetUnwindInfo());
 #endif  // V8_OS_WIN64
   return *code;
 }

-Code BuildAdaptor(Isolate* isolate, Builtin builtin, Address builtin_address,
-                  const char* name) {
+InstructionStream BuildAdaptor(Isolate* isolate, Builtin builtin,
+                               Address builtin_address, const char* name) {
   HandleScope scope(isolate);
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
@@ -155,17 +158,19 @@ Code BuildAdaptor(Isolate* isolate, Builtin builtin, Address builtin_address,
   Builtins::Generate_Adaptor(&masm, builtin_address);
   CodeDesc desc;
   masm.GetCode(isolate, &desc);
-  Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
-                          .set_self_reference(masm.CodeObject())
-                          .set_builtin(builtin)
-                          .Build();
+  Handle<InstructionStream> code =
+      Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
+          .set_self_reference(masm.CodeObject())
+          .set_builtin(builtin)
+          .Build();
   return *code;
 }

 // Builder for builtins implemented in TurboFan with JS linkage.
-Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
-                                  CodeAssemblerGenerator generator, int argc,
-                                  const char* name) {
+InstructionStream BuildWithCodeStubAssemblerJS(Isolate* isolate,
+                                               Builtin builtin,
+                                               CodeAssemblerGenerator generator,
+                                               int argc, const char* name) {
   HandleScope scope(isolate);
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
@@ -175,17 +180,16 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
   compiler::CodeAssemblerState state(isolate, &zone, argc, CodeKind::BUILTIN,
                                      name, builtin);
   generator(&state);
-  Handle<Code> code = compiler::CodeAssembler::GenerateCode(
+  Handle<InstructionStream> code = compiler::CodeAssembler::GenerateCode(
       &state, BuiltinAssemblerOptions(isolate, builtin),
       ProfileDataFromFile::TryRead(name));
   return *code;
 }

 // Builder for builtins implemented in TurboFan with CallStub linkage.
-Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
-                                  CodeAssemblerGenerator generator,
-                                  CallDescriptors::Key interface_descriptor,
-                                  const char* name) {
+InstructionStream BuildWithCodeStubAssemblerCS(
+    Isolate* isolate, Builtin builtin, CodeAssemblerGenerator generator,
+    CallDescriptors::Key interface_descriptor, const char* name) {
   HandleScope scope(isolate);
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
@@ -199,7 +203,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
   compiler::CodeAssemblerState state(isolate, &zone, descriptor,
                                      CodeKind::BUILTIN, name, builtin);
   generator(&state);
-  Handle<Code> code = compiler::CodeAssembler::GenerateCode(
+  Handle<InstructionStream> code = compiler::CodeAssembler::GenerateCode(
       &state, BuiltinAssemblerOptions(isolate, builtin),
       ProfileDataFromFile::TryRead(name));
   return *code;
@@ -209,7 +213,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,

 // static
 void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin,
-                                      Code code) {
+                                      InstructionStream code) {
   DCHECK_EQ(builtin, code.builtin_id());
   builtins->set_code(builtin, ToCodeDataContainer(code));
 }
@@ -223,7 +227,7 @@ void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
   HandleScope scope(isolate);
   for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
        ++builtin) {
-    Handle<Code> placeholder = BuildPlaceholder(isolate, builtin);
+    Handle<InstructionStream> placeholder = BuildPlaceholder(isolate, builtin);
     AddBuiltin(builtins, builtin, *placeholder);
   }
 }
@@ -242,14 +246,15 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
   PtrComprCageBase cage_base(isolate);
   for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
        ++builtin) {
-    Code code = FromCodeDataContainer(builtins->code(builtin));
+    InstructionStream code = FromCodeDataContainer(builtins->code(builtin));
     isolate->heap()->UnprotectAndRegisterMemoryChunk(
         code, UnprotectMemoryOrigin::kMainThread);
     bool flush_icache = false;
     for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
       RelocInfo* rinfo = it.rinfo();
       if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) {
-        Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+        InstructionStream target = InstructionStream::GetCodeFromTargetAddress(
+            rinfo->target_address());
         DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
                        Builtins::IsIsolateIndependent(target.builtin_id()));
         if (!target.is_builtin()) continue;
@@ -277,11 +282,11 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {

 namespace {

-Code GenerateBytecodeHandler(Isolate* isolate, Builtin builtin,
-                             interpreter::OperandScale operand_scale,
-                             interpreter::Bytecode bytecode) {
+InstructionStream GenerateBytecodeHandler(
+    Isolate* isolate, Builtin builtin, interpreter::OperandScale operand_scale,
+    interpreter::Bytecode bytecode) {
   DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
-  Handle<Code> code = interpreter::GenerateBytecodeHandler(
+  Handle<InstructionStream> code = interpreter::GenerateBytecodeHandler(
       isolate, Builtins::name(builtin), bytecode, operand_scale, builtin,
       BuiltinAssemblerOptions(isolate, builtin));
   return *code;
@@ -300,7 +305,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
   HandleScope scope(isolate);

   int index = 0;
-  Code code;
+  InstructionStream code;
 #define BUILD_CPP(Name)                                      \
   code = BuildAdaptor(isolate, Builtin::k##Name,             \
                       FUNCTION_ADDR(Builtin_##Name), #Name); \
@@ -504,7 +504,7 @@ builtin WasmI64AtomicWait(

 // Type feedback collection support for `call_ref`.

-extern macro GetCodeEntry(Code): RawPtr;
+extern macro GetCodeEntry(InstructionStream): RawPtr;
 extern macro GetCodeEntry(CodeDataContainer): RawPtr;

 struct TargetAndInstance {
@@ -2046,7 +2046,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 }  // namespace

 // static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
@@ -2621,8 +2622,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is a InstructionStream object,
+    // it must NOT be marked_for_deoptimization (callers must ensure this).
     __ testq(maybe_target_code, maybe_target_code);
     __ j(not_equal, &jump_to_optimized_code, Label::kNear);
   }
@@ -2673,13 +2674,14 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ leave();
   }

-  __ LoadCodeDataContainerCodeNonBuiltin(rax, rax);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(rax, rax);

   // Load deoptimization data from the code object.
   const TaggedRegister deopt_data(rbx);
   __ LoadTaggedPointerField(
       deopt_data,
-      FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
+      FieldOperand(
+          rax, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));

   // Load the OSR entrypoint offset from the deoptimization data.
   __ SmiUntagField(
@@ -2688,7 +2690,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
                    DeoptimizationData::kOsrPcOffsetIndex)));

   // Compute the target address = code_obj + header_size + osr_offset
-  __ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
+  __ leaq(rax, FieldOperand(rax, rbx, times_1, InstructionStream::kHeaderSize));

   Generate_OSREntry(masm, rax);
 }
@@ -2772,13 +2774,14 @@ void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
   // before deoptimizing.
   {
     static constexpr int kCodeStartToCodeDataContainerOffset =
-        Code::kCodeDataContainerOffset - Code::kHeaderSize;
+        InstructionStream::kCodeDataContainerOffset -
+        InstructionStream::kHeaderSize;
     __ LoadTaggedPointerField(scratch0,
                               Operand(kJavaScriptCallCodeStartRegister,
                                       kCodeStartToCodeDataContainerOffset));
     __ testl(
         FieldOperand(scratch0, CodeDataContainer::kKindSpecificFlagsOffset),
-        Immediate(1 << Code::kMarkedForDeoptimizationBit));
+        Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
     __ j(not_zero, &deoptimize);
   }

@ -5330,7 +5333,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
|||||||
Register closure = rdi;
|
Register closure = rdi;
|
||||||
__ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
|
__ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
|
||||||
|
|
||||||
// Get the Code object from the shared function info.
|
// Get the InstructionStream object from the shared function info.
|
||||||
Register code_obj = rbx;
|
Register code_obj = rbx;
|
||||||
TaggedRegister shared_function_info(code_obj);
|
TaggedRegister shared_function_info(code_obj);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedPointerField(
|
||||||
@ -5364,7 +5367,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
|||||||
if (v8_flags.debug_code) {
|
if (v8_flags.debug_code) {
|
||||||
AssertCodeDataContainerIsBaseline(masm, code_obj, r11);
|
AssertCodeDataContainerIsBaseline(masm, code_obj, r11);
|
||||||
}
|
}
|
||||||
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
|
__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
|
||||||
|
|
||||||
// Load the feedback vector.
|
// Load the feedback vector.
|
||||||
Register feedback_vector = r11;
|
Register feedback_vector = r11;
|
||||||
@ -5431,8 +5434,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
|||||||
__ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
|
__ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
|
||||||
__ CallCFunction(get_baseline_pc, 3);
|
__ CallCFunction(get_baseline_pc, 3);
|
||||||
}
|
}
|
||||||
__ leaq(code_obj,
|
__ leaq(code_obj, FieldOperand(code_obj, kReturnRegister0, times_1,
|
||||||
FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
|
InstructionStream::kHeaderSize));
|
||||||
__ popq(kInterpreterAccumulatorRegister);
|
__ popq(kInterpreterAccumulatorRegister);
|
||||||
|
|
||||||
if (is_osr) {
|
if (is_osr) {
|
||||||
@@ -190,7 +190,7 @@ void Assembler::emit(Instr x) {
 }

 void Assembler::deserialization_set_special_target_at(
-Address constant_pool_entry, Code code, Address target) {
+Address constant_pool_entry, InstructionStream code, Address target) {
 DCHECK(!Builtins::IsIsolateIndependentBuiltin(code));
 Memory<Address>(constant_pool_entry) = target;
 }
@@ -553,13 +553,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
 SafepointTableBuilder* safepoint_table_builder,
 int handler_table_offset) {
 // As a crutch to avoid having to add manual Align calls wherever we use a
-// raw workflow to create Code objects (mostly in tests), add another Align
-// call here. It does no harm - the end of the Code object is aligned to the
-// (larger) kCodeAlignment anyways.
+// raw workflow to create InstructionStream objects (mostly in tests), add
+// another Align call here. It does no harm - the end of the InstructionStream
+// object is aligned to the (larger) kCodeAlignment anyways.
 // TODO(jgruber): Consider moving responsibility for proper alignment to
 // metadata table builders (safepoint, handler, constant pool, code
 // comments).
-DataAlign(Code::kMetadataAlignment);
+DataAlign(InstructionStream::kMetadataAlignment);

 // Emit constant pool if necessary.
 CheckConstPool(true, false);
@@ -831,7 +831,8 @@ void Assembler::target_at_put(int pos, int target_pos) {
 // orr dst, dst, #target8_1 << 8
 // orr dst, dst, #target8_2 << 16

-uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+uint32_t target24 =
+    target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
 CHECK(is_uint24(target24));
 if (is_uint8(target24)) {
 // If the target fits in a byte then only patch with a mov
@@ -1635,7 +1636,8 @@ void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {

 void Assembler::mov_label_offset(Register dst, Label* label) {
 if (label->is_bound()) {
-mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
+mov(dst, Operand(label->pos() +
+    (InstructionStream::kHeaderSize - kHeapObjectTag)));
 } else {
 // Emit the link to the label in the code stream followed by extra nop
 // instructions.
@@ -5252,7 +5254,8 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
 if (!ShouldRecordRelocInfo(rmode)) return;
 DCHECK_GE(buffer_space(), kMaxRelocSize);  // too late to grow buffer here
-RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
+    InstructionStream());
 reloc_info_writer.Write(&rinfo);
 }

@@ -367,7 +367,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 // This sets the branch destination (which is in the constant pool on ARM).
 // This is for calls and branches within generated code.
 inline static void deserialization_set_special_target_at(
-Address constant_pool_entry, Code code, Address target);
+Address constant_pool_entry, InstructionStream code, Address target);

 // Get the size of the special target encoded at 'location'.
 inline static int deserialization_special_target_size(Address location);
@@ -388,7 +388,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 }

 // ---------------------------------------------------------------------------
-// Code generation
+// InstructionStream generation

 // Insert the smallest number of nop instructions
 // possible to align the pc offset to a multiple
@@ -1252,7 +1252,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {

 inline void emit(Instr x);

-// Code generation
+// InstructionStream generation
 // The relocation writer's position is at least kGap bytes below the end of
 // the generated instructions. This is so that multi-instruction sequences do
 // not have to check for overflow. The same is true for writes of large
@@ -348,13 +348,14 @@ void TurboAssembler::LoadCodeDataContainerEntry(
 CodeDataContainer::kCodeEntryPointOffset));
 }

-void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
 Register destination, Register code_data_container_object) {
 ASM_CODE_COMMENT(this);
-// Compute the Code object pointer from the code entry point.
+// Compute the InstructionStream object pointer from the code entry point.
 ldr(destination, FieldMemOperand(code_data_container_object,
 CodeDataContainer::kCodeEntryPointOffset));
-sub(destination, destination, Operand(Code::kHeaderSize - kHeapObjectTag));
+sub(destination, destination,
+    Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
 }

 void TurboAssembler::CallCodeDataContainerObject(
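The comment in this hunk describes the inverse mapping used throughout this change: a CodeDataContainer stores a raw code entry point, and subtracting (header size - heap-object tag) recovers the tagged InstructionStream pointer. A minimal sketch under assumed constants (the real offsets live in the generated object layouts):

```cpp
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;
constexpr uintptr_t kHeaderSize = 0x40;  // hypothetical header size

// Entry point -> tagged object pointer, as in the sub() above.
uintptr_t TaggedStreamFromEntryPoint(uintptr_t code_entry_point) {
  return code_entry_point - (kHeaderSize - kHeapObjectTag);
}

// And the inverse: tagged object pointer -> entry point.
uintptr_t EntryPointFromTaggedStream(uintptr_t tagged_stream) {
  return tagged_stream + kHeaderSize - kHeapObjectTag;
}
```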
@@ -379,9 +380,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
 // This generates the final instruction sequence for calls to C functions
 // once an exit frame has been constructed.
 //
-// Note that this assumes the caller code (i.e. the Code object currently
-// being generated) is immovable or that the callee function cannot trigger
-// GC, since the callee function will return to it.
+// Note that this assumes the caller code (i.e. the InstructionStream object
+// currently being generated) is immovable or that the callee function cannot
+// trigger GC, since the callee function will return to it.

 // Compute the return address in lr to return to after the jump below. The pc
 // is already at '+ 8' from the current instruction; but return is after three
@@ -408,7 +409,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
 Register code_data_container, Register scratch) {
 ldr(scratch, FieldMemOperand(code_data_container,
 CodeDataContainer::kKindSpecificFlagsOffset));
-tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+tst(scratch, Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
 }

 Operand MacroAssembler::ClearedValue() const {
@@ -327,11 +327,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 void LoadCodeDataContainerEntry(Register destination,
 Register code_data_container_object);
 // Load code entry point from the CodeDataContainer object and compute
-// Code object pointer out of it. Must not be used for CodeDataContainers
-// corresponding to builtins, because their entry points values point to
-// the embedded instruction stream in .text section.
-void LoadCodeDataContainerCodeNonBuiltin(Register destination,
-Register code_data_container_object);
+// InstructionStream object pointer out of it. Must not be used for
+// CodeDataContainers corresponding to builtins, because their entry points
+// values point to the embedded instruction stream in .text section.
+void LoadCodeDataContainerInstructionStreamNonBuiltin(
+Register destination, Register code_data_container_object);
 void CallCodeDataContainerObject(Register code_data_container_object);
 void JumpCodeDataContainerObject(Register code_data_container_object,
 JumpMode jump_mode = JumpMode::kJump);
@@ -912,7 +912,7 @@ struct MoveCycleState {
 VfpRegList scratch_v_reglist = 0;
 // Available scratch registers during the move cycle resolution scope.
 base::Optional<UseScratchRegisterScope> temps;
-// Code of the scratch register picked by {MoveToTempLocation}.
+// InstructionStream of the scratch register picked by {MoveToTempLocation}.
 int scratch_reg_code = -1;
 };

@@ -548,7 +548,7 @@ int Assembler::deserialization_special_target_size(Address location) {
 }

 void Assembler::deserialization_set_special_target_at(Address location,
-Code code,
+InstructionStream code,
 Address target) {
 Instruction* instr = reinterpret_cast<Instruction*>(location);
 if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
@@ -661,8 +661,9 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
 DCHECK(!HAS_SMI_TAG(compressed));
 Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
 compressed));
-// Embedding of compressed Code objects must not happen when external code
-// space is enabled, because CodeDataContainers must be used instead.
+// Embedding of compressed InstructionStream objects must not happen when
+// external code space is enabled, because CodeDataContainers must be used
+// instead.
 DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
 !IsCodeSpaceObject(HeapObject::cast(obj)));
 return HeapObject::cast(obj);
@@ -377,13 +377,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
 SafepointTableBuilderBase* safepoint_table_builder,
 int handler_table_offset) {
 // As a crutch to avoid having to add manual Align calls wherever we use a
-// raw workflow to create Code objects (mostly in tests), add another Align
-// call here. It does no harm - the end of the Code object is aligned to the
-// (larger) kCodeAlignment anyways.
+// raw workflow to create InstructionStream objects (mostly in tests), add
+// another Align call here. It does no harm - the end of the InstructionStream
+// object is aligned to the (larger) kCodeAlignment anyways.
 // TODO(jgruber): Consider moving responsibility for proper alignment to
 // metadata table builders (safepoint, handler, constant pool, code
 // comments).
-DataAlign(Code::kMetadataAlignment);
+DataAlign(InstructionStream::kMetadataAlignment);

 // Emit constant pool if necessary.
 ForceConstantPoolEmissionWithoutJump();
@@ -3577,7 +3577,7 @@ Instr Assembler::ImmNEONFP(double imm) {
 return ImmNEONabcdefgh(FPToImm8(imm));
 }

-// Code generation helpers.
+// InstructionStream generation helpers.
 void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
 MoveWideImmediateOp mov_op) {
 // Ignore the top 32 bits of an immediate if we're moving to a W register.
@@ -4360,7 +4360,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
 DCHECK(constpool_.IsBlocked());

 // We do not try to reuse pool constants.
-RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
+    InstructionStream());

 DCHECK_GE(buffer_space(), kMaxRelocSize);  // too late to grow buffer here
 reloc_info_writer.Write(&rinfo);
@@ -4486,7 +4487,8 @@ intptr_t Assembler::MaxPCOffsetAfterVeneerPoolIfEmittedNow(size_t margin) {
 void Assembler::RecordVeneerPool(int location_offset, int size) {
 Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip);
 RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
-RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
+RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
+    InstructionStream());
 reloc_info_writer.Write(&rinfo);
 }

@@ -277,9 +277,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 // This sets the branch destination. 'location' here can be either the pc of
 // an immediate branch or the address of an entry in the constant pool.
 // This is for calls and branches within generated code.
-inline static void deserialization_set_special_target_at(Address location,
-Code code,
-Address target);
+inline static void deserialization_set_special_target_at(
+Address location, InstructionStream code, Address target);

 // Get the size of the special target encoded at 'location'.
 inline static int deserialization_special_target_size(Address location);
@@ -780,12 +779,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 void clz(const Register& rd, const Register& rn);
 void cls(const Register& rd, const Register& rn);

-// Pointer Authentication Code for Instruction address, using key B, with
-// address in x17 and modifier in x16 [Armv8.3].
+// Pointer Authentication InstructionStream for Instruction address, using key
+// B, with address in x17 and modifier in x16 [Armv8.3].
 void pacib1716();

-// Pointer Authentication Code for Instruction address, using key B, with
-// address in LR and modifier in SP [Armv8.3].
+// Pointer Authentication InstructionStream for Instruction address, using key
+// B, with address in LR and modifier in SP [Armv8.3].
 void pacibsp();

 // Authenticate Instruction address, using key B, with address in x17 and
@@ -2088,7 +2087,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 dc64(data);
 }

-// Code generation helpers --------------------------------------------------
+// InstructionStream generation helpers
+// --------------------------------------------------

 Instruction* pc() const { return Instruction::Cast(pc_); }

@@ -2663,7 +2663,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 std::deque<int> internal_reference_positions_;

 protected:
-// Code generation
+// InstructionStream generation
 // The relocation writer's position is at least kGap bytes below the end of
 // the generated instructions. This is so that multi-instruction sequences do
 // not have to check for overflow. The same is true for writes of large
@@ -2360,13 +2360,14 @@ void TurboAssembler::LoadCodeDataContainerEntry(
 CodeDataContainer::kCodeEntryPointOffset));
 }

-void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
 Register destination, Register code_data_container_object) {
 ASM_CODE_COMMENT(this);
-// Compute the Code object pointer from the code entry point.
+// Compute the InstructionStream object pointer from the code entry point.
 Ldr(destination, FieldMemOperand(code_data_container_object,
 CodeDataContainer::kCodeEntryPointOffset));
-Sub(destination, destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
+Sub(destination, destination,
+    Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
 }

 void TurboAssembler::CallCodeDataContainerObject(
@@ -2396,9 +2397,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
 // This generates the final instruction sequence for calls to C functions
 // once an exit frame has been constructed.
 //
-// Note that this assumes the caller code (i.e. the Code object currently
-// being generated) is immovable or that the callee function cannot trigger
-// GC, since the callee function will return to it.
+// Note that this assumes the caller code (i.e. the InstructionStream object
+// currently being generated) is immovable or that the callee function cannot
+// trigger GC, since the callee function will return to it.

 UseScratchRegisterScope temps(this);
 temps.Exclude(x16, x17);
@@ -2447,13 +2448,15 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
 void TurboAssembler::BailoutIfDeoptimized() {
 UseScratchRegisterScope temps(this);
 Register scratch = temps.AcquireX();
-int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+int offset = InstructionStream::kCodeDataContainerOffset -
+    InstructionStream::kHeaderSize;
 LoadTaggedPointerField(scratch,
 MemOperand(kJavaScriptCallCodeStartRegister, offset));
 Ldr(scratch.W(),
 FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
 Label not_deoptimized;
-Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
+Tbz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit,
+    &not_deoptimized);
 Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
 RelocInfo::CODE_TARGET);
 Bind(&not_deoptimized);
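BailoutIfDeoptimized reads the kind-specific flags word off the CodeDataContainer and branches on a single bit. A sketch of the predicate being tested, with a hypothetical bit index (the real constant moves to InstructionStream with this rename):

```cpp
#include <cstdint>

constexpr int kMarkedForDeoptimizationBit = 0;  // hypothetical index

// The Tbz above falls through to not_deoptimized exactly when this is false.
bool IsMarkedForDeoptimization(uint32_t kind_specific_flags) {
  return (kind_specific_flags & (1u << kMarkedForDeoptimizationBit)) != 0;
}
```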
@@ -2691,7 +2694,7 @@ void MacroAssembler::JumpIfCodeDataContainerIsMarkedForDeoptimization(
 Ldr(scratch.W(),
 FieldMemOperand(code_data_container,
 CodeDataContainer::kKindSpecificFlagsOffset));
-Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
+Tbnz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit,
 if_marked_for_deoptimization);
 }

@@ -1000,11 +1000,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 void LoadCodeDataContainerEntry(Register destination,
 Register code_data_container_object);
 // Load code entry point from the CodeDataContainer object and compute
-// Code object pointer out of it. Must not be used for CodeDataContainers
-// corresponding to builtins, because their entry points values point to
-// the embedded instruction stream in .text section.
-void LoadCodeDataContainerCodeNonBuiltin(Register destination,
-Register code_data_container_object);
+// InstructionStream object pointer out of it. Must not be used for
+// CodeDataContainers corresponding to builtins, because their entry points
+// values point to the embedded instruction stream in .text section.
+void LoadCodeDataContainerInstructionStreamNonBuiltin(
+Register destination, Register code_data_container_object);
 void CallCodeDataContainerObject(Register code_data_container_object);
 void JumpCodeDataContainerObject(Register code_data_container_object,
 JumpMode jump_mode = JumpMode::kJump);
@@ -1989,7 +1989,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 void InvokeFunction(Register function, Register expected_parameter_count,
 Register actual_parameter_count, InvokeType type);

-// ---- Code generation helpers ----
+// ---- InstructionStream generation helpers ----

 // ---------------------------------------------------------------------------
 // Support functions.
@@ -11,7 +11,7 @@
 namespace v8 {
 namespace internal {

-class Code;
+class InstructionStream;

 // Associates a body of code with an interface descriptor.
 class Callable final {
@@ -17,7 +17,7 @@ namespace internal {

 class Assembler;

-// Code comments section layout:
+// InstructionStream comments section layout:
 // byte count              content
 // ------------------------------------------------------------------------
 // 4                       size as uint32_t (only for a check)
@@ -63,7 +63,7 @@ class CodeDesc {
 int code_comments_size = 0;

 // TODO(jgruber,v8:11036): Remove these functions once CodeDesc fields have
-// been made consistent with Code layout.
+// been made consistent with InstructionStream layout.
 int body_size() const { return instr_size + unwinding_info_size; }
 int instruction_size() const { return safepoint_table_offset; }
 int metadata_size() const { return body_size() - instruction_size(); }
@@ -33,7 +33,7 @@ struct CodeOrCodeDataContainerOps {
 int code_comments_size() const { return code->code_comments_size(); }
 };

-using CodeOps = CodeOrCodeDataContainerOps<Code>;
+using CodeOps = CodeOrCodeDataContainerOps<InstructionStream>;
 using CodeDataContainerOps = CodeOrCodeDataContainerOps<CodeDataContainer>;

 #if V8_ENABLE_WEBASSEMBLY
@@ -96,8 +96,8 @@ struct CodeDescOps {
 ret CodeReference::method() const { \
 DCHECK(!is_null()); \
 switch (kind_) { \
-case Kind::CODE: \
-return CodeOps{code_}.method(); \
+case Kind::INSTRUCTION_STREAM: \
+return CodeOps{instruction_stream_}.method(); \
 case Kind::CODE_DATA_CONTAINER: \
 return CodeDataContainerOps{code_data_container_}.method(); \
 case Kind::WASM_CODE: \
@@ -12,7 +12,7 @@
 namespace v8 {
 namespace internal {

-class Code;
+class InstructionStream;
 class CodeDataContainer;
 class CodeDesc;

@@ -27,7 +27,8 @@ class CodeReference {
 : kind_(Kind::WASM_CODE), wasm_code_(wasm_code) {}
 explicit CodeReference(const CodeDesc* code_desc)
 : kind_(Kind::CODE_DESC), code_desc_(code_desc) {}
-explicit CodeReference(Handle<Code> code) : kind_(Kind::CODE), code_(code) {}
+explicit CodeReference(Handle<InstructionStream> code)
+    : kind_(Kind::INSTRUCTION_STREAM), instruction_stream_(code) {}
 explicit CodeReference(Handle<CodeDataContainer> code_data_container)
 : kind_(Kind::CODE_DATA_CONTAINER),
 code_data_container_(code_data_container) {}
@@ -43,15 +44,17 @@ class CodeReference {
 int code_comments_size() const;

 bool is_null() const { return kind_ == Kind::NONE; }
-bool is_code() const { return kind_ == Kind::CODE; }
+bool is_instruction_stream() const {
+  return kind_ == Kind::INSTRUCTION_STREAM;
+}
 bool is_code_data_container() const {
 return kind_ == Kind::CODE_DATA_CONTAINER;
 }
 bool is_wasm_code() const { return kind_ == Kind::WASM_CODE; }

-Handle<Code> as_code() const {
-DCHECK_EQ(Kind::CODE, kind_);
-return code_;
+Handle<InstructionStream> as_instruction_stream() const {
+DCHECK_EQ(Kind::INSTRUCTION_STREAM, kind_);
+return instruction_stream_;
 }

 Handle<CodeDataContainer> as_code_data_container() const {
@@ -67,7 +70,7 @@ class CodeReference {
 private:
 enum class Kind {
 NONE,
-CODE,
+INSTRUCTION_STREAM,
 CODE_DATA_CONTAINER,
 WASM_CODE,
 CODE_DESC
@@ -76,7 +79,7 @@ class CodeReference {
 std::nullptr_t null_;
 const wasm::WasmCode* wasm_code_;
 const CodeDesc* code_desc_;
-Handle<Code> code_;
+Handle<InstructionStream> instruction_stream_;
 Handle<CodeDataContainer> code_data_container_;
 };

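CodeReference is a small tagged union: one kind enum plus one member per variant, with accessors that DCHECK the kind before handing out the member. A simplified sketch of the shape after this rename (the WASM_CODE and CODE_DESC variants are elided, and plain pointers stand in for Handles):

```cpp
#include <cassert>

// Simplified model of CodeReference; pointers stand in for Handle<> types.
struct CodeReferenceSketch {
  enum class Kind { NONE, INSTRUCTION_STREAM, CODE_DATA_CONTAINER };

  Kind kind = Kind::NONE;
  const void* instruction_stream = nullptr;   // valid iff INSTRUCTION_STREAM
  const void* code_data_container = nullptr;  // valid iff CODE_DATA_CONTAINER

  bool is_instruction_stream() const {
    return kind == Kind::INSTRUCTION_STREAM;
  }
  const void* as_instruction_stream() const {
    assert(kind == Kind::INSTRUCTION_STREAM);  // mirrors the DCHECK_EQ above
    return instruction_stream;
  }
};
```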
@@ -3158,7 +3158,7 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
 #endif  // DEBUG
 TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
 FromCodeDataContainerNonBuiltin(code),
-Code::kDeoptimizationDataOrInterpreterDataOffset);
+InstructionStream::kDeoptimizationDataOrInterpreterDataOffset);
 var_result = baseline_data;
 }
 Goto(&check_for_interpreter_data);
@@ -15614,7 +15614,7 @@ TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeDataContainer> code) {

 TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(
 TNode<CodeDataContainer> code_data_container) {
-return IsSetWord32<Code::MarkedForDeoptimizationField>(
+return IsSetWord32<InstructionStream::MarkedForDeoptimizationField>(
 LoadObjectField<Int32T>(code_data_container,
 CodeDataContainer::kKindSpecificFlagsOffset));
 }
@@ -834,21 +834,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler

 void FastCheck(TNode<BoolT> condition);

-// TODO(v8:11880): remove once Code::bytecode_or_interpreter_data field
-// is cached in or moved to CodeDataContainer.
-TNode<Code> FromCodeDataContainerNonBuiltin(TNode<CodeDataContainer> code) {
-// Compute the Code object pointer from the code entry point.
+// TODO(v8:11880): remove once InstructionStream::bytecode_or_interpreter_data
+// field is cached in or moved to CodeDataContainer.
+TNode<InstructionStream> FromCodeDataContainerNonBuiltin(
+    TNode<CodeDataContainer> code) {
+// Compute the InstructionStream object pointer from the code entry point.
 TNode<RawPtrT> code_entry = Load<RawPtrT>(
 code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset -
 kHeapObjectTag));
 TNode<Object> o = BitcastWordToTagged(IntPtrSub(
-code_entry, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
+code_entry,
+    IntPtrConstant(InstructionStream::kHeaderSize - kHeapObjectTag)));
 return CAST(o);
 }

-TNode<CodeDataContainer> ToCodeDataContainer(TNode<Code> code) {
-return LoadObjectField<CodeDataContainer>(code,
-Code::kCodeDataContainerOffset);
+TNode<CodeDataContainer> ToCodeDataContainer(TNode<InstructionStream> code) {
+return LoadObjectField<CodeDataContainer>(
+    code, InstructionStream::kCodeDataContainerOffset);
 }

 TNode<RawPtrT> GetCodeEntry(TNode<CodeDataContainer> code);
@@ -857,7 +859,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler

 // The following Call wrappers call an object according to the semantics that
 // one finds in the EcmaScript spec, operating on an Callable (e.g. a
-// JSFunction or proxy) rather than a Code object.
+// JSFunction or proxy) rather than a InstructionStream object.
 template <class... TArgs>
 TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
 TNode<JSReceiver> receiver, TArgs... args) {
@@ -644,7 +644,7 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate,
 Handle<BytecodeArray> bytecode_array(shared_info->GetBytecodeArray(isolate),
 isolate);

-Handle<Code> code =
+Handle<InstructionStream> code =
 Builtins::CreateInterpreterEntryTrampolineForProfiling(isolate);

 Handle<InterpreterData> interpreter_data =
@@ -1177,7 +1177,8 @@ void RecordMaglevFunctionCompilation(Isolate* isolate,
 Handle<JSFunction> function) {
 PtrComprCageBase cage_base(isolate);
 // TODO(v8:13261): We should be able to pass a CodeDataContainer AbstractCode
-// in here, but LinuxPerfJitLogger only supports Code AbstractCode.
+// in here, but LinuxPerfJitLogger only supports InstructionStream
+// AbstractCode.
 Handle<AbstractCode> abstract_code(
 AbstractCode::cast(FromCodeDataContainer(function->code(cage_base))),
 isolate);
@@ -1731,13 +1732,16 @@ class MergeAssumptionChecker final : public ObjectVisitor {
 }

 // The object graph for a newly compiled Script shouldn't yet contain any
-// Code. If any of these functions are called, then that would indicate that
-// the graph was not disjoint from the rest of the heap as expected.
+// InstructionStream. If any of these functions are called, then that would
+// indicate that the graph was not disjoint from the rest of the heap as
+// expected.
 void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
 UNREACHABLE();
 }
-void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
-void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+void VisitCodeTarget(InstructionStream host, RelocInfo* rinfo) override {
+  UNREACHABLE();
+}
+void VisitEmbeddedPointer(InstructionStream host, RelocInfo* rinfo) override {
 UNREACHABLE();
 }

@@ -2637,7 +2641,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
 }

 CompilerTracer::TraceStartBaselineCompile(isolate, shared);
-Handle<Code> code;
+Handle<InstructionStream> code;
 base::TimeDelta time_taken;
 {
 ScopedTimer timer(&time_taken);
@@ -3929,7 +3933,7 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
 // 2) The function may have already been optimized by OSR. Simply continue.
 // Except when OSR already disabled optimization for some reason.
 // 3) The code may have already been invalidated due to dependency change.
-// 4) Code generation may have failed.
+// 4) InstructionStream generation may have failed.
 if (job->state() == CompilationJob::State::kReadyToFinalize) {
 if (shared->optimization_disabled()) {
 job->RetryOptimization(BailoutReason::kOptimizationDisabled);
@@ -3991,8 +3995,8 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
 ResetTieringState(*function, osr_offset);

 if (status == CompilationJob::SUCCEEDED) {
-// Note the finalized Code object has already been installed on the
-// function by MaglevCompilationJob::FinalizeJobImpl.
+// Note the finalized InstructionStream object has already been installed on
+// the function by MaglevCompilationJob::FinalizeJobImpl.

 OptimizedCodeCache::Insert(isolate, *function, BytecodeOffset::None(),
 function->code(),
@@ -749,7 +749,7 @@ namespace {
 static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj,
 int bytecode_offset,
 Address raw_bytecode_array) {
-Code code_obj = Code::cast(Object(raw_code_obj));
+InstructionStream code_obj = InstructionStream::cast(Object(raw_code_obj));
 BytecodeArray bytecode_array =
 BytecodeArray::cast(Object(raw_bytecode_array));
 return code_obj.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
@@ -759,7 +759,7 @@ static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj,
 static uintptr_t BaselinePCForNextExecutedBytecode(Address raw_code_obj,
 int bytecode_offset,
 Address raw_bytecode_array) {
-Code code_obj = Code::cast(Object(raw_code_obj));
+InstructionStream code_obj = InstructionStream::cast(Object(raw_code_obj));
 BytecodeArray bytecode_array =
 BytecodeArray::cast(Object(raw_bytecode_array));
 return code_obj.GetBaselinePCForNextExecutedBytecode(bytecode_offset,
@@ -19,7 +19,7 @@
 namespace v8 {
 namespace internal {

-HandlerTable::HandlerTable(Code code)
+HandlerTable::HandlerTable(InstructionStream code)
 : HandlerTable(code.HandlerTableAddress(), code.handler_table_size(),
 kReturnAddressBasedEncoding) {}

@@ -151,7 +151,7 @@ int HandlerTable::LengthForRange(int entries) {

 // static
 int HandlerTable::EmitReturnTableStart(Assembler* masm) {
-masm->DataAlign(Code::kMetadataAlignment);
+masm->DataAlign(InstructionStream::kMetadataAlignment);
 masm->RecordComment(";;; Exception handler table.");
 int table_start = masm->pc_offset();
 return table_start;
@@ -15,7 +15,7 @@ namespace internal {
 class Assembler;
 class ByteArray;
 class BytecodeArray;
-class Code;
+class InstructionStream;
 class CodeDataContainer;

 namespace wasm {
@@ -30,8 +30,9 @@ class WasmCode;
 // Layout looks as follows:
 // [ range-start , range-end , handler-offset , handler-data ]
 // 2) Based on return addresses: Used for turbofanned code. Stored directly in
-// the instruction stream of the {Code} object. Contains one entry per
-// call-site that could throw an exception. Layout looks as follows:
+// the instruction stream of the {InstructionStream} object. Contains one
+// entry per call-site that could throw an exception. Layout looks as
+// follows:
 // [ return-address-offset , handler-offset ]
 class V8_EXPORT_PRIVATE HandlerTable {
 public:
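The comment block in this hunk describes the two entry layouts a HandlerTable can use. As a sketch, the two shapes side by side (hypothetical struct names; the real table stores raw ints, not structs):

```cpp
#include <cstdint>

// Encoding 1, range-based (bytecode):
// [ range-start , range-end , handler-offset , handler-data ]
struct RangeEntry {
  int32_t range_start;
  int32_t range_end;
  int32_t handler_offset;
  int32_t handler_data;
};

// Encoding 2, return-address-based (turbofanned InstructionStream):
// [ return-address-offset , handler-offset ]
struct ReturnAddressEntry {
  int32_t return_address_offset;
  int32_t handler_offset;
};
```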
@@ -54,7 +55,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
 enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };

 // Constructors for the various encodings.
-explicit HandlerTable(Code code);
+explicit HandlerTable(InstructionStream code);
 explicit HandlerTable(CodeDataContainer code);
 explicit HandlerTable(ByteArray byte_array);
 #if V8_ENABLE_WEBASSEMBLY
@@ -121,8 +122,8 @@ class V8_EXPORT_PRIVATE HandlerTable {
 #endif

 // Direct pointer into the encoded data. This pointer potentially points into
-// objects on the GC heap (either {ByteArray} or {Code}) and could become
-// stale during a collection. Hence we disallow any allocation.
+// objects on the GC heap (either {ByteArray} or {InstructionStream}) and
+// could become stale during a collection. Hence we disallow any allocation.
 const Address raw_encoded_data_;
 DISALLOW_GARBAGE_COLLECTION(no_gc_)

@@ -188,7 +188,7 @@ void Assembler::emit(const Immediate& x) {
 void Assembler::emit_code_relative_offset(Label* label) {
 if (label->is_bound()) {
 int32_t pos;
-pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
+pos = label->pos() + InstructionStream::kHeaderSize - kHeapObjectTag;
 emit(pos);
 } else {
 emit_disp(label, Displacement::CODE_RELATIVE);
@@ -222,7 +222,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
 }

 void Assembler::deserialization_set_special_target_at(
-Address instruction_payload, Code code, Address target) {
+Address instruction_payload, InstructionStream code, Address target) {
 set_target_address_at(instruction_payload,
 !code.is_null() ? code.constant_pool() : kNullAddress,
 target);
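The ia32 variant above shows why the patcher takes the host object at all: a non-null InstructionStream supplies its constant pool address, a null one degrades to kNullAddress. A sketch of just that selection, with a hypothetical stand-in type:

```cpp
#include <cstdint>

using Address = uintptr_t;
constexpr Address kNullAddress = 0;

struct StreamRef {  // hypothetical stand-in for InstructionStream
  Address constant_pool = kNullAddress;
  bool is_null() const { return constant_pool == kNullAddress; }
};

// The second argument passed to set_target_address_at above.
Address ConstantPoolOrNull(const StreamRef& code) {
  return !code.is_null() ? code.constant_pool : kNullAddress;
}
```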
@@ -320,13 +320,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
 SafepointTableBuilder* safepoint_table_builder,
 int handler_table_offset) {
 // As a crutch to avoid having to add manual Align calls wherever we use a
-// raw workflow to create Code objects (mostly in tests), add another Align
-// call here. It does no harm - the end of the Code object is aligned to the
-// (larger) kCodeAlignment anyways.
+// raw workflow to create InstructionStream objects (mostly in tests), add
+// another Align call here. It does no harm - the end of the InstructionStream
+// object is aligned to the (larger) kCodeAlignment anyways.
 // TODO(jgruber): Consider moving responsibility for proper alignment to
 // metadata table builders (safepoint, handler, constant pool, code
 // comments).
-DataAlign(Code::kMetadataAlignment);
+DataAlign(InstructionStream::kMetadataAlignment);

 const int code_comments_size = WriteCodeComments();

@@ -1537,8 +1537,9 @@ void Assembler::bind_to(Label* L, int pos) {
 long_at_put(fixup_pos, reinterpret_cast<int>(buffer_start_ + pos));
 internal_reference_positions_.push_back(fixup_pos);
 } else if (disp.type() == Displacement::CODE_RELATIVE) {
-// Relative to Code heap object pointer.
-long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
+// Relative to InstructionStream heap object pointer.
+long_at_put(fixup_pos,
+    pos + InstructionStream::kHeaderSize - kHeapObjectTag);
 } else {
 if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
 DCHECK_EQ(byte_at(fixup_pos - 1), 0xE9);  // jmp expected
@@ -3406,7 +3407,8 @@ void Assembler::dd(Label* label) {

 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
 if (!ShouldRecordRelocInfo(rmode)) return;
-RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
+    InstructionStream());
 reloc_info_writer.Write(&rinfo);
 }

@@ -405,7 +405,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 // This sets the branch destination (which is in the instruction on x86).
 // This is for calls and branches within generated code.
 inline static void deserialization_set_special_target_at(
-Address instruction_payload, Code code, Address target);
+Address instruction_payload, InstructionStream code, Address target);

 // Get the size of the special target encoded at 'instruction_payload'.
 inline static int deserialization_special_target_size(
@@ -433,7 +433,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;

 // ---------------------------------------------------------------------------
-// Code generation
+// InstructionStream generation
 //
 // - function names correspond one-to-one to ia32 instruction mnemonics
 // - unless specified otherwise, instructions operate on 32bit operands
@ -710,7 +710,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
|
|||||||
Register code_data_container) {
|
Register code_data_container) {
|
||||||
test(FieldOperand(code_data_container,
|
test(FieldOperand(code_data_container,
|
||||||
CodeDataContainer::kKindSpecificFlagsOffset),
|
CodeDataContainer::kKindSpecificFlagsOffset),
|
||||||
Immediate(1 << Code::kMarkedForDeoptimizationBit));
|
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
|
||||||
}
|
}
|
||||||
|
|
||||||
Immediate MacroAssembler::ClearedValue() const {
|
Immediate MacroAssembler::ClearedValue() const {
|
||||||
@ -2058,13 +2058,13 @@ void TurboAssembler::LoadCodeDataContainerEntry(
|
|||||||
CodeDataContainer::kCodeEntryPointOffset));
|
CodeDataContainer::kCodeEntryPointOffset));
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
|
void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
|
||||||
Register destination, Register code_data_container_object) {
|
Register destination, Register code_data_container_object) {
|
||||||
ASM_CODE_COMMENT(this);
|
ASM_CODE_COMMENT(this);
|
||||||
// Compute the Code object pointer from the code entry point.
|
// Compute the InstructionStream object pointer from the code entry point.
|
||||||
mov(destination, FieldOperand(code_data_container_object,
|
mov(destination, FieldOperand(code_data_container_object,
|
||||||
CodeDataContainer::kCodeEntryPointOffset));
|
CodeDataContainer::kCodeEntryPointOffset));
|
||||||
sub(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
|
sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::CallCodeDataContainerObject(
|
void TurboAssembler::CallCodeDataContainerObject(
|
||||||
|
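The sub() in LoadCodeDataContainerInstructionStreamNonBuiltin is doing object-pointer arithmetic: the entry point stored in the CodeDataContainer points at the first instruction, which sits a fixed header distance past the start of the InstructionStream object. A standalone sketch of that arithmetic, with illustrative constants rather than V8's real layout values:

#include <cstdint>

// Illustrative constants only; the real values come from V8's heap layout.
constexpr uintptr_t kHeapObjectTag = 1;  // low bit set on tagged pointers
constexpr uintptr_t kHeaderSize = 64;    // header bytes before the first instruction

// entry_point == object_start + kHeaderSize, and a tagged pointer is
// object_start + kHeapObjectTag, so one subtraction recovers the tagged
// InstructionStream pointer from the code entry point.
uintptr_t TaggedObjectFromEntryPoint(uintptr_t entry_point) {
  return entry_point - (kHeaderSize - kHeapObjectTag);
}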
@@ -36,7 +36,7 @@
 namespace v8 {
 namespace internal {
 
-class Code;
+class InstructionStream;
 class ExternalReference;
 class StatsCounter;
 
@@ -162,11 +162,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
   void LoadCodeDataContainerEntry(Register destination,
                                   Register code_data_container_object);
   // Load code entry point from the CodeDataContainer object and compute
-  // Code object pointer out of it. Must not be used for CodeDataContainers
-  // corresponding to builtins, because their entry points values point to
-  // the embedded instruction stream in .text section.
-  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
-                                           Register code_data_container_object);
+  // InstructionStream object pointer out of it. Must not be used for
+  // CodeDataContainers corresponding to builtins, because their entry points
+  // values point to the embedded instruction stream in .text section.
+  void LoadCodeDataContainerInstructionStreamNonBuiltin(
+      Register destination, Register code_data_container_object);
   void CallCodeDataContainerObject(Register code_data_container_object);
   void JumpCodeDataContainerObject(Register code_data_container_object,
                                    JumpMode jump_mode = JumpMode::kJump);
@@ -13,7 +13,7 @@ namespace v8 {
 namespace internal {
 
 MaglevSafepointTable::MaglevSafepointTable(Isolate* isolate, Address pc,
-                                           Code code)
+                                           InstructionStream code)
     : MaglevSafepointTable(code.InstructionStart(isolate, pc),
                            code.SafepointTableAddress()) {
   DCHECK(code.is_maglevved());
@@ -160,7 +160,7 @@ void MaglevSafepointTableBuilder::Emit(Assembler* assembler) {
 #endif
 
   // Make sure the safepoint table is properly aligned. Pad with nops.
-  assembler->Align(Code::kMetadataAlignment);
+  assembler->Align(InstructionStream::kMetadataAlignment);
   assembler->RecordComment(";;; Maglev safepoint table.");
   set_safepoint_table_offset(assembler->pc_offset());
 
@@ -65,13 +65,14 @@ class MaglevSafepointEntry : public SafepointEntryBase {
   uint32_t tagged_register_indexes_ = 0;
 };
 
-// A wrapper class for accessing the safepoint table embedded into the Code
-// object.
+// A wrapper class for accessing the safepoint table embedded into the
+// InstructionStream object.
 class MaglevSafepointTable {
  public:
   // The isolate and pc arguments are used for figuring out whether pc
   // belongs to the embedded or un-embedded code blob.
-  explicit MaglevSafepointTable(Isolate* isolate, Address pc, Code code);
+  explicit MaglevSafepointTable(Isolate* isolate, Address pc,
+                                InstructionStream code);
   explicit MaglevSafepointTable(Isolate* isolate, Address pc,
                                 CodeDataContainer code);
   MaglevSafepointTable(const MaglevSafepointTable&) = delete;
@@ -169,7 +169,7 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
   }
 }
 
-void OptimizedCompilationInfo::SetCode(Handle<Code> code) {
+void OptimizedCompilationInfo::SetCode(Handle<InstructionStream> code) {
   DCHECK_EQ(code->kind(), code_kind());
   code_ = code;
 }
@@ -118,7 +118,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
   bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
   Handle<JSFunction> closure() const { return closure_; }
-  Handle<Code> code() const { return code_; }
+  Handle<InstructionStream> code() const { return code_; }
   CodeKind code_kind() const { return code_kind_; }
   Builtin builtin() const { return builtin_; }
   void set_builtin(Builtin builtin) { builtin_ = builtin; }
@@ -129,9 +129,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   }
   compiler::NodeObserver* node_observer() const { return node_observer_; }
 
-  // Code getters and setters.
+  // InstructionStream getters and setters.
 
-  void SetCode(Handle<Code> code);
+  void SetCode(Handle<InstructionStream> code);
 
 #if V8_ENABLE_WEBASSEMBLY
   void SetWasmCompilationResult(std::unique_ptr<wasm::WasmCompilationResult>);
@@ -260,7 +260,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   Handle<JSFunction> closure_;
 
   // The compiled code.
-  Handle<Code> code_;
+  Handle<InstructionStream> code_;
 
   // Basic block profiling support.
   BasicBlockProfilerData* profiler_data_ = nullptr;
@@ -253,23 +253,23 @@ void RelocIterator::next() {
   done_ = true;
 }
 
-RelocIterator::RelocIterator(Code code, int mode_mask)
+RelocIterator::RelocIterator(InstructionStream code, int mode_mask)
     : RelocIterator(code, code.unchecked_relocation_info(), mode_mask) {}
 
-RelocIterator::RelocIterator(Code code, ByteArray relocation_info,
+RelocIterator::RelocIterator(InstructionStream code, ByteArray relocation_info,
                              int mode_mask)
     : RelocIterator(code, code.raw_instruction_start(), code.constant_pool(),
                     relocation_info.GetDataEndAddress(),
                     relocation_info.GetDataStartAddress(), mode_mask) {}
 
 RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
-    : RelocIterator(Code(), code_reference.instruction_start(),
+    : RelocIterator(InstructionStream(), code_reference.instruction_start(),
                     code_reference.constant_pool(),
                     code_reference.relocation_end(),
                     code_reference.relocation_start(), mode_mask) {}
 
-RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code,
-                             int mode_mask)
+RelocIterator::RelocIterator(EmbeddedData* embedded_data,
+                             InstructionStream code, int mode_mask)
     : RelocIterator(code,
                     embedded_data->InstructionStartOfBuiltin(code.builtin_id()),
                     code.constant_pool(),
@@ -277,20 +277,22 @@ RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code,
                     code.relocation_start(), mode_mask) {}
 
 RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
-    : RelocIterator(Code(), reinterpret_cast<Address>(desc.buffer), 0,
-                    desc.buffer + desc.buffer_size,
+    : RelocIterator(InstructionStream(), reinterpret_cast<Address>(desc.buffer),
+                    0, desc.buffer + desc.buffer_size,
                     desc.buffer + desc.buffer_size - desc.reloc_size,
                     mode_mask) {}
 
 RelocIterator::RelocIterator(base::Vector<byte> instructions,
                              base::Vector<const byte> reloc_info,
                              Address const_pool, int mode_mask)
-    : RelocIterator(Code(), reinterpret_cast<Address>(instructions.begin()),
-                    const_pool, reloc_info.begin() + reloc_info.size(),
-                    reloc_info.begin(), mode_mask) {}
+    : RelocIterator(InstructionStream(),
+                    reinterpret_cast<Address>(instructions.begin()), const_pool,
+                    reloc_info.begin() + reloc_info.size(), reloc_info.begin(),
+                    mode_mask) {}
 
-RelocIterator::RelocIterator(Code host, Address pc, Address constant_pool,
-                             const byte* pos, const byte* end, int mode_mask)
+RelocIterator::RelocIterator(InstructionStream host, Address pc,
                             Address constant_pool, const byte* pos,
+                             const byte* end, int mode_mask)
     : pos_(pos), end_(end), mode_mask_(mode_mask) {
   // Relocation info is read backwards.
   DCHECK_GE(pos_, end_);
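The DCHECK_GE(pos_, end_) in the private constructor reflects how the iterator consumes its input: relocation info is written at the high end of the buffer and read back-to-front, so the read cursor starts above the end pointer and moves down. A minimal sketch of the same back-to-front consumption, with an invented one-byte-per-entry format:

#include <cstdint>
#include <vector>

// Walk a byte stream from pos down to end, the way RelocIterator does.
// The entry format here (one tag byte per entry) is invented for brevity.
std::vector<uint8_t> ReadBackwards(const uint8_t* pos, const uint8_t* end) {
  std::vector<uint8_t> tags;
  while (pos > end) {
    --pos;                // step backwards, then read
    tags.push_back(*pos);
  }
  return tags;
}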
@@ -350,7 +352,8 @@ void RelocInfo::set_target_address(Address target,
                                    icache_flush_mode);
   if (!host().is_null() && IsCodeTargetMode(rmode_) &&
       !v8_flags.disable_write_barriers) {
-    Code target_code = Code::GetCodeFromTargetAddress(target);
+    InstructionStream target_code =
+        InstructionStream::GetCodeFromTargetAddress(target);
     WriteBarrierForCode(host(), this, target_code, write_barrier_mode);
   }
 }
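The guard around WriteBarrierForCode is the classic write-barrier shape: after patching a pointer into a host object, tell the GC about the new edge unless barriers are disabled or there is no host. A schematic version with invented types standing in for V8's heap machinery:

#include <cstdint>
#include <utility>
#include <vector>

// Invented types; only the control-flow shape mirrors set_target_address.
struct HeapObject {
  uintptr_t addr = 0;
  bool is_null() const { return addr == 0; }
};

struct RememberedSet {
  std::vector<std::pair<uintptr_t, uintptr_t>> edges;
  void Record(HeapObject host, HeapObject target) {
    edges.push_back({host.addr, target.addr});
  }
};

// After the raw store, record the host->target edge so a later collection
// does not miss a pointer written behind the GC's back.
void StorePointer(RememberedSet& set, HeapObject host, HeapObject target,
                  bool barriers_disabled) {
  if (!host.is_null() && !barriers_disabled) {
    set.Record(host, target);
  }
}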
@@ -385,7 +388,7 @@ bool RelocInfo::RequiresRelocationAfterCodegen(const CodeDesc& desc) {
   return !it.done();
 }
 
-bool RelocInfo::RequiresRelocation(Code code) {
+bool RelocInfo::RequiresRelocation(InstructionStream code) {
   RelocIterator it(code, RelocInfo::kApplyMask);
   return !it.done();
 }
@@ -462,8 +465,9 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) {
        << ")";
   } else if (IsCodeTargetMode(rmode_)) {
     const Address code_target = target_address();
-    Code code = Code::GetCodeFromTargetAddress(code_target);
-    DCHECK(code.IsCode());
+    InstructionStream code =
+        InstructionStream::GetCodeFromTargetAddress(code_target);
+    DCHECK(code.IsInstructionStream());
     os << " (" << CodeKindToString(code.kind());
     if (Builtins::IsBuiltin(code)) {
       os << " " << Builtins::name(code.builtin_id());
@@ -492,10 +496,11 @@ void RelocInfo::Verify(Isolate* isolate) {
       Address addr = target_address();
       CHECK_NE(addr, kNullAddress);
       // Check that we can find the right code object.
-      Code code = Code::GetCodeFromTargetAddress(addr);
+      InstructionStream code =
+          InstructionStream::GetCodeFromTargetAddress(addr);
       CodeLookupResult lookup_result = isolate->FindCodeObject(addr);
       CHECK(lookup_result.IsFound());
-      CHECK_EQ(code.address(), lookup_result.code().address());
+      CHECK_EQ(code.address(), lookup_result.instruction_stream().address());
       break;
     }
     case INTERNAL_REFERENCE:
@@ -504,7 +509,7 @@ void RelocInfo::Verify(Isolate* isolate) {
       Address pc = target_internal_reference_address();
       CodeLookupResult lookup_result = isolate->FindCodeObject(pc);
       CHECK(lookup_result.IsFound());
-      Code code = lookup_result.code();
+      InstructionStream code = lookup_result.instruction_stream();
       CHECK(target >= code.InstructionStart(isolate, pc));
       CHECK(target <= code.InstructionEnd(isolate, pc));
       break;
@@ -114,7 +114,7 @@ class RelocInfo {
 
   RelocInfo() = default;
 
-  RelocInfo(Address pc, Mode rmode, intptr_t data, Code host,
+  RelocInfo(Address pc, Mode rmode, intptr_t data, InstructionStream host,
             Address constant_pool = kNullAddress)
       : pc_(pc),
         rmode_(rmode),
@@ -213,7 +213,7 @@ class RelocInfo {
   Address pc() const { return pc_; }
   Mode rmode() const { return rmode_; }
   intptr_t data() const { return data_; }
-  Code host() const { return host_; }
+  InstructionStream host() const { return host_; }
   Address constant_pool() const { return constant_pool_; }
 
   // Apply a relocation by delta bytes. When the code object is moved, PC
@@ -332,7 +332,7 @@ class RelocInfo {
   // Check whether the given code contains relocation information that
   // either is position-relative or movable by the garbage collector.
   static bool RequiresRelocationAfterCodegen(const CodeDesc& desc);
-  static bool RequiresRelocation(Code code);
+  static bool RequiresRelocation(InstructionStream code);
 
 #ifdef ENABLE_DISASSEMBLER
   // Printing
@@ -359,7 +359,7 @@ class RelocInfo {
 
   // In addition to modes covered by the apply mask (which is applied at GC
   // time, among others), this covers all modes that are relocated by
-  // Code::CopyFromNoFlush after code generation.
+  // InstructionStream::CopyFromNoFlush after code generation.
   static int PostCodegenRelocationMask() {
     return ModeMask(RelocInfo::CODE_TARGET) |
            ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
@@ -374,7 +374,7 @@ class RelocInfo {
   Address pc_;
   Mode rmode_;
   intptr_t data_ = 0;
-  Code host_;
+  InstructionStream host_;
   Address constant_pool_ = kNullAddress;
   friend class RelocIterator;
 };
@@ -432,9 +432,11 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced {
   // the beginning of the reloc info.
   // Relocation information with mode k is included in the
   // iteration iff bit k of mode_mask is set.
-  explicit RelocIterator(Code code, int mode_mask = -1);
-  explicit RelocIterator(Code code, ByteArray relocation_info, int mode_mask);
-  explicit RelocIterator(EmbeddedData* embedded_data, Code code, int mode_mask);
+  explicit RelocIterator(InstructionStream code, int mode_mask = -1);
+  explicit RelocIterator(InstructionStream code, ByteArray relocation_info,
+                         int mode_mask);
+  explicit RelocIterator(EmbeddedData* embedded_data, InstructionStream code,
+                         int mode_mask);
   explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
   explicit RelocIterator(const CodeReference code_reference,
                          int mode_mask = -1);
@@ -457,8 +459,8 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced {
   }
 
  private:
-  RelocIterator(Code host, Address pc, Address constant_pool, const byte* pos,
-                const byte* end, int mode_mask);
+  RelocIterator(InstructionStream host, Address pc, Address constant_pool,
+                const byte* pos, const byte* end, int mode_mask);
 
   // Advance* moves the position before/after reading.
   // *Read* reads from current byte(s) into rinfo_.
@@ -20,7 +20,8 @@
 namespace v8 {
 namespace internal {
 
-SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
+SafepointTable::SafepointTable(Isolate* isolate, Address pc,
+                               InstructionStream code)
     : SafepointTable(code.InstructionStart(isolate, pc),
                      code.SafepointTableAddress()) {}
 
@@ -169,7 +170,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
 #endif
 
   // Make sure the safepoint table is properly aligned. Pad with nops.
-  assembler->Align(Code::kMetadataAlignment);
+  assembler->Align(InstructionStream::kMetadataAlignment);
   assembler->RecordComment(";;; Safepoint table.");
   set_safepoint_table_offset(assembler->pc_offset());
 
@@ -54,13 +54,13 @@ class SafepointEntry : public SafepointEntryBase {
   base::Vector<uint8_t> tagged_slots_;
 };
 
-// A wrapper class for accessing the safepoint table embedded into the Code
-// object.
+// A wrapper class for accessing the safepoint table embedded into the
+// InstructionStream object.
 class SafepointTable {
  public:
   // The isolate and pc arguments are used for figuring out whether pc
   // belongs to the embedded or un-embedded code blob.
-  explicit SafepointTable(Isolate* isolate, Address pc, Code code);
+  explicit SafepointTable(Isolate* isolate, Address pc, InstructionStream code);
   explicit SafepointTable(Isolate* isolate, Address pc, CodeDataContainer code);
 #if V8_ENABLE_WEBASSEMBLY
   explicit SafepointTable(const wasm::WasmCode* code);
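For context on what these wrappers expose: a safepoint table maps a pc offset inside the code object to the set of stack slots holding tagged values at that pc, which is what the GC needs when walking frames. A toy model of that mapping (the real table is a packed, byte-encoded structure):

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Toy safepoint table: pc offset -> indexes of stack slots that are tagged
// at that pc. Invented layout, for illustration only.
class ToySafepointTable {
 public:
  void Add(uint32_t pc_offset, std::vector<int> tagged_slots) {
    entries_[pc_offset] = std::move(tagged_slots);
  }
  // What a GC frame walk conceptually does with a return address.
  const std::vector<int>* Find(uint32_t pc_offset) const {
    auto it = entries_.find(pc_offset);
    return it == entries_.end() ? nullptr : &it->second;
  }

 private:
  std::map<uint32_t, std::vector<int>> entries_;
};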
@@ -60,7 +60,7 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
 }
 
 std::vector<SourcePositionInfo> SourcePosition::InliningStack(
-    Handle<Code> code) const {
+    Handle<InstructionStream> code) const {
   Isolate* isolate = code->GetIsolate();
   DeoptimizationData deopt_data =
       DeoptimizationData::cast(code->deoptimization_data());
@@ -79,7 +79,8 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
   return stack;
 }
 
-SourcePositionInfo SourcePosition::FirstInfo(Handle<Code> code) const {
+SourcePositionInfo SourcePosition::FirstInfo(
+    Handle<InstructionStream> code) const {
   DisallowGarbageCollection no_gc;
   Isolate* isolate = code->GetIsolate();
   DeoptimizationData deopt_data =
@@ -127,7 +128,7 @@ void SourcePosition::PrintJson(std::ostream& out) const {
   }
 }
 
-void SourcePosition::Print(std::ostream& out, Code code) const {
+void SourcePosition::Print(std::ostream& out, InstructionStream code) const {
   DeoptimizationData deopt_data =
       DeoptimizationData::cast(code.deoptimization_data());
   if (!isInlined()) {
@@ -15,7 +15,7 @@
 namespace v8 {
 namespace internal {
 
-class Code;
+class InstructionStream;
 class OptimizedCompilationInfo;
 class Script;
 class SharedFunctionInfo;
@@ -79,12 +79,13 @@ class SourcePosition final {
   }
 
   // Assumes that the code object is optimized
-  std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
+  std::vector<SourcePositionInfo> InliningStack(
+      Handle<InstructionStream> code) const;
   std::vector<SourcePositionInfo> InliningStack(
       OptimizedCompilationInfo* cinfo) const;
-  SourcePositionInfo FirstInfo(Handle<Code> code) const;
+  SourcePositionInfo FirstInfo(Handle<InstructionStream> code) const;
 
-  void Print(std::ostream& out, Code code) const;
+  void Print(std::ostream& out, InstructionStream code) const;
   void PrintJson(std::ostream& out) const;
 
   int ScriptOffset() const {
@@ -215,7 +215,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
 }
 
 void Assembler::deserialization_set_special_target_at(
-    Address instruction_payload, Code code, Address target) {
+    Address instruction_payload, InstructionStream code, Address target) {
   set_target_address_at(instruction_payload,
                         !code.is_null() ? code.constant_pool() : kNullAddress,
                         target);
@@ -285,8 +285,9 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   DCHECK(!HAS_SMI_TAG(compressed));
   Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
                                                               compressed));
-  // Embedding of compressed Code objects must not happen when external code
-  // space is enabled, because CodeDataContainers must be used instead.
+  // Embedding of compressed InstructionStream objects must not happen when
+  // external code space is enabled, because CodeDataContainers must be used
+  // instead.
   DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
                  !IsCodeSpaceObject(HeapObject::cast(obj)));
   return HeapObject::cast(obj);
@@ -367,13 +367,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                         SafepointTableBuilderBase* safepoint_table_builder,
                         int handler_table_offset) {
   // As a crutch to avoid having to add manual Align calls wherever we use a
-  // raw workflow to create Code objects (mostly in tests), add another Align
-  // call here. It does no harm - the end of the Code object is aligned to the
-  // (larger) kCodeAlignment anyways.
+  // raw workflow to create InstructionStream objects (mostly in tests), add
+  // another Align call here. It does no harm - the end of the InstructionStream
+  // object is aligned to the (larger) kCodeAlignment anyways.
   // TODO(jgruber): Consider moving responsibility for proper alignment to
   // metadata table builders (safepoint, handler, constant pool, code
   // comments).
-  DataAlign(Code::kMetadataAlignment);
+  DataAlign(InstructionStream::kMetadataAlignment);
 
   PatchConstPool();
   DCHECK(constpool_.IsEmpty());
@@ -4492,7 +4492,8 @@ void Assembler::dq(Label* label) {
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   if (!ShouldRecordRelocInfo(rmode)) return;
-  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
+                  InstructionStream());
   reloc_info_writer.Write(&rinfo);
 }
 
@@ -447,12 +447,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 
   // Read/Modify the code target in the relative branch/call instruction at pc.
   // On the x64 architecture, we use relative jumps with a 32-bit displacement
-  // to jump to other Code objects in the Code space in the heap.
-  // Jumps to C functions are done indirectly through a 64-bit register holding
-  // the absolute address of the target.
-  // These functions convert between absolute Addresses of Code objects and
-  // the relative displacements stored in the code.
-  // The isolate argument is unused (and may be nullptr) when skipping flushing.
+  // to jump to other InstructionStream objects in the InstructionStream space
+  // in the heap. Jumps to C functions are done indirectly through a 64-bit
+  // register holding the absolute address of the target. These functions
+  // convert between absolute Addresses of InstructionStream objects and the
+  // relative displacements stored in the code. The isolate argument is unused
+  // (and may be nullptr) when skipping flushing.
   static inline Address target_address_at(Address pc, Address constant_pool);
   static inline void set_target_address_at(
       Address pc, Address constant_pool, Address target,
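The rewritten comment block describes the standard x64 rel32 scheme: a call or jump stores a 32-bit displacement relative to the address just past the displacement field. The two conversions it alludes to look roughly like this (a sketch, not V8's actual helpers):

#include <cstdint>
#include <cstring>

// rel32 encoding: displacement is relative to the end of the 4-byte field.
int32_t DisplacementFor(uintptr_t field_end, uintptr_t target) {
  return static_cast<int32_t>(target - field_end);
}

uintptr_t TargetFrom(uintptr_t field_end, int32_t displacement) {
  return field_end + static_cast<intptr_t>(displacement);
}

// Patch a 4-byte displacement in place; memcpy avoids unaligned-access UB.
void SetTarget(uint8_t* field, uintptr_t target) {
  int32_t disp =
      DisplacementFor(reinterpret_cast<uintptr_t>(field) + 4, target);
  std::memcpy(field, &disp, sizeof(disp));
}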
@@ -467,7 +467,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   // This sets the branch destination (which is in the instruction on x64).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
-      Address instruction_payload, Code code, Address target);
+      Address instruction_payload, InstructionStream code, Address target);
 
   // Get the size of the special target encoded at 'instruction_payload'.
   inline static int deserialization_special_target_size(
@@ -505,7 +505,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
 
   // ---------------------------------------------------------------------------
-  // Code generation
+  // InstructionStream generation
   //
   // Function names correspond one-to-one to x64 instruction mnemonics.
   // Unless specified otherwise, instructions operate on 64-bit operands.
@@ -2130,7 +2130,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
     WriteUnalignedValue(addr_at(pos), x);
   }
 
-  // Code emission.
+  // InstructionStream emission.
   V8_NOINLINE V8_PRESERVE_MOST void GrowBuffer();
 
   template <typename T>
@@ -2308,13 +2308,13 @@ void TurboAssembler::LoadCodeDataContainerEntry(
                           CodeDataContainer::kCodeEntryPointOffset));
 }
 
-void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
     Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  // Compute the Code object pointer from the code entry point.
+  // Compute the InstructionStream object pointer from the code entry point.
   movq(destination, FieldOperand(code_data_container_object,
                                  CodeDataContainer::kCodeEntryPointOffset));
-  subq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  subq(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
 }
 
 void TurboAssembler::CallCodeDataContainerObject(
@@ -2610,7 +2610,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
     Register code_data_container) {
   testl(FieldOperand(code_data_container,
                      CodeDataContainer::kKindSpecificFlagsOffset),
-        Immediate(1 << Code::kMarkedForDeoptimizationBit));
+        Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
 }
 
 Immediate MacroAssembler::ClearedValue() const {
@@ -3391,11 +3391,12 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
 // 2. test kMarkedForDeoptimizationBit in those flags; and
 // 3. if it is not zero then it jumps to the builtin.
 void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
-  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  int offset = InstructionStream::kCodeDataContainerOffset -
+               InstructionStream::kHeaderSize;
   LoadTaggedPointerField(scratch,
                          Operand(kJavaScriptCallCodeStartRegister, offset));
   testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
-        Immediate(1 << Code::kMarkedForDeoptimizationBit));
+        Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
   Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
        RelocInfo::CODE_TARGET, not_zero);
 }
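BailoutIfDeoptimized is the same three steps on every architecture: load the CodeDataContainer out of the code object's header, test one flag bit, and jump to the lazy-deopt builtin if it is set. Stripped of registers, the test itself is just:

#include <cstdint>

// Illustrative bit index; the real one is fixed by V8's flag layout.
constexpr uint32_t kMarkedForDeoptimizationBit = 0;

// The branch in the generated code fires exactly when this returns true,
// diverting execution to CompileLazyDeoptimizedCode instead of stale code.
bool IsMarkedForDeoptimization(uint32_t kind_specific_flags) {
  return (kind_specific_flags & (1u << kMarkedForDeoptimizationBit)) != 0;
}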
@@ -404,11 +404,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
   void LoadCodeDataContainerEntry(Register destination,
                                   Register code_data_container_object);
   // Load code entry point from the CodeDataContainer object and compute
-  // Code object pointer out of it. Must not be used for CodeDataContainers
-  // corresponding to builtins, because their entry points values point to
-  // the embedded instruction stream in .text section.
-  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
-                                           Register code_data_container_object);
+  // InstructionStream object pointer out of it. Must not be used for
+  // CodeDataContainers corresponding to builtins, because their entry points
+  // values point to the embedded instruction stream in .text section.
+  void LoadCodeDataContainerInstructionStreamNonBuiltin(
+      Register destination, Register code_data_container_object);
   void CallCodeDataContainerObject(Register code_data_container_object);
   void JumpCodeDataContainerObject(Register code_data_container_object,
                                    JumpMode jump_mode = JumpMode::kJump);
@@ -867,7 +867,7 @@ using RuntimeArguments = Arguments<ArgumentsType::kRuntime>;
 using JavaScriptArguments = Arguments<ArgumentsType::kJS>;
 class Assembler;
 class ClassScope;
-class Code;
+class InstructionStream;
 class CodeDataContainer;
 class CodeSpace;
 class Context;
@@ -989,9 +989,10 @@ using HeapObjectSlot = SlotTraits::THeapObjectSlot;
 using OffHeapObjectSlot = SlotTraits::TOffHeapObjectSlot;
 
 // A CodeObjectSlot instance describes a kTaggedSize-sized field ("slot")
-// holding a strong pointer to a Code object. The Code object slots might be
-// compressed and since code space might be allocated off the main heap
-// the load operations require explicit cage base value for code space.
+// holding a strong pointer to a InstructionStream object. The InstructionStream
+// object slots might be compressed and since code space might be allocated off
+// the main heap the load operations require explicit cage base value for code
+// space.
 using CodeObjectSlot = SlotTraits::TCodeObjectSlot;
 
 using WeakSlotCallback = bool (*)(FullObjectSlot pointer);
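The CodeObjectSlot comment is about pointer compression: a compressed slot stores only the low 32 bits, and decompression must re-attach a cage base, which differs between the main heap and an external code space. A simplified decompression illustrating why the base must be supplied explicitly (the scheme here is invented; V8's also handles tagging and sign extension):

#include <cstdint>

uint32_t Compress(uintptr_t full_pointer) {
  return static_cast<uint32_t>(full_pointer);  // keep the low 32 bits
}

// The same compressed value decodes to different addresses under different
// bases - hence code-space loads must pass the code cage base explicitly.
uintptr_t Decompress(uintptr_t cage_base, uint32_t compressed) {
  return (cage_base & ~uintptr_t{0xFFFFFFFF}) | compressed;
}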
@@ -1028,10 +1029,10 @@ constexpr int kSpaceTagSize = 4;
 static_assert(FIRST_SPACE == 0);
 
 enum class AllocationType : uint8_t {
   kYoung,      // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
   kOld,        // Regular object allocated in OLD_SPACE or LO_SPACE
-  kCode,       // Code object allocated in CODE_SPACE or CODE_LO_SPACE
+  kCode,       // InstructionStream object allocated in CODE_SPACE or CODE_LO_SPACE
   kMap,        // Map object allocated in OLD_SPACE
   kReadOnly,   // Object allocated in RO_SPACE
   kSharedOld,  // Regular object allocated in OLD_SPACE in the shared heap
   kSharedMap,  // Map object in OLD_SPACE in the shared heap
@@ -2056,7 +2057,8 @@ enum class IcCheckType { kElement, kProperty };
 
 // Helper stubs can be called in different ways depending on where the target
 // code is located and how the call sequence is expected to look like:
-//  - CodeObject: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
+//  - CodeObject: Call on-heap {Code} object via
+//                {RelocInfo::CODE_TARGET}.
 //  - WasmRuntimeStub: Call native {WasmCode} stub via
 //                     {RelocInfo::WASM_STUB_CALL}.
 //  - BuiltinPointer: Call a builtin based on a builtin pointer with dynamic
@@ -60,9 +60,9 @@ class V8HeapCompressionScheme {
 
 #ifdef V8_EXTERNAL_CODE_SPACE
 
-// Compression scheme used for fields containing Code objects (namely for the
-// CodeDataContainer::code field).
-// Same as V8HeapCompressionScheme but with a different base value.
+// Compression scheme used for fields containing InstructionStream objects
+// (namely for the CodeDataContainer::code field). Same as
+// V8HeapCompressionScheme but with a different base value.
 class ExternalCodeCompressionScheme {
  public:
   V8_INLINE static Address PrepareCageBaseAddress(Address on_heap_addr);
@@ -647,11 +647,12 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 void CodeGenerator::BailoutIfDeoptimized() {
   UseScratchRegisterScope temps(tasm());
   Register scratch = temps.Acquire();
-  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  int offset = InstructionStream::kCodeDataContainerOffset -
+               InstructionStream::kHeaderSize;
   __ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ ldr(scratch,
          FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
-  __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+  __ tst(scratch, Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
           RelocInfo::CODE_TARGET, ne);
 }
@@ -3376,8 +3376,8 @@ void CodeGenerator::PrepareForDeoptimizationExits(
       false, false,
       static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
 
-  // Check which deopt kinds exist in this Code object, to avoid emitting jumps
-  // to unused entries.
+  // Check which deopt kinds exist in this InstructionStream object, to avoid
+  // emitting jumps to unused entries.
   bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
   for (auto exit : *exits) {
     saw_deopt_kind[static_cast<int>(exit->kind())] = true;
@@ -411,7 +411,7 @@ void CodeGenerator::AssembleCode() {
   unwinding_info_writer_.Finish(tasm()->pc_offset());
 
   // Final alignment before starting on the metadata section.
-  tasm()->Align(Code::kMetadataAlignment);
+  tasm()->Align(InstructionStream::kMetadataAlignment);
 
   safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
 
@@ -467,10 +467,10 @@ base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
 
-MaybeHandle<Code> CodeGenerator::FinalizeCode() {
+MaybeHandle<InstructionStream> CodeGenerator::FinalizeCode() {
   if (result_ != kSuccess) {
     tasm()->AbortedCodeGeneration();
-    return MaybeHandle<Code>();
+    return MaybeHandle<InstructionStream>();
   }
 
   // Allocate the source position table.
@@ -494,7 +494,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
     unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
   }
 
-  MaybeHandle<Code> maybe_code =
+  MaybeHandle<InstructionStream> maybe_code =
       Factory::CodeBuilder(isolate(), desc, info()->code_kind())
           .set_builtin(info()->builtin())
          .set_inlined_bytecode_size(info()->inlined_bytecode_size())
@@ -506,10 +506,10 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
           .set_osr_offset(info()->osr_offset())
           .TryBuild();
 
-  Handle<Code> code;
+  Handle<InstructionStream> code;
   if (!maybe_code.ToHandle(&code)) {
     tasm()->AbortedCodeGeneration();
-    return MaybeHandle<Code>();
+    return MaybeHandle<InstructionStream>();
   }
 
   LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent(
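FinalizeCode shows the MaybeHandle idiom used throughout these changes: a fallible operation returns a possibly-empty handle, and the caller must check-and-unwrap before touching the value. The same shape with std::optional as a stand-in:

#include <optional>
#include <string>

// Stand-in for MaybeHandle<T>; TryBuild may fail and return empty.
std::optional<std::string> TryBuild(bool ok) {
  if (!ok) return std::nullopt;  // like returning MaybeHandle<...>()
  return std::string("generated code");
}

bool FinalizeDemo() {
  std::optional<std::string> maybe_code = TryBuild(true);
  if (!maybe_code.has_value()) {
    return false;  // mirrors: if (!maybe_code.ToHandle(&code)) return ...;
  }
  const std::string& code = *maybe_code;  // safe only after the check
  return !code.empty();
}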
@@ -159,7 +159,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   // produce the actual code object. If an error occurs during either phase,
   // FinalizeCode returns an empty MaybeHandle.
   void AssembleCode();  // Does not need to run on main thread.
-  MaybeHandle<Code> FinalizeCode();
+  MaybeHandle<InstructionStream> FinalizeCode();
 
   base::OwnedVector<byte> GetSourcePositionTable();
   base::OwnedVector<byte> GetProtectedInstructionsData();
@@ -466,8 +466,8 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   // with function size. {jump_deoptimization_entry_labels_} is an optimization
   // to that effect, which extracts the (potentially large) instruction
   // sequence for the final jump to the deoptimization entry into a single spot
-  // per Code object. All deopt exits can then near-call to this label. Note:
-  // not used on all architectures.
+  // per InstructionStream object. All deopt exits can then near-call to this
+  // label. Note: not used on all architectures.
   Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
 
   // The maximal combined height of all frames produced upon deoptimization, and
@@ -662,11 +662,12 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 // 2. test kMarkedForDeoptimizationBit in those flags; and
 // 3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  int offset = InstructionStream::kCodeDataContainerOffset -
+               InstructionStream::kHeaderSize;
   __ push(eax);  // Push eax so we can use it as a scratch register.
   __ mov(eax, Operand(kJavaScriptCallCodeStartRegister, offset));
   __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
-          Immediate(1 << Code::kMarkedForDeoptimizationBit));
+          Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
   __ pop(eax);  // Restore eax.
 
   Label skip;
@@ -827,7 +828,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ PushPC();
       int pc = __ pc_offset();
       __ pop(scratch);
-      __ sub(scratch, Immediate(pc + Code::kHeaderSize - kHeapObjectTag));
+      __ sub(scratch,
+             Immediate(pc + InstructionStream::kHeaderSize - kHeapObjectTag));
       __ add(scratch, Immediate::CodeRelativeOffset(&return_location));
       __ mov(MemOperand(ebp, WasmExitFrameConstants::kCallingPCOffset),
              scratch);
@@ -162,14 +162,14 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
 }
 
 // static
-Handle<Code> CodeAssembler::GenerateCode(
+Handle<InstructionStream> CodeAssembler::GenerateCode(
     CodeAssemblerState* state, const AssemblerOptions& options,
     const ProfileDataFromFile* profile_data) {
   DCHECK(!state->code_generated_);
 
   RawMachineAssembler* rasm = state->raw_assembler_.get();
 
-  Handle<Code> code;
+  Handle<InstructionStream> code;
   Graph* graph = rasm->ExportForOptimization();
 
   code = Pipeline::GenerateCodeForCodeStub(
@@ -387,9 +387,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   CodeAssembler(const CodeAssembler&) = delete;
   CodeAssembler& operator=(const CodeAssembler&) = delete;
 
-  static Handle<Code> GenerateCode(CodeAssemblerState* state,
-                                   const AssemblerOptions& options,
+  static Handle<InstructionStream> GenerateCode(
+      CodeAssemblerState* state, const AssemblerOptions& options,
       const ProfileDataFromFile* profile_data);
   bool Is64() const;
   bool Is32() const;
   bool IsFloat64RoundUpSupported() const;
@@ -115,16 +115,16 @@ class PendingDependencies final {
 
   void Register(Handle<HeapObject> object,
                 DependentCode::DependencyGroup group) {
-    // Code, which are per-local Isolate, cannot depend on objects in the shared
-    // heap. Shared heap dependencies are designed to never invalidate
-    // assumptions. E.g., maps for shared structs do not have transitions or
-    // change the shape of their fields. See
+    // InstructionStream, which are per-local Isolate, cannot depend on objects
+    // in the shared heap. Shared heap dependencies are designed to never
+    // invalidate assumptions. E.g., maps for shared structs do not have
+    // transitions or change the shape of their fields. See
     // DependentCode::DeoptimizeDependencyGroups for corresponding DCHECK.
     if (object->InSharedWritableHeap()) return;
    deps_[object] |= group;
   }
 
-  void InstallAll(Isolate* isolate, Handle<Code> code) {
+  void InstallAll(Isolate* isolate, Handle<InstructionStream> code) {
     if (V8_UNLIKELY(v8_flags.predictable)) {
       InstallAllPredictable(isolate, code);
       return;
@@ -139,7 +139,7 @@ class PendingDependencies final {
     }
   }
 
-  void InstallAllPredictable(Isolate* isolate, Handle<Code> code) {
+  void InstallAllPredictable(Isolate* isolate, Handle<InstructionStream> code) {
     CHECK(v8_flags.predictable);
     // First, guarantee predictable iteration order.
     using HandleAndGroup =
@@ -1189,7 +1189,7 @@ V8_INLINE void TraceInvalidCompilationDependency(
   PrintF("Compilation aborted due to invalid dependency: %s\n", d->ToString());
 }
 
-bool CompilationDependencies::Commit(Handle<Code> code) {
+bool CompilationDependencies::Commit(Handle<InstructionStream> code) {
   if (!PrepareInstall()) return false;
 
   {
@@ -31,7 +31,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
 public:
   CompilationDependencies(JSHeapBroker* broker, Zone* zone);
 
-  V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
+  V8_WARN_UNUSED_RESULT bool Commit(Handle<InstructionStream> code);
 
   // Return the initial map of {function} and record the assumption that it
   // stays the initial map.
@@ -134,30 +134,30 @@ class Reducer;
   V(Uint64Div) \
   V(Uint64Mod)
 
 #define JSGRAPH_SINGLETON_CONSTANT_LIST(V)                  \
-  V(AllocateInOldGenerationStub, Code)                      \
-  V(AllocateInYoungGenerationStub, Code)                    \
-  V(AllocateRegularInOldGenerationStub, Code)               \
-  V(AllocateRegularInYoungGenerationStub, Code)             \
+  V(AllocateInOldGenerationStub, InstructionStream)         \
+  V(AllocateInYoungGenerationStub, InstructionStream)       \
+  V(AllocateRegularInOldGenerationStub, InstructionStream)  \
+  V(AllocateRegularInYoungGenerationStub, InstructionStream) \
   V(BigIntMap, Map)                                         \
   V(BooleanMap, Map)                                        \
   V(EmptyString, String)                                    \
   V(ExternalObjectMap, Map)                                 \
   V(False, Boolean)                                         \
   V(FixedArrayMap, Map)                                     \
   V(FixedDoubleArrayMap, Map)                               \
   V(WeakFixedArrayMap, Map)                                 \
   V(HeapNumberMap, Map)                                     \
   V(MinusOne, Number)                                       \
   V(NaN, Number)                                            \
   V(NoContext, Object)                                      \
   V(Null, Oddball)                                          \
   V(One, Number)                                            \
   V(TheHole, Oddball)                                       \
-  V(ToNumberBuiltin, Code)                                  \
-  V(PlainPrimitiveToNumberBuiltin, Code)                    \
+  V(ToNumberBuiltin, InstructionStream)                     \
+  V(PlainPrimitiveToNumberBuiltin, InstructionStream)       \
   V(True, Boolean)                                          \
   V(Undefined, Oddball)                                     \
   V(Zero, Number)
 
 class GraphAssembler;
@ -71,7 +71,8 @@ bool IsReadOnlyHeapObjectForCompiler(PtrComprCageBase cage_base,
|
|||||||
// TODO(jgruber): Remove this compiler-specific predicate and use the plain
|
// TODO(jgruber): Remove this compiler-specific predicate and use the plain
|
||||||
// heap predicate instead. This would involve removing the special cases for
|
// heap predicate instead. This would involve removing the special cases for
|
||||||
// builtins.
|
// builtins.
|
||||||
return (object.IsCode(cage_base) && Code::cast(object).is_builtin()) ||
|
return (object.IsInstructionStream(cage_base) &&
|
||||||
|
InstructionStream::cast(object).is_builtin()) ||
|
||||||
ReadOnlyHeap::Contains(object);
|
ReadOnlyHeap::Contains(object);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2286,7 +2287,7 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {

 namespace {

-unsigned GetInlinedBytecodeSizeImpl(Code code) {
+unsigned GetInlinedBytecodeSizeImpl(InstructionStream code) {
  unsigned value = code.inlined_bytecode_size();
  if (value > 0) {
    // Don't report inlined bytecode size if the code object was already

@@ -2298,7 +2299,7 @@ unsigned GetInlinedBytecodeSizeImpl(Code code) {

 }  // namespace

-unsigned CodeRef::GetInlinedBytecodeSize() const {
+unsigned InstructionStreamRef::GetInlinedBytecodeSize() const {
  return GetInlinedBytecodeSizeImpl(*object());
 }

@@ -2308,9 +2309,10 @@ unsigned CodeDataContainerRef::GetInlinedBytecodeSize() const {
    return 0;
  }

-  // Safe to do a relaxed conversion to Code here since CodeDataContainer::code
-  // field is modified only by GC and the CodeDataContainer was acquire-loaded.
-  Code code = code_data_container.code(kRelaxedLoad);
+  // Safe to do a relaxed conversion to InstructionStream here since
+  // CodeDataContainer::code field is modified only by GC and the
+  // CodeDataContainer was acquire-loaded.
+  InstructionStream code = code_data_container.instruction_stream(kRelaxedLoad);
  return GetInlinedBytecodeSizeImpl(code);
 }
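The relaxed-load comment above is a memory-ordering argument: an acquire load of the container pairs with the release store that published it, so a subsequent relaxed load of an inner field cannot observe a stale value. A minimal stand-alone sketch of the same pairing with std::atomic, using stand-in types rather than V8's:

#include <atomic>

struct Payload { int inlined_bytecode_size; };

// Stand-in for the container object: it is published with release
// semantics after its fields are written.
struct Container { std::atomic<Payload*> payload{nullptr}; };

std::atomic<Container*> g_container{nullptr};

void Publish(Container* c, Payload* p) {
  c->payload.store(p, std::memory_order_relaxed);
  g_container.store(c, std::memory_order_release);  // publish
}

int ReadInlinedSize() {
  // The acquire here synchronizes with the release store above, ...
  Container* c = g_container.load(std::memory_order_acquire);
  if (c == nullptr) return 0;
  // ... so relaxed is enough for the inner field: its write happened
  // before the publication we already observed.
  Payload* p = c->payload.load(std::memory_order_relaxed);
  return p ? p->inlined_bytecode_size : 0;
}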
@@ -111,7 +111,7 @@ enum class RefSerializationKind {
  BACKGROUND_SERIALIZED(BigInt) \
  NEVER_SERIALIZED(CallHandlerInfo) \
  NEVER_SERIALIZED(Cell) \
-  NEVER_SERIALIZED(Code) \
+  NEVER_SERIALIZED(InstructionStream) \
  NEVER_SERIALIZED(CodeDataContainer) \
  NEVER_SERIALIZED(Context) \
  NEVER_SERIALIZED(DescriptorArray) \

@@ -1010,11 +1010,11 @@ class JSGlobalProxyRef : public JSObjectRef {
  Handle<JSGlobalProxy> object() const;
 };

-class CodeRef : public HeapObjectRef {
+class InstructionStreamRef : public HeapObjectRef {
 public:
-  DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef)
+  DEFINE_REF_CONSTRUCTOR(InstructionStream, HeapObjectRef)

-  Handle<Code> object() const;
+  Handle<InstructionStream> object() const;

  unsigned GetInlinedBytecodeSize() const;
 };

@@ -118,7 +118,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
  Isolate* isolate() const { return isolate_; }

  // The pointer compression cage base value used for decompression of all
-  // tagged values except references to Code objects.
+  // tagged values except references to InstructionStream objects.
  PtrComprCageBase cage_base() const {
 #if V8_COMPRESS_POINTERS
    return cage_base_;
@@ -167,9 +167,10 @@ Reduction MemoryLowering::ReduceAllocateRaw(
  if (v8_flags.single_generation && allocation_type == AllocationType::kYoung) {
    allocation_type = AllocationType::kOld;
  }
-  // Code objects may have a maximum size smaller than kMaxHeapObjectSize due to
-  // guard pages. If we need to support allocating code here we would need to
-  // call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
+  // InstructionStream objects may have a maximum size smaller than
+  // kMaxHeapObjectSize due to guard pages. If we need to support allocating
+  // code here we would need to call
+  // MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
  DCHECK_NE(allocation_type, AllocationType::kCode);
  Node* value;
  Node* size = node->InputAt(0);
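The guard-page remark explains the DCHECK: code-space pages reserve guard regions at their edges, so the largest regular code object is strictly smaller than the largest regular heap object. A small illustration with made-up numbers (V8 computes the real limit at runtime via MemoryChunkLayout::MaxRegularCodeObjectSize()):

#include <cstddef>

// All constants below are hypothetical, chosen only to make the
// relationship visible; they are not V8's actual values.
constexpr size_t kChunkSize = 256 * 1024;       // hypothetical page chunk
constexpr size_t kGuardReserve = 2 * 4096;      // hypothetical guard pages
constexpr size_t kMaxHeapObjectSize = kChunkSize / 2;
constexpr size_t kMaxCodeObjectSize = (kChunkSize - kGuardReserve) / 2;

static_assert(kMaxCodeObjectSize < kMaxHeapObjectSize,
              "code objects have a tighter size limit than plain objects, "
              "which is why this lowering refuses AllocationType::kCode");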
@@ -360,8 +360,8 @@ class PipelineData {
  bool verify_graph() const { return verify_graph_; }
  void set_verify_graph(bool value) { verify_graph_ = value; }

-  MaybeHandle<Code> code() { return code_; }
-  void set_code(MaybeHandle<Code> code) {
+  MaybeHandle<InstructionStream> code() { return code_; }
+  void set_code(MaybeHandle<InstructionStream> code) {
    DCHECK(code_.is_null());
    code_ = code;
  }

@@ -655,7 +655,7 @@ class PipelineData {
  bool verify_graph_ = false;
  int start_source_position_ = kNoSourcePosition;
  base::Optional<OsrHelper> osr_helper_;
-  MaybeHandle<Code> code_;
+  MaybeHandle<InstructionStream> code_;
  CodeGenerator* code_generator_ = nullptr;
  Typer* typer_ = nullptr;
  Typer::Flags typer_flags_ = Typer::kNoFlags;

@@ -750,15 +750,15 @@ class PipelineImpl final {
  void AssembleCode(Linkage* linkage);

  // Step D. Run the code finalization pass.
-  MaybeHandle<Code> FinalizeCode(bool retire_broker = true);
+  MaybeHandle<InstructionStream> FinalizeCode(bool retire_broker = true);

  // Step E. Install any code dependencies.
-  bool CommitDependencies(Handle<Code> code);
+  bool CommitDependencies(Handle<InstructionStream> code);

  void VerifyGeneratedCodeIsIdempotent();
  void RunPrintAndVerify(const char* phase, bool untyped = false);
  bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
-  MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
+  MaybeHandle<InstructionStream> GenerateCode(CallDescriptor* call_descriptor);
  void AllocateRegistersForTopTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);

@@ -945,7 +945,7 @@ void PrintParticipatingSource(OptimizedCompilationInfo* info,
 }

 // Print the code after compiling it.
-void PrintCode(Isolate* isolate, Handle<Code> code,
+void PrintCode(Isolate* isolate, Handle<InstructionStream> code,
               OptimizedCompilationInfo* info) {
  if (v8_flags.print_opt_source && info->IsOptimizing()) {
    PrintParticipatingSource(info, isolate);

@@ -1145,7 +1145,7 @@ class PipelineCompilationJob final : public TurbofanCompilationJob {
  // Registers weak object to optimized code dependencies.
  void RegisterWeakObjectsInOptimizedCode(Isolate* isolate,
                                          Handle<NativeContext> context,
-                                          Handle<Code> code);
+                                          Handle<InstructionStream> code);

 private:
  Zone zone_;

@@ -1286,8 +1286,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
  // phases happening during PrepareJob.
  PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
  RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
-  MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
-  Handle<Code> code;
+  MaybeHandle<InstructionStream> maybe_code = pipeline_.FinalizeCode();
+  Handle<InstructionStream> code;
  if (!maybe_code.ToHandle(&code)) {
    if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
      return AbortOptimization(BailoutReason::kCodeGenerationFailed);

@@ -1305,7 +1305,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
 }

 void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
-    Isolate* isolate, Handle<NativeContext> context, Handle<Code> code) {
+    Isolate* isolate, Handle<NativeContext> context,
+    Handle<InstructionStream> code) {
  std::vector<Handle<Map>> maps;
  DCHECK(code->is_optimized_code());
  {

@@ -2916,7 +2917,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(

 CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
    Isolate* isolate) {
-  Handle<Code> code;
+  Handle<InstructionStream> code;
  if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
    V8::FatalProcessOutOfMemory(isolate,
                                "WasmHeapStubCompilationJob::FinalizeJobImpl");

@@ -3246,7 +3247,7 @@ int HashGraphForPGO(Graph* graph) {

 }  // namespace

-MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
+MaybeHandle<InstructionStream> Pipeline::GenerateCodeForCodeStub(
    Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
    JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
    const char* debug_name, Builtin builtin, const AssemblerOptions& options,

@@ -3741,7 +3742,7 @@ void Pipeline::GenerateCodeForWasmFunction(
 #endif  // V8_ENABLE_WEBASSEMBLY

 // static
-MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
+MaybeHandle<InstructionStream> Pipeline::GenerateCodeForTesting(
    OptimizedCompilationInfo* info, Isolate* isolate,
    std::unique_ptr<JSHeapBroker>* out_broker) {
  ZoneStats zone_stats(isolate->allocator());

@@ -3764,9 +3765,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
  {
    LocalIsolateScope local_isolate_scope(data.broker(), info,
                                          isolate->main_thread_local_isolate());
-    if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
+    if (!pipeline.CreateGraph()) return MaybeHandle<InstructionStream>();
    // We selectively Unpark inside OptimizeGraph.
-    if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
+    if (!pipeline.OptimizeGraph(&linkage))
+      return MaybeHandle<InstructionStream>();

    pipeline.AssembleCode(&linkage);
  }

@@ -3780,17 +3782,17 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
        info->DetachPersistentHandles(), info->DetachCanonicalHandles());
  }

-  Handle<Code> code;
+  Handle<InstructionStream> code;
  if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    if (!will_retire_broker) *out_broker = data.ReleaseBroker();
    return code;
  }
-  return MaybeHandle<Code>();
+  return MaybeHandle<InstructionStream>();
 }

 // static
-MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
+MaybeHandle<InstructionStream> Pipeline::GenerateCodeForTesting(
    OptimizedCompilationInfo* info, Isolate* isolate,
    CallDescriptor* call_descriptor, Graph* graph,
    const AssemblerOptions& options, Schedule* schedule) {

@@ -3822,12 +3824,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
    pipeline.ComputeScheduledGraph();
  }

-  Handle<Code> code;
+  Handle<InstructionStream> code;
  if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    return code;
  }
-  return MaybeHandle<Code>();
+  return MaybeHandle<InstructionStream>();
 }

 // static

@@ -4110,7 +4112,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage) {
  data->EndPhaseKind();
 }

-MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
+MaybeHandle<InstructionStream> PipelineImpl::FinalizeCode(bool retire_broker) {
  PipelineData* data = this->data_;
  data->BeginPhaseKind("V8.TFFinalizeCode");
  if (data->broker() && retire_broker) {

@@ -4118,8 +4120,8 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
  }
  Run<FinalizeCodePhase>();

-  MaybeHandle<Code> maybe_code = data->code();
-  Handle<Code> code;
+  MaybeHandle<InstructionStream> maybe_code = data->code();
+  Handle<InstructionStream> code;
  if (!maybe_code.ToHandle(&code)) {
    return maybe_code;
  }

@@ -4174,14 +4176,15 @@ bool PipelineImpl::SelectInstructionsAndAssemble(
  return true;
 }

-MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
+MaybeHandle<InstructionStream> PipelineImpl::GenerateCode(
+    CallDescriptor* call_descriptor) {
  if (!SelectInstructionsAndAssemble(call_descriptor)) {
-    return MaybeHandle<Code>();
+    return MaybeHandle<InstructionStream>();
  }
  return FinalizeCode();
 }

-bool PipelineImpl::CommitDependencies(Handle<Code> code) {
+bool PipelineImpl::CommitDependencies(Handle<InstructionStream> code) {
  return data_->dependencies() == nullptr ||
         data_->dependencies()->Commit(code);
 }
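Every pipeline entry point above returns a MaybeHandle, and every caller branches on ToHandle() before using the result. As a stand-alone sketch of that calling convention, with simplified stand-ins for Handle/MaybeHandle (not V8's real templates):

#include <optional>

// Minimal stand-ins: a Handle wraps a location, a MaybeHandle may be empty.
template <typename T>
struct Handle {
  T* location = nullptr;
};

template <typename T>
struct MaybeHandle {
  std::optional<Handle<T>> value;
  bool is_null() const { return !value.has_value(); }
  // ToHandle() both tests for emptiness and extracts the value, which is
  // why callers are forced to write an explicit branch.
  bool ToHandle(Handle<T>* out) const {
    if (!value) return false;
    *out = *value;
    return true;
  }
};

struct InstructionStream {};  // stand-in for the renamed object type

bool FinalizeAndCommit(const MaybeHandle<InstructionStream>& maybe_code,
                       bool (*commit)(Handle<InstructionStream>)) {
  Handle<InstructionStream> code;
  // Compilation may fail; the empty MaybeHandle encodes that without
  // exceptions, so failure handling stays local to each call site.
  if (!maybe_code.ToHandle(&code)) return false;
  return commit(code);
}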
@@ -75,7 +75,7 @@ class Pipeline : public AllStatic {
      SourcePositionTable* source_positions = nullptr);

  // Run the pipeline on a machine graph and generate code.
-  static MaybeHandle<Code> GenerateCodeForCodeStub(
+  static MaybeHandle<InstructionStream> GenerateCodeForCodeStub(
      Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
      JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
      const char* debug_name, Builtin builtin, const AssemblerOptions& options,

@@ -88,16 +88,17 @@ class Pipeline : public AllStatic {
  // Run the pipeline on JavaScript bytecode and generate code. If requested,
  // hands out the heap broker on success, transferring its ownership to the
  // caller.
-  V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
-      OptimizedCompilationInfo* info, Isolate* isolate,
-      std::unique_ptr<JSHeapBroker>* out_broker = nullptr);
+  V8_EXPORT_PRIVATE static MaybeHandle<InstructionStream>
+  GenerateCodeForTesting(OptimizedCompilationInfo* info, Isolate* isolate,
+                         std::unique_ptr<JSHeapBroker>* out_broker = nullptr);

  // Run the pipeline on a machine graph and generate code. If {schedule} is
  // {nullptr}, then compute a new schedule for code generation.
-  V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
-      OptimizedCompilationInfo* info, Isolate* isolate,
-      CallDescriptor* call_descriptor, Graph* graph,
-      const AssemblerOptions& options, Schedule* schedule = nullptr);
+  V8_EXPORT_PRIVATE static MaybeHandle<InstructionStream>
+  GenerateCodeForTesting(OptimizedCompilationInfo* info, Isolate* isolate,
+                         CallDescriptor* call_descriptor, Graph* graph,
+                         const AssemblerOptions& options,
+                         Schedule* schedule = nullptr);

  // Run just the register allocator phases.
  V8_EXPORT_PRIVATE static void AllocateRegistersForTesting(
@@ -368,7 +368,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
    case SCRIPT_CONTEXT_TYPE:
    case WITH_CONTEXT_TYPE:
    case SCRIPT_TYPE:
-    case CODE_TYPE:
+    case INSTRUCTION_STREAM_TYPE:
    case CODE_DATA_CONTAINER_TYPE:
    case PROPERTY_CELL_TYPE:
    case SOURCE_TEXT_MODULE_TYPE:
@@ -8312,11 +8312,9 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module,
  }
 }

-MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
-                                         const wasm::FunctionSig* sig,
-                                         WasmImportCallKind kind,
-                                         int expected_arity,
-                                         wasm::Suspend suspend) {
+MaybeHandle<InstructionStream> CompileWasmToJSWrapper(
+    Isolate* isolate, const wasm::FunctionSig* sig, WasmImportCallKind kind,
+    int expected_arity, wasm::Suspend suspend) {
  std::unique_ptr<Zone> zone = std::make_unique<Zone>(
      isolate->allocator(), ZONE_NAME, kCompressGraphZone);

@@ -8359,15 +8357,15 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
  if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
          CompilationJob::FAILED ||
      job->FinalizeJob(isolate) == CompilationJob::FAILED) {
-    return Handle<Code>();
+    return Handle<InstructionStream>();
  }
-  Handle<Code> code = job->compilation_info()->code();
+  Handle<InstructionStream> code = job->compilation_info()->code();
  return code;
 }

-MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
-                                       const wasm::FunctionSig* sig,
-                                       const wasm::WasmModule* module) {
+MaybeHandle<InstructionStream> CompileJSToJSWrapper(
+    Isolate* isolate, const wasm::FunctionSig* sig,
+    const wasm::WasmModule* module) {
  std::unique_ptr<Zone> zone = std::make_unique<Zone>(
      isolate->allocator(), ZONE_NAME, kCompressGraphZone);
  Graph* graph = zone->New<Graph>(zone.get());

@@ -8409,7 +8407,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
      job->FinalizeJob(isolate) == CompilationJob::FAILED) {
    return {};
  }
-  Handle<Code> code = job->compilation_info()->code();
+  Handle<InstructionStream> code = job->compilation_info()->code();

  return code;
 }

@@ -142,17 +142,15 @@ std::unique_ptr<TurbofanCompilationJob> NewJSToWasmCompilationJob(
    const wasm::WasmModule* module, bool is_import,
    const wasm::WasmFeatures& enabled_features);

-MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
-                                         const wasm::FunctionSig* sig,
-                                         WasmImportCallKind kind,
-                                         int expected_arity,
-                                         wasm::Suspend suspend);
+MaybeHandle<InstructionStream> CompileWasmToJSWrapper(
+    Isolate* isolate, const wasm::FunctionSig* sig, WasmImportCallKind kind,
+    int expected_arity, wasm::Suspend suspend);

 // Compiles a stub with JS linkage that serves as an adapter for function
 // objects constructed via {WebAssembly.Function}. It performs a round-trip
 // simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
-MaybeHandle<Code> CompileJSToJSWrapper(Isolate*, const wasm::FunctionSig*,
-                                       const wasm::WasmModule* module);
+MaybeHandle<InstructionStream> CompileJSToJSWrapper(
+    Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);

 enum CWasmEntryParameters {
  kCodeEntry,
@@ -1231,7 +1231,8 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
  for (Builtin caller = Builtins::kFirst; caller <= Builtins::kLast; ++caller) {
    DebugInfo::SideEffectState state = BuiltinGetSideEffectState(caller);
    if (state != DebugInfo::kHasNoSideEffect) continue;
-    Code code = FromCodeDataContainer(isolate->builtins()->code(caller));
+    InstructionStream code =
+        FromCodeDataContainer(isolate->builtins()->code(caller));
    int mode = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
               RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);

@@ -161,7 +161,7 @@ void BreakLocation::AllAtCurrentStatement(
  int offset = summary.code_offset();
  Handle<AbstractCode> abstract_code = summary.abstract_code();
  PtrComprCageBase cage_base = GetPtrComprCageBase(*debug_info);
-  if (abstract_code->IsCode(cage_base)) offset = offset - 1;
+  if (abstract_code->IsInstructionStream(cage_base)) offset = offset - 1;
  int statement_position;
  {
    BreakIterator it(debug_info);

@@ -1941,7 +1941,8 @@ bool Debug::FindSharedFunctionInfosIntersectingRange(
  for (const auto& candidate : candidates) {
    IsCompiledScope is_compiled_scope(candidate->is_compiled_scope(isolate_));
    if (!is_compiled_scope.is_compiled()) {
-      // Code that cannot be compiled lazily are internal and not debuggable.
+      // InstructionStream that cannot be compiled lazily are internal and not
+      // debuggable.
      DCHECK(candidate->allows_lazy_compilation());
      if (!Compiler::Compile(isolate_, candidate, Compiler::CLEAR_EXCEPTION,
                             &is_compiled_scope)) {

@@ -2006,7 +2007,8 @@ Handle<Object> Debug::FindInnermostContainingFunctionInfo(Handle<Script> script,
  }
  // If not, compile to reveal inner functions.
  HandleScope scope(isolate_);
-  // Code that cannot be compiled lazily are internal and not debuggable.
+  // InstructionStream that cannot be compiled lazily are internal and not
+  // debuggable.
  DCHECK(shared.allows_lazy_compilation());
  if (!Compiler::Compile(isolate_, handle(shared, isolate_),
                         Compiler::CLEAR_EXCEPTION, &is_compiled_scope)) {
@@ -340,8 +340,9 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {

  // Mark all code, then deoptimize.
  {
-    Code::OptimizedCodeIterator it(isolate);
-    for (Code code = it.Next(); !code.is_null(); code = it.Next()) {
+    InstructionStream::OptimizedCodeIterator it(isolate);
+    for (InstructionStream code = it.Next(); !code.is_null();
+         code = it.Next()) {
      code.set_marked_for_deoptimization(true);
    }
  }

@@ -385,8 +386,9 @@ void Deoptimizer::DeoptimizeAllOptimizedCodeWithFunction(
  // Mark all code that inlines this function, then deoptimize.
  bool any_marked = false;
  {
-    Code::OptimizedCodeIterator it(isolate);
-    for (Code code = it.Next(); !code.is_null(); code = it.Next()) {
+    InstructionStream::OptimizedCodeIterator it(isolate);
+    for (InstructionStream code = it.Next(); !code.is_null();
+         code = it.Next()) {
      if (code.Inlines(*function)) {
        code.set_marked_for_deoptimization(true);
        any_marked = true;

@@ -500,17 +502,17 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
  }
 }

-Code Deoptimizer::FindOptimizedCode() {
+InstructionStream Deoptimizer::FindOptimizedCode() {
  CodeLookupResult lookup_result = isolate_->FindCodeObject(from_);
-  return lookup_result.code();
+  return lookup_result.instruction_stream();
 }

 Handle<JSFunction> Deoptimizer::function() const {
  return Handle<JSFunction>(function_, isolate());
 }

-Handle<Code> Deoptimizer::compiled_code() const {
-  return Handle<Code>(compiled_code_, isolate());
+Handle<InstructionStream> Deoptimizer::compiled_code() const {
+  return Handle<InstructionStream>(compiled_code_, isolate());
 }

 Deoptimizer::~Deoptimizer() {
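Both DeoptimizeAll and DeoptimizeAllOptimizedCodeWithFunction follow the same two-phase protocol: first walk every optimized code object and set a flag, then deoptimize the marked set in a second pass. A reduced stand-alone sketch of that protocol, with simplified stand-in types (not V8's real classes):

#include <vector>

struct OptimizedCode {
  bool marked_for_deoptimization = false;
  bool inlines_target = false;
};

void DeoptimizeAllWithFunction(std::vector<OptimizedCode>& heap_code) {
  bool any_marked = false;
  // Phase 1: mark. No code is invalidated yet, so concurrent consumers
  // of the code list see a consistent view while marking proceeds.
  for (OptimizedCode& code : heap_code) {
    if (code.inlines_target) {
      code.marked_for_deoptimization = true;
      any_marked = true;
    }
  }
  // Phase 2: deoptimize only what was marked, and only if anything was.
  if (any_marked) {
    for (OptimizedCode& code : heap_code) {
      if (code.marked_for_deoptimization) {
        // patch frames / discard the code object here
      }
    }
  }
}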
@@ -626,7 +628,8 @@ void Deoptimizer::TraceDeoptEnd(double deopt_duration) {
 }

 // static
-void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
+void Deoptimizer::TraceMarkForDeoptimization(InstructionStream code,
+                                             const char* reason) {
  if (!v8_flags.trace_deopt && !v8_flags.log_deopt) return;

  DisallowGarbageCollection no_gc;

@@ -1940,7 +1943,8 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
  return parameter_slots * kSystemPointerSize;
 }

-Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
+Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(InstructionStream code,
+                                                 Address pc) {
  CHECK(code.InstructionStart() <= pc && pc <= code.InstructionEnd());
  SourcePosition last_position = SourcePosition::Unknown();
  DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
@@ -43,7 +43,7 @@ class Deoptimizer : public Malloced {
    const int deopt_id;
  };

-  static DeoptInfo GetDeoptInfo(Code code, Address from);
+  static DeoptInfo GetDeoptInfo(InstructionStream code, Address from);
  DeoptInfo GetDeoptInfo() const {
    return Deoptimizer::GetDeoptInfo(compiled_code_, from_);
  }

@@ -55,7 +55,7 @@ class Deoptimizer : public Malloced {
  static const char* MessageFor(DeoptimizeKind kind);

  Handle<JSFunction> function() const;
-  Handle<Code> compiled_code() const;
+  Handle<InstructionStream> compiled_code() const;
  DeoptimizeKind deopt_kind() const { return deopt_kind_; }

  // Where the deopt exit occurred *in the outermost frame*, i.e in the

@@ -115,7 +115,7 @@ class Deoptimizer : public Malloced {
  static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
                                    DeoptimizeKind* type_out);

-  // Code generation support.
+  // InstructionStream generation support.
  static int input_offset() { return offsetof(Deoptimizer, input_); }
  static int output_count_offset() {
    return offsetof(Deoptimizer, output_count_);

@@ -140,7 +140,8 @@ class Deoptimizer : public Malloced {
  V8_EXPORT_PRIVATE static const int kLazyDeoptExitSize;

  // Tracing.
-  static void TraceMarkForDeoptimization(Code code, const char* reason);
+  static void TraceMarkForDeoptimization(InstructionStream code,
+                                         const char* reason);
  static void TraceEvictFromOptimizedCodeCache(SharedFunctionInfo sfi,
                                               const char* reason);

@@ -150,7 +151,7 @@ class Deoptimizer : public Malloced {

  Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
              Address from, int fp_to_sp_delta);
-  Code FindOptimizedCode();
+  InstructionStream FindOptimizedCode();
  void DeleteFrameDescriptions();

  void DoComputeOutputFrames();

@@ -180,10 +181,10 @@ class Deoptimizer : public Malloced {

  static void MarkAllCodeForContext(NativeContext native_context);
  static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
-  // Searches the list of known deoptimizing code for a Code object
+  // Searches the list of known deoptimizing code for a InstructionStream object
  // containing the given address (which is supposedly faster than
  // searching all code objects).
-  Code FindDeoptimizingCode(Address addr);
+  InstructionStream FindDeoptimizingCode(Address addr);

  // Tracing.
  bool tracing_enabled() const { return trace_scope_ != nullptr; }

@@ -206,7 +207,7 @@ class Deoptimizer : public Malloced {

  Isolate* isolate_;
  JSFunction function_;
-  Code compiled_code_;
+  InstructionStream compiled_code_;
  unsigned deopt_exit_index_;
  BytecodeOffset bytecode_offset_in_outermost_frame_ = BytecodeOffset::None();
  DeoptimizeKind deopt_kind_;
@@ -382,9 +382,9 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
      const CodeReference& host = code;
      Address constant_pool =
          host.is_null() ? kNullAddress : host.constant_pool();
-      Code code_pointer;
-      if (host.is_code()) {
-        code_pointer = *host.as_code();
+      InstructionStream code_pointer;
+      if (host.is_instruction_stream()) {
+        code_pointer = *host.as_instruction_stream();
      }

      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], code_pointer,

@@ -404,7 +404,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
    // by IsInConstantPool() below.
    if (pcs.empty() && !code.is_null() && !decoding_constant_pool) {
      RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc),
-                            RelocInfo::NO_INFO, 0, Code());
+                            RelocInfo::NO_INFO, 0, InstructionStream());
      if (dummy_rinfo.IsInConstantPool()) {
        Address constant_pool_entry_address =
            dummy_rinfo.constant_pool_entry_address();

@@ -37,8 +37,8 @@ namespace GDBJITInterface {
 void EventHandler(const v8::JitCodeEvent* event);

 // Expose some functions for unittests. These only exercise the logic to add
-// AddressRegion to CodeMap, and checking for overlap. It does not touch the
-// actual JITCodeEntry at all.
+// AddressRegion to InstructionStreamMap, and checking for overlap. It does not
+// touch the actual JITCodeEntry at all.
 V8_EXPORT_PRIVATE void AddRegionForTesting(const base::AddressRegion region);
 V8_EXPORT_PRIVATE void ClearCodeMapForTesting();
 V8_EXPORT_PRIVATE size_t
@@ -243,8 +243,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
      TransitionArray::cast(*this).TransitionArrayVerify(isolate);
      break;

-    case CODE_TYPE:
-      Code::cast(*this).CodeVerify(isolate);
+    case INSTRUCTION_STREAM_TYPE:
+      InstructionStream::cast(*this).InstructionStreamVerify(isolate);
      break;
    case JS_API_OBJECT_TYPE:
    case JS_ARRAY_ITERATOR_PROTOTYPE_TYPE:

@@ -335,7 +335,7 @@ void HeapObject::VerifyHeapPointer(Isolate* isolate, Object p) {
  // If you crashed here and {isolate->is_shared()}, there is a bug causing the
  // host of {p} to point to a non-shared object.
  CHECK(IsValidHeapObject(isolate->heap(), HeapObject::cast(p)));
-  CHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !p.IsCode());
+  CHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !p.IsInstructionStream());
 }

 // static

@@ -343,7 +343,7 @@ void HeapObject::VerifyCodePointer(Isolate* isolate, Object p) {
  CHECK(p.IsHeapObject());
  CHECK(IsValidCodeObject(isolate->heap(), HeapObject::cast(p)));
  PtrComprCageBase cage_base(isolate);
-  CHECK(HeapObject::cast(p).IsCode(cage_base));
+  CHECK(HeapObject::cast(p).IsInstructionStream(cage_base));
 }

 void Symbol::SymbolVerify(Isolate* isolate) {
@@ -1091,36 +1091,38 @@ void PropertyCell::PropertyCellVerify(Isolate* isolate) {

 void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
  CHECK(IsCodeDataContainer());
-  if (raw_code() != Smi::zero()) {
-    Code code = this->code();
+  if (raw_instruction_stream() != Smi::zero()) {
+    InstructionStream code = this->instruction_stream();
    CHECK_EQ(code.kind(), kind());
    CHECK_EQ(code.builtin_id(), builtin_id());
    // When v8_flags.interpreted_frames_native_stack is enabled each
    // interpreted function gets its own copy of the
-    // InterpreterEntryTrampoline. Thus, there could be Code'ful builtins.
+    // InterpreterEntryTrampoline. Thus, there could be InstructionStream'ful
+    // builtins.
    CHECK_IMPLIES(isolate->embedded_blob_code() && is_off_heap_trampoline(),
                  builtin_id() == Builtin::kInterpreterEntryTrampoline);
    CHECK_EQ(code.code_data_container(kAcquireLoad), *this);

-    // Ensure the cached code entry point corresponds to the Code object
-    // associated with this CodeDataContainer.
+    // Ensure the cached code entry point corresponds to the InstructionStream
+    // object associated with this CodeDataContainer.
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    if (V8_SHORT_BUILTIN_CALLS_BOOL) {
      if (code.InstructionStart() == code_entry_point()) {
        // Most common case, all good.
      } else {
        // When shared pointer compression cage is enabled and it has the
-        // embedded code blob copy then the Code::InstructionStart() might
-        // return the address of the remapped builtin regardless of whether
-        // the builtins copy existed when the code_entry_point value was
-        // cached in the CodeDataContainer (see
-        // Code::OffHeapInstructionStart()). So, do a reverse Code object
-        // lookup via code_entry_point value to ensure it corresponds to the
-        // same Code object associated with this CodeDataContainer.
+        // embedded code blob copy then the
+        // InstructionStream::InstructionStart() might return the address of the
+        // remapped builtin regardless of whether the builtins copy existed when
+        // the code_entry_point value was cached in the CodeDataContainer (see
+        // InstructionStream::OffHeapInstructionStart()). So, do a reverse
+        // InstructionStream object lookup via code_entry_point value to ensure
+        // it corresponds to the same InstructionStream object associated with
+        // this CodeDataContainer.
        CodeLookupResult lookup_result =
            isolate->heap()->GcSafeFindCodeForInnerPointer(code_entry_point());
        CHECK(lookup_result.IsFound());
-        CHECK_EQ(lookup_result.ToCode(), code);
+        CHECK_EQ(lookup_result.ToInstructionStream(), code);
      }
    } else {
      CHECK_EQ(code.InstructionStart(), code_entry_point());
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void Code::CodeVerify(Isolate* isolate) {
|
void InstructionStream::InstructionStreamVerify(Isolate* isolate) {
|
||||||
CHECK(IsAligned(InstructionSize(),
|
CHECK(
|
||||||
static_cast<unsigned>(Code::kMetadataAlignment)));
|
IsAligned(InstructionSize(),
|
||||||
|
static_cast<unsigned>(InstructionStream::kMetadataAlignment)));
|
||||||
CHECK_EQ(safepoint_table_offset(), 0);
|
CHECK_EQ(safepoint_table_offset(), 0);
|
||||||
CHECK_LE(safepoint_table_offset(), handler_table_offset());
|
CHECK_LE(safepoint_table_offset(), handler_table_offset());
|
||||||
CHECK_LE(handler_table_offset(), constant_pool_offset());
|
CHECK_LE(handler_table_offset(), constant_pool_offset());
|
||||||
@ -1147,11 +1150,11 @@ void Code::CodeVerify(Isolate* isolate) {
|
|||||||
#endif // !defined(_MSC_VER) || defined(__clang__)
|
#endif // !defined(_MSC_VER) || defined(__clang__)
|
||||||
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
|
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
|
||||||
IsAligned(raw_instruction_start(), kCodeAlignment));
|
IsAligned(raw_instruction_start(), kCodeAlignment));
|
||||||
CHECK_EQ(*this, code_data_container(kAcquireLoad).code());
|
CHECK_EQ(*this, code_data_container(kAcquireLoad).instruction_stream());
|
||||||
// TODO(delphick): Refactor Factory::CodeBuilder::BuildInternal, so that the
|
// TODO(delphick): Refactor Factory::CodeBuilder::BuildInternal, so that the
|
||||||
// following CHECK works builtin trampolines. It currently fails because
|
// following CHECK works builtin trampolines. It currently fails because
|
||||||
// CodeVerify is called halfway through constructing the trampoline and so not
|
// InstructionStreamVerify is called halfway through constructing the
|
||||||
// everything is set up.
|
// trampoline and so not everything is set up.
|
||||||
// CHECK_EQ(ReadOnlyHeap::Contains(*this), !IsExecutable());
|
// CHECK_EQ(ReadOnlyHeap::Contains(*this), !IsExecutable());
|
||||||
relocation_info().ObjectVerify(isolate);
|
relocation_info().ObjectVerify(isolate);
|
||||||
CHECK(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ||
|
CHECK(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ||
|
||||||
@ -1577,7 +1580,8 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
|
|||||||
FixedArray arr = FixedArray::cast(data());
|
FixedArray arr = FixedArray::cast(data());
|
||||||
Object one_byte_data = arr.get(JSRegExp::kIrregexpLatin1CodeIndex);
|
Object one_byte_data = arr.get(JSRegExp::kIrregexpLatin1CodeIndex);
|
||||||
// Smi : Not compiled yet (-1).
|
// Smi : Not compiled yet (-1).
|
||||||
// Code: Compiled irregexp code or trampoline to the interpreter.
|
// InstructionStream: Compiled irregexp code or trampoline to the
|
||||||
|
// interpreter.
|
||||||
CHECK((one_byte_data.IsSmi() &&
|
CHECK((one_byte_data.IsSmi() &&
|
||||||
Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
|
Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
|
||||||
one_byte_data.IsCodeDataContainer());
|
one_byte_data.IsCodeDataContainer());
|
||||||
|
@ -210,8 +210,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
|
|||||||
WasmExceptionPackage::cast(*this).WasmExceptionPackagePrint(os);
|
WasmExceptionPackage::cast(*this).WasmExceptionPackagePrint(os);
|
||||||
break;
|
break;
|
||||||
#endif // V8_ENABLE_WEBASSEMBLY
|
#endif // V8_ENABLE_WEBASSEMBLY
|
||||||
case CODE_TYPE:
|
case INSTRUCTION_STREAM_TYPE:
|
||||||
Code::cast(*this).CodePrint(os);
|
InstructionStream::cast(*this).InstructionStreamPrint(os);
|
||||||
break;
|
break;
|
||||||
case CODE_DATA_CONTAINER_TYPE:
|
case CODE_DATA_CONTAINER_TYPE:
|
||||||
CodeDataContainer::cast(*this).CodeDataContainerPrint(os);
|
CodeDataContainer::cast(*this).CodeDataContainerPrint(os);
|
||||||
@ -1792,8 +1792,8 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) {
|
|||||||
os << "\n";
|
os << "\n";
|
||||||
}
|
}
|
||||||
|
|
||||||
void Code::CodePrint(std::ostream& os) {
|
void InstructionStream::InstructionStreamPrint(std::ostream& os) {
|
||||||
PrintHeader(os, "Code");
|
PrintHeader(os, "InstructionStream");
|
||||||
os << "\n - code_data_container: "
|
os << "\n - code_data_container: "
|
||||||
<< Brief(code_data_container(kAcquireLoad));
|
<< Brief(code_data_container(kAcquireLoad));
|
||||||
if (is_builtin()) {
|
if (is_builtin()) {
|
||||||
@ -1812,7 +1812,7 @@ void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {
|
|||||||
os << "\n - builtin: " << Builtins::name(builtin_id());
|
os << "\n - builtin: " << Builtins::name(builtin_id());
|
||||||
}
|
}
|
||||||
os << "\n - is_off_heap_trampoline: " << is_off_heap_trampoline();
|
os << "\n - is_off_heap_trampoline: " << is_off_heap_trampoline();
|
||||||
os << "\n - code: " << Brief(raw_code());
|
os << "\n - instruction_stream: " << Brief(raw_instruction_stream());
|
||||||
os << "\n - code_entry_point: "
|
os << "\n - code_entry_point: "
|
||||||
<< reinterpret_cast<void*>(code_entry_point());
|
<< reinterpret_cast<void*>(code_entry_point());
|
||||||
os << "\n - kind_specific_flags: " << kind_specific_flags(kRelaxedLoad);
|
os << "\n - kind_specific_flags: " << kind_specific_flags(kRelaxedLoad);
|
||||||
@ -3052,13 +3052,14 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
|
|||||||
i::CodeDataContainer::cast(lookup_result.code_data_container());
|
i::CodeDataContainer::cast(lookup_result.code_data_container());
|
||||||
code.Disassemble(nullptr, os, isolate, address);
|
code.Disassemble(nullptr, os, isolate, address);
|
||||||
} else {
|
} else {
|
||||||
lookup_result.code().Disassemble(nullptr, os, isolate, address);
|
lookup_result.instruction_stream().Disassemble(nullptr, os, isolate,
|
||||||
|
address);
|
||||||
}
|
}
|
||||||
#else // ENABLE_DISASSEMBLER
|
#else // ENABLE_DISASSEMBLER
|
||||||
if (lookup_result.IsCodeDataContainer()) {
|
if (lookup_result.IsCodeDataContainer()) {
|
||||||
lookup_result.code_data_container().Print();
|
lookup_result.code_data_container().Print();
|
||||||
} else {
|
} else {
|
||||||
lookup_result.code().Print();
|
lookup_result.instruction_stream().Print();
|
||||||
}
|
}
|
||||||
#endif // ENABLE_DISASSEMBLER
|
#endif // ENABLE_DISASSEMBLER
|
||||||
}
|
}
|
||||||
|
@@ -231,9 +231,11 @@ void LinuxPerfJitLogger::LogRecordedBuffer(
  if (perf_output_handle_ == nullptr) return;

  // We only support non-interpreted functions.
-  if (!abstract_code->IsCode(isolate_)) return;
-  Handle<Code> code = Handle<Code>::cast(abstract_code);
-  DCHECK(code->raw_instruction_start() == code->address() + Code::kHeaderSize);
+  if (!abstract_code->IsInstructionStream(isolate_)) return;
+  Handle<InstructionStream> code =
+      Handle<InstructionStream>::cast(abstract_code);
+  DCHECK(code->raw_instruction_start() ==
+         code->address() + InstructionStream::kHeaderSize);

  // Debug info has to be emitted first.
  Handle<SharedFunctionInfo> shared;

@@ -320,7 +322,7 @@ base::Vector<const char> GetScriptName(Object maybeScript,

 }  // namespace

-SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
+SourcePositionInfo GetSourcePositionInfo(Handle<InstructionStream> code,
                                         Handle<SharedFunctionInfo> function,
                                         SourcePosition pos) {
  DisallowGarbageCollection disallow;

@@ -333,7 +335,7 @@ SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,

 }  // namespace

-void LinuxPerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
+void LinuxPerfJitLogger::LogWriteDebugInfo(Handle<InstructionStream> code,
                                           Handle<SharedFunctionInfo> shared) {
  // Line ends of all scripts have been initialized prior to this.
  DisallowGarbageCollection no_gc;

@@ -484,7 +486,7 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(const wasm::WasmCode* code) {
 }
 #endif  // V8_ENABLE_WEBASSEMBLY

-void LinuxPerfJitLogger::LogWriteUnwindingInfo(Code code) {
+void LinuxPerfJitLogger::LogWriteUnwindingInfo(InstructionStream code) {
  PerfJitCodeUnwindingInfo unwinding_info_header;
  unwinding_info_header.event_ = PerfJitCodeLoad::kUnwindingInfo;
  unwinding_info_header.time_stamp_ = GetTimestamp();

@@ -76,11 +76,12 @@ class LinuxPerfJitLogger : public CodeEventLogger {

  void LogWriteBytes(const char* bytes, int size);
  void LogWriteHeader();
-  void LogWriteDebugInfo(Handle<Code> code, Handle<SharedFunctionInfo> shared);
+  void LogWriteDebugInfo(Handle<InstructionStream> code,
+                         Handle<SharedFunctionInfo> shared);
 #if V8_ENABLE_WEBASSEMBLY
  void LogWriteDebugInfo(const wasm::WasmCode* code);
 #endif  // V8_ENABLE_WEBASSEMBLY
-  void LogWriteUnwindingInfo(Code code);
+  void LogWriteUnwindingInfo(InstructionStream code);

  static const uint32_t kElfMachIA32 = 3;
  static const uint32_t kElfMachX64 = 62;
@@ -311,7 +311,7 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
     }
     CodeLookupResult interpreter_entry_trampoline =
         isolate->heap()->GcSafeFindCodeForInnerPointer(pc);
-    return interpreter_entry_trampoline.code()
+    return interpreter_entry_trampoline.instruction_stream()
         .is_interpreter_trampoline_builtin();
   } else {
     return false;
@@ -571,8 +571,8 @@ CodeLookupResult StackFrame::LookupCodeDataContainer() const {
   CodeLookupResult result = GetContainingCode(isolate(), pc());
   if (DEBUG_BOOL) {
     CHECK(result.IsFound());
-    if (result.IsCode()) {
-      Code code = result.code();
+    if (result.IsInstructionStream()) {
+      InstructionStream code = result.instruction_stream();
       CHECK_GE(pc(), code.InstructionStart(isolate(), pc()));
       CHECK_LT(pc(), code.InstructionEnd(isolate(), pc()));
     } else {
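For orientation, the debug check in the hunk above pins down the invariant the lookup relies on: the frame's pc must fall inside the half-open instruction range of whatever code object was found. A standalone toy rendering of that containment test, with invented addresses and nothing V8-specific:

    #include <cstdint>

    // Toy version of the CHECK_GE/CHECK_LT pair above: pc must lie in
    // [instruction_start, instruction_end).
    static bool Contains(uintptr_t start, uintptr_t end, uintptr_t pc) {
      return pc >= start && pc < end;
    }

    int main() {
      // Invented addresses; the real values come from InstructionStart/End.
      return Contains(0x1000, 0x2000, 0x1abc) ? 0 : 1;
    }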
@@ -594,7 +594,7 @@ void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
     v->VisitRunningCode(FullObjectSlot(&code));
     return;
   }
-  Code holder = lookup_result.code();
+  InstructionStream holder = lookup_result.instruction_stream();
   Address old_pc = ReadPC(pc_address);
   DCHECK(ReadOnlyHeap::Contains(holder) ||
          holder.GetHeap()->GcSafeCodeContains(holder, old_pc));
@@ -602,7 +602,7 @@ void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
   Object code = holder;
   v->VisitRunningCode(FullObjectSlot(&code));
   if (code == holder) return;
-  holder = Code::unchecked_cast(code);
+  holder = InstructionStream::unchecked_cast(code);
   Address pc = holder.InstructionStart(isolate_, old_pc) + pc_offset;
   // TODO(v8:10026): avoid replacing a signed pointer.
   PointerAuthentication::ReplacePC(pc_address, pc, kSystemPointerSize);
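IteratePc above handles the case where the GC has moved the running code object: the pc's offset from InstructionStart is position-independent, so the visited pc can be rebased onto the relocated object. A toy rendering with invented addresses:

    #include <cstdint>

    int main() {
      uint64_t old_start = 0x1000, old_pc = 0x1042;  // before the GC move
      uint64_t pc_offset = old_pc - old_start;       // survives relocation
      uint64_t new_start = 0x9000;                   // object moved by GC
      uint64_t new_pc = new_start + pc_offset;       // what ReplacePC stores
      return new_pc == 0x9042 ? 0 : 1;               // exit 0 on success
    }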
@@ -631,7 +631,7 @@ inline StackFrame::Type ComputeBuiltinFrameType(CodeOrCodeDataContainer code) {
     return StackFrame::BASELINE;
   }
   if (code.is_turbofanned()) {
-    // TODO(bmeurer): We treat frames for BUILTIN Code objects as
+    // TODO(bmeurer): We treat frames for BUILTIN InstructionStream objects as
     // OptimizedFrame for now (all the builtins with JavaScript
     // linkage are actually generated with TurboFan currently, so
     // this is sound).
@@ -709,7 +709,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
         return ComputeBuiltinFrameType(
             CodeDataContainer::cast(lookup_result.code_data_container()));
       }
-      return ComputeBuiltinFrameType(lookup_result.code());
+      return ComputeBuiltinFrameType(lookup_result.instruction_stream());
     }
     case CodeKind::BASELINE:
       return BASELINE;
@@ -740,7 +740,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
       return WASM_TO_JS_FUNCTION;
     case CodeKind::WASM_FUNCTION:
     case CodeKind::WASM_TO_CAPI_FUNCTION:
-      // Never appear as on-heap {Code} objects.
+      // Never appear as on-heap {InstructionStream} objects.
       UNREACHABLE();
 #endif  // V8_ENABLE_WEBASSEMBLY
     default:
@@ -1092,10 +1092,10 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
   // FullMaybeObjectSlots here.
   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
     // When external code space is enabled the spill slot could contain both
-    // Code and non-Code references, which have different cage bases. So
-    // unconditional decompression of the value might corrupt Code pointers.
-    // However, given that
-    // 1) the Code pointers are never compressed by design (because
+    // InstructionStream and non-InstructionStream references, which have
+    // different cage bases. So unconditional decompression of the value might
+    // corrupt InstructionStream pointers. However, given that 1) the
+    // InstructionStream pointers are never compressed by design (because
     //    otherwise we wouldn't know which cage base to apply for
     //    decompression, see respective DCHECKs in
     //    RelocInfo::target_object()),
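The comment block above is the heart of the spill-slot handling: a slot whose upper 32 bits are zero may hold a compressed tagged value, while anything with a non-zero upper half is already a full, never-compressed InstructionStream pointer. A standalone illustration of that classification, with invented values and a simplified smi tag:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Simplified: V8 smis carry a 0 low bit (kSmiTagMask == 1).
      const uint64_t kSmiTagMask = 1;
      const uint64_t slots[] = {
          0x0000000012345671ull,  // not a smi, fits 32 bits: maybe compressed
          0x0000550012345671ull,  // upper half set: full pointer, leave as-is
          0x0000000000000008ull,  // low bit 0: smi, leave as-is
      };
      for (uint64_t value : slots) {
        bool is_smi = (value & kSmiTagMask) == 0;
        if (!is_smi && value <= 0xffffffffull) {
          std::printf("%" PRIx64 ": decompress with the matching cage base\n",
                      value);
        } else {
          std::printf("%" PRIx64 ": full value or smi, no update needed\n",
                      value);
        }
      }
      return 0;
    }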
@@ -1104,7 +1104,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
     // we can avoid updating upper part of the spill slot if it already
     // contains full value.
     // TODO(v8:11880): Remove this special handling by enforcing builtins
-    // to use CodeTs instead of Code objects.
+    // to use CodeTs instead of InstructionStream objects.
     Address value = *spill_slot.location();
     if (!HAS_SMI_TAG(value) && value <= 0xffffffff) {
       // We don't need to update smi values or full pointers.
@@ -1551,8 +1551,8 @@ HeapObject TurbofanStubWithContextFrame::unchecked_code() const {
   if (code_lookup.IsCodeDataContainer()) {
     return code_lookup.code_data_container();
   }
-  if (code_lookup.IsCode()) {
-    return code_lookup.code();
+  if (code_lookup.IsInstructionStream()) {
+    return code_lookup.instruction_stream();
   }
   return {};
 }
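This unchecked_code pattern (and its twin in the next hunk) is the two-representation lookup in miniature: test the kind, call the matching accessor, fall back to an empty value. A standalone model with invented stand-in types:

    #include <cassert>
    #include <cstdint>

    // Invented stand-in for CodeLookupResult: one of two representations,
    // or empty.
    struct LookupResult {
      enum class Kind { kEmpty, kInstructionStream, kCodeDataContainer };
      Kind kind = Kind::kEmpty;
      uintptr_t value = 0;

      bool IsInstructionStream() const {
        return kind == Kind::kInstructionStream;
      }
      bool IsCodeDataContainer() const {
        return kind == Kind::kCodeDataContainer;
      }
      uintptr_t instruction_stream() const {
        assert(IsInstructionStream());
        return value;
      }
      uintptr_t code_data_container() const {
        assert(IsCodeDataContainer());
        return value;
      }
    };

    int main() {
      LookupResult r{LookupResult::Kind::kInstructionStream, 0x1234};
      uintptr_t obj = 0;  // the "empty value" fallback
      if (r.IsCodeDataContainer()) {
        obj = r.code_data_container();
      } else if (r.IsInstructionStream()) {
        obj = r.instruction_stream();
      }
      return obj == 0x1234 ? 0 : 1;
    }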
@@ -1649,8 +1649,8 @@ HeapObject StubFrame::unchecked_code() const {
   if (code_lookup.IsCodeDataContainer()) {
     return code_lookup.code_data_container();
   }
-  if (code_lookup.IsCode()) {
-    return code_lookup.code();
+  if (code_lookup.IsInstructionStream()) {
+    return code_lookup.instruction_stream();
   }
   return {};
 }
@@ -2451,12 +2451,12 @@ void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
 }
 
 int BaselineFrame::GetBytecodeOffset() const {
-  Code code = LookupCodeDataContainer().code();
+  InstructionStream code = LookupCodeDataContainer().instruction_stream();
   return code.GetBytecodeOffsetForBaselinePC(this->pc(), GetBytecodeArray());
 }
 
 intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
-  Code code = LookupCodeDataContainer().code();
+  InstructionStream code = LookupCodeDataContainer().instruction_stream();
   return code.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
                                                   GetBytecodeArray());
 }
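GetBytecodeOffset and GetPCForBytecodeOffset above are inverse mappings maintained by the baseline code, one per direction between a machine pc and a bytecode offset. A toy model with an invented lookup table:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Invented table: the pc recorded for each bytecode offset.
      const uintptr_t kPcForOffset[] = {0x100, 0x108, 0x120};
      int bytecode_offset = 2;
      uintptr_t pc = kPcForOffset[bytecode_offset];  // GetPCForBytecodeOffset
      int recovered = -1;  // GetBytecodeOffset direction, brute force here
      for (int i = 0; i < 3; ++i) {
        if (kPcForOffset[i] == pc) recovered = i;
      }
      assert(recovered == bytecode_offset);  // the two mappings round-trip
      return 0;
    }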
@@ -2984,7 +2984,8 @@ InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
     // the code has been computed.
     entry->code =
         isolate_->heap()->GcSafeFindCodeForInnerPointer(inner_pointer);
-    if (entry->code.IsCode() && entry->code.code().is_maglevved()) {
+    if (entry->code.IsInstructionStream() &&
+        entry->code.instruction_stream().is_maglevved()) {
       entry->maglev_safepoint_entry.Reset();
     } else {
       entry->safepoint_entry.Reset();
@@ -297,8 +297,8 @@ class StackFrame {
   // Get the type of this frame.
   virtual Type type() const = 0;
 
-  // Get the code associated with this frame. The result might be a Code object,
-  // a CodeDataContainer object or an empty value.
+  // Get the code associated with this frame. The result might be a
+  // InstructionStream object, a CodeDataContainer object or an empty value.
   // This method is used by Isolate::PushStackTraceAndDie() for collecting a
   // stack trace on fatal error and thus it might be called in the middle of GC
   // and should be as safe as possible.
@@ -243,8 +243,9 @@ class IsolateData final {
   ThreadLocalTop thread_local_top_;
 
   // The entry points for builtins. This corresponds to
-  // Code::InstructionStart() for each Code object in the builtins table below.
-  // The entry table is in IsolateData for easy access through kRootRegister.
+  // InstructionStream::InstructionStart() for each InstructionStream object in
+  // the builtins table below. The entry table is in IsolateData for easy access
+  // through kRootRegister.
   Address builtin_entry_table_[Builtins::kBuiltinCount] = {};
 
   // The entries in this array are tagged pointers to CodeDataContainer objects.
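The builtin entry table described above is a flat array of raw entry addresses, one per builtin, so generated code can reach any builtin's entry point from a single base register plus a constant offset. A sketch with invented constants:

    #include <cstdint>

    using Address = uintptr_t;
    constexpr int kBuiltinCount = 4;  // stand-in for Builtins::kBuiltinCount

    // Invented miniature of IsolateData: in V8 this struct is addressed via
    // kRootRegister, so an entry load is one register plus constant offset.
    struct IsolateDataSketch {
      Address builtin_entry_table[kBuiltinCount] = {};
    };

    int main() {
      IsolateDataSketch data;
      data.builtin_entry_table[2] = 0x5000;  // filled in during setup
      Address entry = data.builtin_entry_table[2];
      return entry == 0x5000 ? 0 : 1;
    }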
@@ -115,8 +115,8 @@ V8_INLINE PtrComprCageBase GetPtrComprCageBaseSlow(HeapObject object) {
       return PtrComprCageBase{isolate};
     }
     // If the Isolate can't be obtained then the heap object is a read-only
-    // one and therefore not a Code object, so fallback to auto-computing cage
-    // base value.
+    // one and therefore not a InstructionStream object, so fallback to
+    // auto-computing cage base value.
   }
   return GetPtrComprCageBase(object);
 }
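"Auto-computing" a cage base, as the fallback above does, exploits the cage's alignment: with a 4GB-aligned cage the base is just the object address with the low 32 bits masked off. A toy with illustrative constants, not V8's actual configuration:

    #include <cstdint>

    int main() {
      const uint64_t kCageBaseMask = ~uint64_t{0xffffffff};  // 4GB alignment
      uint64_t object_address = 0x00007f0012345678ull;       // invented
      uint64_t cage_base = object_address & kCageBaseMask;
      return cage_base == 0x00007f0000000000ull ? 0 : 1;
    }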
@@ -181,12 +181,13 @@ uint32_t DefaultEmbeddedBlobDataSize() {
 
 namespace {
 // These variables provide access to the current embedded blob without requiring
-// an isolate instance. This is needed e.g. by Code::InstructionStart, which may
-// not have access to an isolate but still needs to access the embedded blob.
-// The variables are initialized by each isolate in Init(). Writes and reads are
-// relaxed since we can guarantee that the current thread has initialized these
-// variables before accessing them. Different threads may race, but this is fine
-// since they all attempt to set the same values of the blob pointer and size.
+// an isolate instance. This is needed e.g. by
+// InstructionStream::InstructionStart, which may not have access to an isolate
+// but still needs to access the embedded blob. The variables are initialized by
+// each isolate in Init(). Writes and reads are relaxed since we can guarantee
+// that the current thread has initialized these variables before accessing
+// them. Different threads may race, but this is fine since they all attempt to
+// set the same values of the blob pointer and size.
 
 std::atomic<const uint8_t*> current_embedded_blob_code_(nullptr);
 std::atomic<uint32_t> current_embedded_blob_code_size_(0);
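The comment block above describes a benign-race publication scheme: every isolate stores the same pointer/size pair, so relaxed atomics suffice. A standalone sketch of the same pattern:

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <thread>
    #include <vector>

    namespace {
    const uint8_t kBlob[16] = {};  // stand-in for the embedded blob
    std::atomic<const uint8_t*> g_blob_code{nullptr};
    std::atomic<uint32_t> g_blob_code_size{0};

    // Every "isolate" publishes identical values, so racing relaxed stores
    // are harmless.
    void InitIsolate() {
      g_blob_code.store(kBlob, std::memory_order_relaxed);
      g_blob_code_size.store(sizeof(kBlob), std::memory_order_relaxed);
    }
    }  // namespace

    int main() {
      std::vector<std::thread> threads;
      for (int i = 0; i < 4; ++i) threads.emplace_back(InitIsolate);
      for (auto& t : threads) t.join();
      assert(g_blob_code.load(std::memory_order_relaxed) == kBlob);
      assert(g_blob_code_size.load(std::memory_order_relaxed) ==
             sizeof(kBlob));
      return 0;
    }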
@@ -2000,7 +2001,8 @@ Object Isolate::UnwindAndFindHandler() {
         CHECK(frame->is_java_script());
 
         if (frame->is_turbofan()) {
-          Code code = frame->LookupCodeDataContainer().code();
+          InstructionStream code =
+              frame->LookupCodeDataContainer().instruction_stream();
           // The debugger triggers lazy deopt for the "to-be-restarted" frame
           // immediately when the CDP event arrives while paused.
           CHECK(code.marked_for_deoptimization());
@@ -2052,7 +2054,8 @@ Object Isolate::UnwindAndFindHandler() {
       case StackFrame::C_WASM_ENTRY: {
         StackHandler* handler = frame->top_handler();
         thread_local_top()->handler_ = handler->next_address();
-        Code code = frame->LookupCodeDataContainer().code();
+        InstructionStream code =
+            frame->LookupCodeDataContainer().instruction_stream();
         HandlerTable table(code);
         Address instruction_start = code.InstructionStart(this, frame->pc());
         int return_offset = static_cast<int>(frame->pc() - instruction_start);
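The return_offset computed above is simply the pc's distance from the (pc-dependent) instruction start; the handler table is keyed by such offsets rather than by absolute addresses. A toy rendering with invented addresses:

    #include <cstdint>

    int main() {
      uintptr_t instruction_start = 0x1000;  // code.InstructionStart(...)
      uintptr_t pc = 0x1042;                 // frame->pc()
      int return_offset = static_cast<int>(pc - instruction_start);
      return return_offset == 0x42 ? 0 : 1;  // offset keys the handler table
    }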
@@ -2198,7 +2201,8 @@ Object Isolate::UnwindAndFindHandler() {
 
         if (frame->is_baseline()) {
           BaselineFrame* sp_frame = BaselineFrame::cast(js_frame);
-          Code code = sp_frame->LookupCodeDataContainer().code();
+          InstructionStream code =
+              sp_frame->LookupCodeDataContainer().instruction_stream();
           DCHECK(!code.is_off_heap_trampoline());
           intptr_t pc_offset = sp_frame->GetPCForBytecodeOffset(offset);
           // Patch the context register directly on the frame, so that we don't
@@ -4822,7 +4826,7 @@ bool Isolate::use_optimizer() {
 
 void Isolate::IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code) {
   PtrComprCageBase cage_base(this);
-  DCHECK(code->IsCode(cage_base) || code->IsByteArray(cage_base));
+  DCHECK(code->IsInstructionStream(cage_base) || code->IsByteArray(cage_base));
   total_regexp_code_generated_ += code->Size(cage_base);
 }
 
@@ -1715,8 +1715,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
   }
 
   // Hashes bits of the Isolate that are relevant for embedded builtins. In
-  // particular, the embedded blob requires builtin Code object layout and the
-  // builtins constants table to remain unchanged from build-time.
+  // particular, the embedded blob requires builtin InstructionStream object
+  // layout and the builtins constants table to remain unchanged from
+  // build-time.
   size_t HashIsolateForEmbeddedBlob();
 
   static const uint8_t* CurrentEmbeddedBlobCode();
@@ -145,8 +145,8 @@ void StatisticsExtension::GetCounters(
   for (HeapObject obj = iterator.Next(); !obj.is_null();
        obj = iterator.Next()) {
     Object maybe_source_positions;
-    if (obj.IsCode()) {
-      Code code = Code::cast(obj);
+    if (obj.IsInstructionStream()) {
+      InstructionStream code = InstructionStream::cast(obj);
       reloc_info_total += code.relocation_info().Size();
       // Baseline code doesn't have source positions since it uses
       // interpreter code positions.
@@ -105,10 +105,10 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
 
   // When V8_EXTERNAL_CODE_SPACE_BOOL is enabled the allocatable region must
   // not cross the 4Gb boundary and thus the default compression scheme of
-  // truncating the Code pointers to 32-bits still works. It's achieved by
-  // specifying base_alignment parameter.
-  // Note that the alignment is calculated before adjusting the requested size
-  // for GetWritableReservedAreaSize(). The reasons are:
+  // truncating the InstructionStream pointers to 32-bits still works. It's
+  // achieved by specifying base_alignment parameter. Note that the alignment is
+  // calculated before adjusting the requested size for
+  // GetWritableReservedAreaSize(). The reasons are:
   // - this extra page is used by breakpad on Windows and it's allowed to cross
   //   the 4Gb boundary,
   // - rounding up the adjusted size would result in requresting unnecessarily
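The reservation comment above leans on a simple property: a region whose base is aligned to a power-of-two boundary at least as large as the region itself cannot cross that boundary. A standalone check for the 4GB case:

    #include <cstdint>

    // True if [base, base + size) straddles a 4GB boundary.
    static bool Crosses4GB(uint64_t base, uint64_t size) {
      return (base >> 32) != ((base + size - 1) >> 32);
    }

    int main() {
      uint64_t size = uint64_t{1} << 31;           // 2GB reservation
      uint64_t aligned = uint64_t{5} << 32;        // 4GB-aligned base
      uint64_t unaligned = aligned + 0xf0000000;   // poorly placed base
      bool ok = !Crosses4GB(aligned, size) && Crosses4GB(unaligned, size);
      return ok ? 0 : 1;
    }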
@@ -32,7 +32,7 @@ void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
     // Record code+metadata statistics.
     AbstractCode abstract_code = AbstractCode::cast(object);
     int size = abstract_code.SizeIncludingMetadata(cage_base);
-    if (abstract_code.IsCode(cage_base)) {
+    if (abstract_code.IsInstructionStream(cage_base)) {
       size += isolate->code_and_metadata_size();
       isolate->set_code_and_metadata_size(size);
     } else {
@@ -204,9 +204,9 @@ void CodeStatistics::CollectCodeCommentStatistics(AbstractCode obj,
   // them in the stats.
   // Only process code objects for code comment statistics.
   PtrComprCageBase cage_base(isolate);
-  if (!obj.IsCode(cage_base)) return;
+  if (!obj.IsInstructionStream(cage_base)) return;
 
-  Code code = Code::cast(obj);
+  InstructionStream code = InstructionStream::cast(obj);
   CodeCommentsIterator cit(code.code_comments(), code.code_comments_size());
   int delta = 0;
   int prev_pc_offset = 0;
@@ -122,7 +122,8 @@ void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
   base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
   if (space_->identity() == CODE_SPACE) {
     optional_rwx_write_scope.emplace(
-        "Marking Code objects requires write access to the Code page header");
+        "Marking InstructionStream objects requires write access to the "
+        "Code page header");
   }
   Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
 }
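This hunk and the one that follows use the same RAII idiom: a scope object that makes otherwise read-only code pages writable for its lifetime, taking a human-readable reason string. A standalone sketch of the shape, with the real permission flipping (e.g. mprotect) omitted:

    #include <cstdio>

    // Invented miniature of CodePageHeaderModificationScope.
    class WriteScopeSketch {
     public:
      explicit WriteScopeSketch(const char* reason) {
        std::printf("unprotect pages: %s\n", reason);  // would set RW here
      }
      ~WriteScopeSketch() { std::printf("reprotect pages\n"); }  // back to RX
    };

    int main() {
      WriteScopeSketch scope(
          "Marking InstructionStream objects requires write access to the "
          "Code page header");
      // ... mutate page headers while the scope is alive ...
      return 0;
    }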
@@ -136,7 +137,8 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
   base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
   if (space_->identity() == CODE_SPACE) {
     optional_rwx_write_scope.emplace(
-        "Marking Code objects requires write access to the Code page header");
+        "Marking InstructionStream objects requires write access to the "
+        "Code page header");
   }
   Page::FromAllocationAreaAddress(top)->DestroyBlackAreaBackground(top,
                                                                    limit);