Rename Code to InstructionStream

.. as part of the big Code/CodeDataContainer name shuffle.  In the
next step, CodeDataContainer will be renamed to Code.

Bug: v8:13654
Change-Id: Ia80ac984d46dd6c2a108098055a5cd60e22a837c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4171628
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85337}
Author: Jakob Linke <jgruber@chromium.org>
Date: 2023-01-17 10:59:51 +01:00 (committed by V8 LUCI CQ)
Parent: 503b56efdf
Commit: 2c7272fdde
273 changed files with 2997 additions and 2376 deletions
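
For illustration, the change this CL applies mechanically throughout the hunks
below, shown on one affected signature (taken from the BaselineCompiler hunks;
a sketch, not an exhaustive list):

    // Before this CL:
    MaybeHandle<Code> Build(LocalIsolate* local_isolate);

    // After this CL. In the follow-up step, CodeDataContainer will in turn
    // be renamed to Code.
    MaybeHandle<InstructionStream> Build(LocalIsolate* local_isolate);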


@ -53,7 +53,7 @@ class BaselineCompilerTask {
compiler.GenerateCode();
maybe_code_ = local_isolate->heap()->NewPersistentMaybeHandle(
compiler.Build(local_isolate));
Handle<Code> code;
Handle<InstructionStream> code;
if (maybe_code_.ToHandle(&code)) {
local_isolate->heap()->RegisterCodeObject(code);
}
@ -63,7 +63,7 @@ class BaselineCompilerTask {
// Executed in the main thread.
void Install(Isolate* isolate) {
shared_function_info_->set_is_sparkplug_compiling(false);
Handle<Code> code;
Handle<InstructionStream> code;
if (!maybe_code_.ToHandle(&code)) return;
if (v8_flags.print_code) {
code->Print();
@ -97,7 +97,7 @@ class BaselineCompilerTask {
private:
Handle<SharedFunctionInfo> shared_function_info_;
Handle<BytecodeArray> bytecode_;
MaybeHandle<Code> maybe_code_;
MaybeHandle<InstructionStream> maybe_code_;
double time_taken_ms_;
};


@ -337,7 +337,8 @@ void BaselineCompiler::GenerateCode() {
}
}
MaybeHandle<Code> BaselineCompiler::Build(LocalIsolate* local_isolate) {
MaybeHandle<InstructionStream> BaselineCompiler::Build(
LocalIsolate* local_isolate) {
CodeDesc desc;
__ GetCode(local_isolate->GetMainThreadIsolateUnsafe(), &desc);


@ -58,7 +58,7 @@ class BaselineCompiler {
Handle<BytecodeArray> bytecode);
void GenerateCode();
MaybeHandle<Code> Build(LocalIsolate* local_isolate);
MaybeHandle<InstructionStream> Build(LocalIsolate* local_isolate);
static int EstimateInstructionSize(BytecodeArray bytecode);
private:


@ -56,14 +56,14 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
return true;
}
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared) {
MaybeHandle<InstructionStream> GenerateBaselineCode(
Isolate* isolate, Handle<SharedFunctionInfo> shared) {
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline);
Handle<BytecodeArray> bytecode(shared->GetBytecodeArray(isolate), isolate);
LocalIsolate* local_isolate = isolate->main_thread_local_isolate();
baseline::BaselineCompiler compiler(local_isolate, shared, bytecode);
compiler.GenerateCode();
MaybeHandle<Code> code = compiler.Build(local_isolate);
MaybeHandle<InstructionStream> code = compiler.Build(local_isolate);
if (v8_flags.print_code && !code.is_null()) {
code.ToHandleChecked()->Print();
}
@ -86,8 +86,8 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
return false;
}
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared) {
MaybeHandle<InstructionStream> GenerateBaselineCode(
Isolate* isolate, Handle<SharedFunctionInfo> shared) {
UNREACHABLE();
}


@ -10,14 +10,14 @@
namespace v8 {
namespace internal {
class Code;
class InstructionStream;
class SharedFunctionInfo;
class MacroAssembler;
bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared);
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
MaybeHandle<InstructionStream> GenerateBaselineCode(
Isolate* isolate, Handle<SharedFunctionInfo> shared);
void EmitReturnBaseline(MacroAssembler* masm);


@ -1715,8 +1715,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
// precondition here is: if maybe_target_code is a Code object, it must NOT
// be marked_for_deoptimization (callers must ensure this).
// precondition here is: if maybe_target_code is an InstructionStream object,
// it must NOT be marked_for_deoptimization (callers must ensure this).
__ cmp(maybe_target_code, Operand(Smi::zero()));
__ b(ne, &jump_to_optimized_code);
}
@ -1759,16 +1759,20 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ LeaveFrame(StackFrame::STUB);
}
__ LoadCodeDataContainerCodeNonBuiltin(r0, r0);
__ LoadCodeDataContainerInstructionStreamNonBuiltin(r0, r0);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ ldr(r1,
FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
__ ldr(
r1,
FieldMemOperand(
r0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
__ add(r0, r0,
Operand(InstructionStream::kHeaderSize -
kHeapObjectTag)); // InstructionStream start
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@ -2000,7 +2004,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<CodeDataContainer> code) {
// ----------- S t a t e -------------
@ -3262,8 +3267,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
// purpose Code object) to be able to call into C functions that may trigger
// GC and thus move the caller.
// purpose InstructionStream object) to be able to call into C functions that
// may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
@ -3557,7 +3562,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = r1;
__ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
// Get the InstructionStream object from the shared function info.
Register code_obj = r4;
__ ldr(code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@ -3588,7 +3593,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (v8_flags.debug_code) {
AssertCodeDataContainerIsBaseline(masm, code_obj, r3);
}
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
// Load the feedback vector.
Register feedback_vector = r2;
@ -3663,9 +3668,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, temps.Acquire());
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
} else {
__ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(code_obj, code_obj,
Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
__ Jump(code_obj);
}
__ Trap(); // Unreachable.


@ -1952,8 +1952,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
// precondition here is: if maybe_target_code is a Code object, it must NOT
// be marked_for_deoptimization (callers must ensure this).
// precondition here is: if maybe_target_code is an InstructionStream object,
// it must NOT be marked_for_deoptimization (callers must ensure this).
__ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code);
}
@ -1994,13 +1994,14 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ LeaveFrame(StackFrame::STUB);
}
__ LoadCodeDataContainerCodeNonBuiltin(x0, x0);
__ LoadCodeDataContainerInstructionStreamNonBuiltin(x0, x0);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
x1,
FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
FieldMemOperand(
x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@ -2011,7 +2012,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ Add(x0, x0, x1);
Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
Generate_OSREntry(masm, x0, InstructionStream::kHeaderSize - kHeapObjectTag);
}
} // namespace
@ -2333,7 +2334,8 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
} // namespace
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<CodeDataContainer> code) {
// ----------- S t a t e -------------
@ -5377,8 +5379,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
// purpose Code object) to be able to call into C functions that may trigger
// GC and thus move the caller.
// purpose InstructionStream object) to be able to call into C functions that
// may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
@ -5693,7 +5695,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = x1;
__ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
// Get the InstructionStream object from the shared function info.
Register code_obj = x22;
__ LoadTaggedPointerField(
code_obj,
@ -5726,7 +5728,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (v8_flags.debug_code) {
AssertCodeDataContainerIsBaseline(masm, code_obj, x3);
}
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
// Load the feedback vector.
Register feedback_vector = x2;
@ -5799,9 +5801,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (is_osr) {
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
Generate_OSREntry(masm, code_obj,
InstructionStream::kHeaderSize - kHeapObjectTag);
} else {
__ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
__ Add(code_obj, code_obj, InstructionStream::kHeaderSize - kHeapObjectTag);
__ Jump(code_obj);
}
__ Trap(); // Unreachable.


@ -229,7 +229,7 @@ type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
type RawPtr<To: type> extends RawPtr;
type ExternalPointer
generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
extern class Code extends HeapObject;
extern class InstructionStream extends HeapObject;
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Number = Smi|HeapNumber;


@ -125,9 +125,10 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
MaybeTailCallOptimizedCodeSlot(function, CAST(feedback_cell_value));
Goto(&maybe_use_sfi_code);
// At this point we have a candidate Code object. It's *not* a cached
// optimized Code object (we'd have tail-called it above). A usual case would
// be the InterpreterEntryTrampoline to start executing existing bytecode.
// At this point we have a candidate InstructionStream object. It's *not* a
// cached optimized InstructionStream object (we'd have tail-called it above).
// A usual case would be the InterpreterEntryTrampoline to start executing
// existing bytecode.
BIND(&maybe_use_sfi_code);
Label tailcall_code(this), baseline(this);
TVARIABLE(CodeDataContainer, code);


@ -285,7 +285,7 @@ Address Builtins::CppEntryOf(Builtin builtin) {
}
// static
bool Builtins::IsBuiltin(const Code code) {
bool Builtins::IsBuiltin(const InstructionStream code) {
return Builtins::IsBuiltinId(code.builtin_id());
}
@ -399,7 +399,7 @@ constexpr int OffHeapTrampolineGenerator::kBufferSize;
} // namespace
// static
Handle<Code> Builtins::GenerateOffHeapTrampolineFor(
Handle<InstructionStream> Builtins::GenerateOffHeapTrampolineFor(
Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
bool generate_jump_to_instruction_stream) {
DCHECK_NOT_NULL(isolate->embedded_blob_code());
@ -429,14 +429,14 @@ Handle<ByteArray> Builtins::GenerateOffHeapTrampolineRelocInfo(
Handle<ByteArray> reloc_info = isolate->factory()->NewByteArray(
desc.reloc_size, AllocationType::kReadOnly);
Code::CopyRelocInfoToByteArray(*reloc_info, desc);
InstructionStream::CopyRelocInfoToByteArray(*reloc_info, desc);
return reloc_info;
}
// static
Handle<Code> Builtins::CreateInterpreterEntryTrampolineForProfiling(
Isolate* isolate) {
Handle<InstructionStream>
Builtins::CreateInterpreterEntryTrampolineForProfiling(Isolate* isolate) {
DCHECK_NOT_NULL(isolate->embedded_blob_code());
DCHECK_NE(0, isolate->embedded_blob_code_size());
@ -525,18 +525,19 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
// static
bool Builtins::CodeObjectIsExecutable(Builtin builtin) {
// If the runtime/optimized code always knows when executing a given builtin
// that it is a builtin, then that builtin does not need an executable Code
// object. Such Code objects can go in read_only_space (and can even be
// smaller with no branch instruction), thus saving memory.
// that it is a builtin, then that builtin does not need an executable
// InstructionStream object. Such InstructionStream objects can go in
// read_only_space (and can even be smaller with no branch instruction), thus
// saving memory.
// Builtins with JS linkage will always have executable Code objects since
// they can be called directly from jitted code with no way of determining
// that they are builtins at generation time. E.g.
// Builtins with JS linkage will always have executable InstructionStream
// objects since they can be called directly from jitted code with no way of
// determining that they are builtins at generation time. E.g.
// f = Array.of;
// f(1, 2, 3);
// TODO(delphick): This is probably too loose but for now Wasm can call any JS
// linkage builtin via its Code object. Once Wasm is fixed this can either be
// tightened or removed completely.
// linkage builtin via its InstructionStream object. Once Wasm is fixed this
// can either be tightened or removed completely.
if (Builtins::KindOf(builtin) != BCH && HasJSLinkage(builtin)) {
return true;
}


@ -175,8 +175,8 @@ class Builtins {
static bool IsCpp(Builtin builtin);
// True, iff the given code object is a builtin. Note that this does not
// necessarily mean that its kind is Code::BUILTIN.
static bool IsBuiltin(const Code code);
// necessarily mean that its kind is InstructionStream::BUILTIN.
static bool IsBuiltin(const InstructionStream code);
// As above, but safe to access off the main thread since the check is done
// by handle location. Similar to Heap::IsRootHandle.
@ -232,7 +232,7 @@ class Builtins {
// function.
// TODO(delphick): Come up with a better name since it may not generate an
// executable trampoline.
static Handle<Code> GenerateOffHeapTrampolineFor(
static Handle<InstructionStream> GenerateOffHeapTrampolineFor(
Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
bool generate_jump_to_instruction_stream);
@ -241,12 +241,12 @@ class Builtins {
static Handle<ByteArray> GenerateOffHeapTrampolineRelocInfo(Isolate* isolate);
// Creates a copy of InterpreterEntryTrampolineForProfiling in the code space.
static Handle<Code> CreateInterpreterEntryTrampolineForProfiling(
static Handle<InstructionStream> CreateInterpreterEntryTrampolineForProfiling(
Isolate* isolate);
// Only builtins with JS linkage should ever need to be called via their
// trampoline Code object. The remaining builtins have non-executable Code
// objects.
// trampoline InstructionStream object. The remaining builtins have
// non-executable InstructionStream objects.
static bool CodeObjectIsExecutable(Builtin builtin);
static bool IsJSEntryVariant(Builtin builtin) {
@ -336,8 +336,8 @@ class Builtins {
};
V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
// Check for kNoBuiltinId first to abort early when the current Code object
// is not a builtin.
// Check for kNoBuiltinId first to abort early when the current
// InstructionStream object is not a builtin.
return builtin_id != Builtin::kNoBuiltinId &&
(builtin_id == Builtin::kInterpreterEntryTrampoline ||
builtin_id == Builtin::kInterpreterEnterAtBytecode ||
@ -345,8 +345,8 @@ V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
}
V8_INLINE constexpr bool IsBaselineTrampolineBuiltin(Builtin builtin_id) {
// Check for kNoBuiltinId first to abort early when the current Code object
// is not a builtin.
// Check for kNoBuiltinId first to abort early when the current
// InstructionStream object is not a builtin.
return builtin_id != Builtin::kNoBuiltinId &&
(builtin_id == Builtin::kBaselineOutOfLinePrologue ||
builtin_id == Builtin::kBaselineOutOfLinePrologueDeopt ||


@ -30,8 +30,8 @@ macro IsCell(o: HeapObject): bool {
}
@export
macro IsCode(o: HeapObject): bool {
return Is<Code>(o);
macro IsInstructionStream(o: HeapObject): bool {
return Is<InstructionStream>(o);
}
@export


@ -43,7 +43,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
// All code objects should be loaded through the root register or use
// pc-relative addressing.
DCHECK(!object->IsCode());
DCHECK(!object->IsInstructionStream());
#endif
auto find_result = map_.FindOrInsert(object);
@ -73,7 +73,7 @@ void CheckPreconditionsForPatching(Isolate* isolate,
} // namespace
void BuiltinsConstantsTableBuilder::PatchSelfReference(
Handle<Object> self_reference, Handle<Code> code_object) {
Handle<Object> self_reference, Handle<InstructionStream> code_object) {
CheckPreconditionsForPatching(isolate_, code_object);
DCHECK(self_reference->IsOddball());
DCHECK(Oddball::cast(*self_reference).kind() ==
@ -81,7 +81,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
uint32_t key;
if (map_.Delete(self_reference, &key)) {
DCHECK(code_object->IsCode());
DCHECK(code_object->IsInstructionStream());
map_.Insert(code_object, key);
}
}
@ -115,12 +115,13 @@ void BuiltinsConstantsTableBuilder::Finalize() {
for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
uint32_t index = *it.entry();
Object value = it.key();
if (value.IsCode() && Code::cast(value).kind() == CodeKind::BUILTIN) {
if (value.IsInstructionStream() &&
InstructionStream::cast(value).kind() == CodeKind::BUILTIN) {
// Replace placeholder code objects with the real builtin.
// See also: SetupIsolateDelegate::PopulateWithPlaceholders.
// TODO(jgruber): Deduplicate placeholders and their corresponding
// builtin.
value = builtins->code(Code::cast(value).builtin_id());
value = builtins->code(InstructionStream::cast(value).builtin_id());
}
DCHECK(value.IsHeapObject());
table->set(index, value);


@ -33,10 +33,10 @@ class BuiltinsConstantsTableBuilder final {
uint32_t AddObject(Handle<Object> object);
// Self-references during code generation start out by referencing a handle
// with a temporary dummy object. Once the final Code object exists, such
// entries in the constants map must be patched up.
// with a temporary dummy object. Once the final InstructionStream object
// exists, such entries in the constants map must be patched up.
void PatchSelfReference(Handle<Object> self_reference,
Handle<Code> code_object);
Handle<InstructionStream> code_object);
// References to the array that stores basic block usage counters start out as
// references to a unique oddball. Once the actual array has been allocated,


@ -2052,7 +2052,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<CodeDataContainer> code) {
// ----------- S t a t e -------------
@ -2683,8 +2684,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
// precondition here is: if maybe_target_code is a Code object, it must NOT
// be marked_for_deoptimization (callers must ensure this).
// precondition here is: if maybe_target_code is an InstructionStream object,
// it must NOT be marked_for_deoptimization (callers must ensure this).
__ cmp(maybe_target_code, Immediate(0));
__ j(not_equal, &jump_to_optimized_code, Label::kNear);
}
@ -2727,11 +2728,13 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ leave();
}
__ LoadCodeDataContainerCodeNonBuiltin(eax, eax);
__ LoadCodeDataContainerInstructionStreamNonBuiltin(eax, eax);
// Load deoptimization data from the code object.
__ mov(ecx, Operand(eax, Code::kDeoptimizationDataOrInterpreterDataOffset -
kHeapObjectTag));
__ mov(ecx,
Operand(eax,
InstructionStream::kDeoptimizationDataOrInterpreterDataOffset -
kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt(
@ -2740,7 +2743,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ SmiUntag(ecx);
// Compute the target address = code_obj + header_size + osr_offset
__ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
__ lea(eax, Operand(eax, ecx, times_1,
InstructionStream::kHeaderSize - kHeapObjectTag));
Generate_OSREntry(masm, eax);
}
@ -4035,7 +4039,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kSystemPointerSize),
Immediate(static_cast<int>(deopt_kind)));
__ mov(Operand(esp, 2 * kSystemPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 2 * kSystemPointerSize),
ecx); // InstructionStream address or 0.
__ mov(Operand(esp, 3 * kSystemPointerSize), edx); // Fp-to-sp delta.
__ Move(Operand(esp, 4 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(masm->isolate())));
@ -4197,7 +4202,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = eax;
__ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
// Get the InstructionStream object from the shared function info.
Register code_obj = esi;
__ mov(code_obj,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@ -4230,7 +4235,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (v8_flags.debug_code) {
AssertCodeDataContainerIsBaseline(masm, code_obj, ecx);
}
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
// Load the feedback vector.
Register feedback_vector = ecx;
@ -4296,8 +4301,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
kInterpreterBytecodeArrayRegister);
__ CallCFunction(get_baseline_pc, 3);
}
__ lea(code_obj,
FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
__ lea(code_obj, FieldOperand(code_obj, kReturnRegister0, times_1,
InstructionStream::kHeaderSize));
__ pop(kInterpreterAccumulatorRegister);
if (is_osr) {


@ -52,8 +52,9 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, Builtin builtin) {
// PC-relative call/jump instructions can be used for builtin to builtin
// calls/tail calls. The embedded builtins blob generator also ensures that.
// However, there are serializer tests, where we force isolate creation at
// runtime and at this point, Code space isn't restricted to a size s.t.
// PC-relative calls may be used. So, we fall back to an indirect mode.
// runtime and at this point, Code space isn't restricted to a
// size s.t. PC-relative calls may be used. So, we fall back to an indirect
// mode.
options.use_pc_relative_calls_and_jumps_for_mksnapshot =
pc_relative_calls_fit_in_code_range;
@ -75,7 +76,7 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, Builtin builtin) {
using MacroAssemblerGenerator = void (*)(MacroAssembler*);
using CodeAssemblerGenerator = void (*)(compiler::CodeAssemblerState*);
Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
Handle<InstructionStream> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
HandleScope scope(isolate);
byte buffer[kBufferSize];
MacroAssembler masm(isolate, CodeObjectRequired::kYes,
@ -90,16 +91,17 @@ Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_self_reference(masm.CodeObject())
.set_builtin(builtin)
.Build();
Handle<InstructionStream> code =
Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_self_reference(masm.CodeObject())
.set_builtin(builtin)
.Build();
return scope.CloseAndEscape(code);
}
Code BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
MacroAssemblerGenerator generator,
const char* s_name) {
InstructionStream BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
MacroAssemblerGenerator generator,
const char* s_name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@ -130,18 +132,19 @@ Code BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
masm.GetCode(isolate, &desc, MacroAssembler::kNoSafepointTable,
handler_table_offset);
Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_self_reference(masm.CodeObject())
.set_builtin(builtin)
.Build();
Handle<InstructionStream> code =
Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_self_reference(masm.CodeObject())
.set_builtin(builtin)
.Build();
#if defined(V8_OS_WIN64)
isolate->SetBuiltinUnwindData(builtin, masm.GetUnwindInfo());
#endif // V8_OS_WIN64
return *code;
}
Code BuildAdaptor(Isolate* isolate, Builtin builtin, Address builtin_address,
const char* name) {
InstructionStream BuildAdaptor(Isolate* isolate, Builtin builtin,
Address builtin_address, const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@ -155,17 +158,19 @@ Code BuildAdaptor(Isolate* isolate, Builtin builtin, Address builtin_address,
Builtins::Generate_Adaptor(&masm, builtin_address);
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_self_reference(masm.CodeObject())
.set_builtin(builtin)
.Build();
Handle<InstructionStream> code =
Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_self_reference(masm.CodeObject())
.set_builtin(builtin)
.Build();
return *code;
}
// Builder for builtins implemented in TurboFan with JS linkage.
Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
CodeAssemblerGenerator generator, int argc,
const char* name) {
InstructionStream BuildWithCodeStubAssemblerJS(Isolate* isolate,
Builtin builtin,
CodeAssemblerGenerator generator,
int argc, const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@ -175,17 +180,16 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
compiler::CodeAssemblerState state(isolate, &zone, argc, CodeKind::BUILTIN,
name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
Handle<InstructionStream> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
ProfileDataFromFile::TryRead(name));
return *code;
}
// Builder for builtins implemented in TurboFan with CallStub linkage.
Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
CodeAssemblerGenerator generator,
CallDescriptors::Key interface_descriptor,
const char* name) {
InstructionStream BuildWithCodeStubAssemblerCS(
Isolate* isolate, Builtin builtin, CodeAssemblerGenerator generator,
CallDescriptors::Key interface_descriptor, const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@ -199,7 +203,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
compiler::CodeAssemblerState state(isolate, &zone, descriptor,
CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
Handle<InstructionStream> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
ProfileDataFromFile::TryRead(name));
return *code;
@ -209,7 +213,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
// static
void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin,
Code code) {
InstructionStream code) {
DCHECK_EQ(builtin, code.builtin_id());
builtins->set_code(builtin, ToCodeDataContainer(code));
}
@ -223,7 +227,7 @@ void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
HandleScope scope(isolate);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
Handle<Code> placeholder = BuildPlaceholder(isolate, builtin);
Handle<InstructionStream> placeholder = BuildPlaceholder(isolate, builtin);
AddBuiltin(builtins, builtin, *placeholder);
}
}
@ -242,14 +246,15 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
PtrComprCageBase cage_base(isolate);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
Code code = FromCodeDataContainer(builtins->code(builtin));
InstructionStream code = FromCodeDataContainer(builtins->code(builtin));
isolate->heap()->UnprotectAndRegisterMemoryChunk(
code, UnprotectMemoryOrigin::kMainThread);
bool flush_icache = false;
for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
InstructionStream target = InstructionStream::GetCodeFromTargetAddress(
rinfo->target_address());
DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
Builtins::IsIsolateIndependent(target.builtin_id()));
if (!target.is_builtin()) continue;
@ -277,11 +282,11 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
namespace {
Code GenerateBytecodeHandler(Isolate* isolate, Builtin builtin,
interpreter::OperandScale operand_scale,
interpreter::Bytecode bytecode) {
InstructionStream GenerateBytecodeHandler(
Isolate* isolate, Builtin builtin, interpreter::OperandScale operand_scale,
interpreter::Bytecode bytecode) {
DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
Handle<Code> code = interpreter::GenerateBytecodeHandler(
Handle<InstructionStream> code = interpreter::GenerateBytecodeHandler(
isolate, Builtins::name(builtin), bytecode, operand_scale, builtin,
BuiltinAssemblerOptions(isolate, builtin));
return *code;
@ -300,7 +305,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
HandleScope scope(isolate);
int index = 0;
Code code;
InstructionStream code;
#define BUILD_CPP(Name) \
code = BuildAdaptor(isolate, Builtin::k##Name, \
FUNCTION_ADDR(Builtin_##Name), #Name); \


@ -504,7 +504,7 @@ builtin WasmI64AtomicWait(
// Type feedback collection support for `call_ref`.
extern macro GetCodeEntry(Code): RawPtr;
extern macro GetCodeEntry(InstructionStream): RawPtr;
extern macro GetCodeEntry(CodeDataContainer): RawPtr;
struct TargetAndInstance {


@ -2046,7 +2046,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<CodeDataContainer> code) {
// ----------- S t a t e -------------
@ -2621,8 +2622,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
// precondition here is: if maybe_target_code is a Code object, it must NOT
// be marked_for_deoptimization (callers must ensure this).
// precondition here is: if maybe_target_code is an InstructionStream object,
// it must NOT be marked_for_deoptimization (callers must ensure this).
__ testq(maybe_target_code, maybe_target_code);
__ j(not_equal, &jump_to_optimized_code, Label::kNear);
}
@ -2673,13 +2674,14 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ leave();
}
__ LoadCodeDataContainerCodeNonBuiltin(rax, rax);
__ LoadCodeDataContainerInstructionStreamNonBuiltin(rax, rax);
// Load deoptimization data from the code object.
const TaggedRegister deopt_data(rbx);
__ LoadTaggedPointerField(
deopt_data,
FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
FieldOperand(
rax, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiUntagField(
@ -2688,7 +2690,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
__ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
__ leaq(rax, FieldOperand(rax, rbx, times_1, InstructionStream::kHeaderSize));
Generate_OSREntry(masm, rax);
}
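
The OSR target computation above is plain pointer arithmetic. A minimal
standalone C++ sketch (illustration only; the two constants stand in for the
V8 values of the same names used in this hunk):

    #include <cstdint>
    using std::uintptr_t;

    // tagged_code_obj: tagged InstructionStream pointer (rax above).
    // osr_offset: untagged offset read from the deoptimization data (rbx).
    uintptr_t OsrEntryAddress(uintptr_t tagged_code_obj, uintptr_t osr_offset,
                              uintptr_t header_size, uintptr_t heap_object_tag) {
      // Skip the object header and strip the heap-object tag to reach the raw
      // first instruction, then add the OSR offset into the code.
      return tagged_code_obj + header_size - heap_object_tag + osr_offset;
    }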
@ -2772,13 +2774,14 @@ void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
// before deoptimizing.
{
static constexpr int kCodeStartToCodeDataContainerOffset =
Code::kCodeDataContainerOffset - Code::kHeaderSize;
InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
__ LoadTaggedPointerField(scratch0,
Operand(kJavaScriptCallCodeStartRegister,
kCodeStartToCodeDataContainerOffset));
__ testl(
FieldOperand(scratch0, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
__ j(not_zero, &deoptimize);
}
@ -5330,7 +5333,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = rdi;
__ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
// Get the InstructionStream object from the shared function info.
Register code_obj = rbx;
TaggedRegister shared_function_info(code_obj);
__ LoadTaggedPointerField(
@ -5364,7 +5367,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (v8_flags.debug_code) {
AssertCodeDataContainerIsBaseline(masm, code_obj, r11);
}
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
__ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);
// Load the feedback vector.
Register feedback_vector = r11;
@ -5431,8 +5434,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
__ CallCFunction(get_baseline_pc, 3);
}
__ leaq(code_obj,
FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
__ leaq(code_obj, FieldOperand(code_obj, kReturnRegister0, times_1,
InstructionStream::kHeaderSize));
__ popq(kInterpreterAccumulatorRegister);
if (is_osr) {


@ -190,7 +190,7 @@ void Assembler::emit(Instr x) {
}
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code code, Address target) {
Address constant_pool_entry, InstructionStream code, Address target) {
DCHECK(!Builtins::IsIsolateIndependentBuiltin(code));
Memory<Address>(constant_pool_entry) = target;
}


@ -553,13 +553,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
// raw workflow to create Code objects (mostly in tests), add another Align
// call here. It does no harm - the end of the Code object is aligned to the
// (larger) kCodeAlignment anyways.
// raw workflow to create InstructionStream objects (mostly in tests), add
// another Align call here. It does no harm - the end of the InstructionStream
// object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
DataAlign(Code::kMetadataAlignment);
DataAlign(InstructionStream::kMetadataAlignment);
// Emit constant pool if necessary.
CheckConstPool(true, false);
@ -831,7 +831,8 @@ void Assembler::target_at_put(int pos, int target_pos) {
// orr dst, dst, #target8_1 << 8
// orr dst, dst, #target8_2 << 16
uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
uint32_t target24 =
target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
CHECK(is_uint24(target24));
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
@ -1635,7 +1636,8 @@ void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {
void Assembler::mov_label_offset(Register dst, Label* label) {
if (label->is_bound()) {
mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
mov(dst, Operand(label->pos() +
(InstructionStream::kHeaderSize - kHeapObjectTag)));
} else {
// Emit the link to the label in the code stream followed by extra nop
// instructions.
@ -5252,7 +5254,8 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
InstructionStream());
reloc_info_writer.Write(&rinfo);
}


@ -367,7 +367,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Code code, Address target);
Address constant_pool_entry, InstructionStream code, Address target);
// Get the size of the special target encoded at 'location'.
inline static int deserialization_special_target_size(Address location);
@ -388,7 +388,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// ---------------------------------------------------------------------------
// Code generation
// InstructionStream generation
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@ -1252,7 +1252,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void emit(Instr x);
// Code generation
// InstructionStream generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large


@ -348,13 +348,14 @@ void TurboAssembler::LoadCodeDataContainerEntry(
CodeDataContainer::kCodeEntryPointOffset));
}
void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object) {
ASM_CODE_COMMENT(this);
// Compute the Code object pointer from the code entry point.
// Compute the InstructionStream object pointer from the code entry point.
ldr(destination, FieldMemOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
sub(destination, destination, Operand(Code::kHeaderSize - kHeapObjectTag));
sub(destination, destination,
Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
}
void TurboAssembler::CallCodeDataContainerObject(
@ -379,9 +380,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
// Note that this assumes the caller code (i.e. the Code object currently
// being generated) is immovable or that the callee function cannot trigger
// GC, since the callee function will return to it.
// Note that this assumes the caller code (i.e. the InstructionStream object
// currently being generated) is immovable or that the callee function cannot
// trigger GC, since the callee function will return to it.
// Compute the return address in lr to return to after the jump below. The pc
// is already at '+ 8' from the current instruction; but return is after three
@ -408,7 +409,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container, Register scratch) {
ldr(scratch, FieldMemOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset));
tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
tst(scratch, Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
}
Operand MacroAssembler::ClearedValue() const {


@ -327,11 +327,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadCodeDataContainerEntry(Register destination,
Register code_data_container_object);
// Load code entry point from the CodeDataContainer object and compute
// Code object pointer out of it. Must not be used for CodeDataContainers
// corresponding to builtins, because their entry points values point to
// the embedded instruction stream in .text section.
void LoadCodeDataContainerCodeNonBuiltin(Register destination,
Register code_data_container_object);
// InstructionStream object pointer out of it. Must not be used for
// CodeDataContainers corresponding to builtins, because their entry point
// values point to the embedded instruction stream in the .text section.
void LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object);
void CallCodeDataContainerObject(Register code_data_container_object);
void JumpCodeDataContainerObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);
@ -912,7 +912,7 @@ struct MoveCycleState {
VfpRegList scratch_v_reglist = 0;
// Available scratch registers during the move cycle resolution scope.
base::Optional<UseScratchRegisterScope> temps;
// Code of the scratch register picked by {MoveToTempLocation}.
// InstructionStream of the scratch register picked by {MoveToTempLocation}.
int scratch_reg_code = -1;
};


@ -548,7 +548,7 @@ int Assembler::deserialization_special_target_size(Address location) {
}
void Assembler::deserialization_set_special_target_at(Address location,
Code code,
InstructionStream code,
Address target) {
Instruction* instr = reinterpret_cast<Instruction*>(location);
if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
@ -661,8 +661,9 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(!HAS_SMI_TAG(compressed));
Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
compressed));
// Embedding of compressed Code objects must not happen when external code
// space is enabled, because CodeDataContainers must be used instead.
// Embedding of compressed InstructionStream objects must not happen when
// external code space is enabled, because CodeDataContainers must be used
// instead.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
!IsCodeSpaceObject(HeapObject::cast(obj)));
return HeapObject::cast(obj);


@ -377,13 +377,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilderBase* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
// raw workflow to create Code objects (mostly in tests), add another Align
// call here. It does no harm - the end of the Code object is aligned to the
// (larger) kCodeAlignment anyways.
// raw workflow to create InstructionStream objects (mostly in tests), add
// another Align call here. It does no harm - the end of the InstructionStream
// object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
DataAlign(Code::kMetadataAlignment);
DataAlign(InstructionStream::kMetadataAlignment);
// Emit constant pool if necessary.
ForceConstantPoolEmissionWithoutJump();
@ -3577,7 +3577,7 @@ Instr Assembler::ImmNEONFP(double imm) {
return ImmNEONabcdefgh(FPToImm8(imm));
}
// Code generation helpers.
// InstructionStream generation helpers.
void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
MoveWideImmediateOp mov_op) {
// Ignore the top 32 bits of an immediate if we're moving to a W register.
@ -4360,7 +4360,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
DCHECK(constpool_.IsBlocked());
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
InstructionStream());
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
@ -4486,7 +4487,8 @@ intptr_t Assembler::MaxPCOffsetAfterVeneerPoolIfEmittedNow(size_t margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip);
RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
InstructionStream());
reloc_info_writer.Write(&rinfo);
}


@ -277,9 +277,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This sets the branch destination. 'location' here can be either the pc of
// an immediate branch or the address of an entry in the constant pool.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(Address location,
Code code,
Address target);
inline static void deserialization_set_special_target_at(
Address location, InstructionStream code, Address target);
// Get the size of the special target encoded at 'location'.
inline static int deserialization_special_target_size(Address location);
@ -780,12 +779,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void clz(const Register& rd, const Register& rn);
void cls(const Register& rd, const Register& rn);
// Pointer Authentication Code for Instruction address, using key B, with
// address in x17 and modifier in x16 [Armv8.3].
// Pointer Authentication InstructionStream for Instruction address, using key
// B, with address in x17 and modifier in x16 [Armv8.3].
void pacib1716();
// Pointer Authentication Code for Instruction address, using key B, with
// address in LR and modifier in SP [Armv8.3].
// Pointer Authentication InstructionStream for Instruction address, using key
// B, with address in LR and modifier in SP [Armv8.3].
void pacibsp();
// Authenticate Instruction address, using key B, with address in x17 and
@ -2088,7 +2087,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
dc64(data);
}
// Code generation helpers --------------------------------------------------
// InstructionStream generation helpers
// --------------------------------------------------
Instruction* pc() const { return Instruction::Cast(pc_); }
@ -2663,7 +2663,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
std::deque<int> internal_reference_positions_;
protected:
// Code generation
// InstructionStream generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large


@ -2360,13 +2360,14 @@ void TurboAssembler::LoadCodeDataContainerEntry(
CodeDataContainer::kCodeEntryPointOffset));
}
void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object) {
ASM_CODE_COMMENT(this);
// Compute the Code object pointer from the code entry point.
// Compute the InstructionStream object pointer from the code entry point.
Ldr(destination, FieldMemOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
Sub(destination, destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
Sub(destination, destination,
Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
}
void TurboAssembler::CallCodeDataContainerObject(
@ -2396,9 +2397,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
// Note that this assumes the caller code (i.e. the Code object currently
// being generated) is immovable or that the callee function cannot trigger
// GC, since the callee function will return to it.
// Note that this assumes the caller code (i.e. the InstructionStream object
// currently being generated) is immovable or that the callee function cannot
// trigger GC, since the callee function will return to it.
UseScratchRegisterScope temps(this);
temps.Exclude(x16, x17);
@ -2447,13 +2448,15 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
void TurboAssembler::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
int offset = InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
LoadTaggedPointerField(scratch,
MemOperand(kJavaScriptCallCodeStartRegister, offset));
Ldr(scratch.W(),
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
Label not_deoptimized;
Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
Tbz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit,
&not_deoptimized);
Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET);
Bind(&not_deoptimized);
@ -2691,7 +2694,7 @@ void MacroAssembler::JumpIfCodeDataContainerIsMarkedForDeoptimization(
Ldr(scratch.W(),
FieldMemOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset));
Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
Tbnz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit,
if_marked_for_deoptimization);
}
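
The deoptimization checks in these hunks (tst/Tbz/Tbnz) all test a single bit
of the CodeDataContainer's kind-specific flags. A hedged C++ sketch of the
predicate (the real code first loads the flags field from the tagged object):

    #include <cstdint>
    using std::uint32_t;

    bool IsMarkedForDeoptimization(uint32_t kind_specific_flags,
                                   int marked_for_deopt_bit) {
      // Mirrors `flags & (1 << InstructionStream::kMarkedForDeoptimizationBit)`.
      return (kind_specific_flags & (1u << marked_for_deopt_bit)) != 0;
    }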


@ -1000,11 +1000,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadCodeDataContainerEntry(Register destination,
Register code_data_container_object);
// Load code entry point from the CodeDataContainer object and compute
// Code object pointer out of it. Must not be used for CodeDataContainers
// corresponding to builtins, because their entry points values point to
// the embedded instruction stream in .text section.
void LoadCodeDataContainerCodeNonBuiltin(Register destination,
Register code_data_container_object);
// InstructionStream object pointer out of it. Must not be used for
// CodeDataContainers corresponding to builtins, because their entry point
// values point to the embedded instruction stream in the .text section.
void LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object);
void CallCodeDataContainerObject(Register code_data_container_object);
void JumpCodeDataContainerObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);
@ -1989,7 +1989,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, Register expected_parameter_count,
Register actual_parameter_count, InvokeType type);
// ---- Code generation helpers ----
// ---- InstructionStream generation helpers ----
// ---------------------------------------------------------------------------
// Support functions.


@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
class Code;
class InstructionStream;
// Associates a body of code with an interface descriptor.
class Callable final {


@ -17,7 +17,7 @@ namespace internal {
class Assembler;
// Code comments section layout:
// InstructionStream comments section layout:
// byte count content
// ------------------------------------------------------------------------
// 4 size as uint32_t (only for a check)


@ -63,7 +63,7 @@ class CodeDesc {
int code_comments_size = 0;
// TODO(jgruber,v8:11036): Remove these functions once CodeDesc fields have
// been made consistent with Code layout.
// been made consistent with InstructionStream layout.
int body_size() const { return instr_size + unwinding_info_size; }
int instruction_size() const { return safepoint_table_offset; }
int metadata_size() const { return body_size() - instruction_size(); }


@ -33,7 +33,7 @@ struct CodeOrCodeDataContainerOps {
int code_comments_size() const { return code->code_comments_size(); }
};
using CodeOps = CodeOrCodeDataContainerOps<Code>;
using CodeOps = CodeOrCodeDataContainerOps<InstructionStream>;
using CodeDataContainerOps = CodeOrCodeDataContainerOps<CodeDataContainer>;
#if V8_ENABLE_WEBASSEMBLY
@ -96,8 +96,8 @@ struct CodeDescOps {
ret CodeReference::method() const { \
DCHECK(!is_null()); \
switch (kind_) { \
case Kind::CODE: \
return CodeOps{code_}.method(); \
case Kind::INSTRUCTION_STREAM: \
return CodeOps{instruction_stream_}.method(); \
case Kind::CODE_DATA_CONTAINER: \
return CodeDataContainerOps{code_data_container_}.method(); \
case Kind::WASM_CODE: \


@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
class Code;
class InstructionStream;
class CodeDataContainer;
class CodeDesc;
@ -27,7 +27,8 @@ class CodeReference {
: kind_(Kind::WASM_CODE), wasm_code_(wasm_code) {}
explicit CodeReference(const CodeDesc* code_desc)
: kind_(Kind::CODE_DESC), code_desc_(code_desc) {}
explicit CodeReference(Handle<Code> code) : kind_(Kind::CODE), code_(code) {}
explicit CodeReference(Handle<InstructionStream> code)
: kind_(Kind::INSTRUCTION_STREAM), instruction_stream_(code) {}
explicit CodeReference(Handle<CodeDataContainer> code_data_container)
: kind_(Kind::CODE_DATA_CONTAINER),
code_data_container_(code_data_container) {}
@ -43,15 +44,17 @@ class CodeReference {
int code_comments_size() const;
bool is_null() const { return kind_ == Kind::NONE; }
bool is_code() const { return kind_ == Kind::CODE; }
bool is_instruction_stream() const {
return kind_ == Kind::INSTRUCTION_STREAM;
}
bool is_code_data_container() const {
return kind_ == Kind::CODE_DATA_CONTAINER;
}
bool is_wasm_code() const { return kind_ == Kind::WASM_CODE; }
Handle<Code> as_code() const {
DCHECK_EQ(Kind::CODE, kind_);
return code_;
Handle<InstructionStream> as_instruction_stream() const {
DCHECK_EQ(Kind::INSTRUCTION_STREAM, kind_);
return instruction_stream_;
}
Handle<CodeDataContainer> as_code_data_container() const {
@ -67,7 +70,7 @@ class CodeReference {
private:
enum class Kind {
NONE,
CODE,
INSTRUCTION_STREAM,
CODE_DATA_CONTAINER,
WASM_CODE,
CODE_DESC
@ -76,7 +79,7 @@ class CodeReference {
std::nullptr_t null_;
const wasm::WasmCode* wasm_code_;
const CodeDesc* code_desc_;
Handle<Code> code_;
Handle<InstructionStream> instruction_stream_;
Handle<CodeDataContainer> code_data_container_;
};
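
A short usage sketch for the renamed kind and accessors (a fragment assuming
the V8 headers; `stream` is a placeholder Handle<InstructionStream>):

    CodeReference ref(stream);
    if (ref.is_instruction_stream()) {
      Handle<InstructionStream> is = ref.as_instruction_stream();
      // Queries such as ref.code_comments_size() dispatch on the kind.
    }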


@ -3158,7 +3158,7 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
#endif // DEBUG
TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
FromCodeDataContainerNonBuiltin(code),
Code::kDeoptimizationDataOrInterpreterDataOffset);
InstructionStream::kDeoptimizationDataOrInterpreterDataOffset);
var_result = baseline_data;
}
Goto(&check_for_interpreter_data);
@ -15614,7 +15614,7 @@ TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeDataContainer> code) {
TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(
TNode<CodeDataContainer> code_data_container) {
return IsSetWord32<Code::MarkedForDeoptimizationField>(
return IsSetWord32<InstructionStream::MarkedForDeoptimizationField>(
LoadObjectField<Int32T>(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset));
}

@ -834,21 +834,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void FastCheck(TNode<BoolT> condition);
// TODO(v8:11880): remove once Code::bytecode_or_interpreter_data field
// is cached in or moved to CodeDataContainer.
TNode<Code> FromCodeDataContainerNonBuiltin(TNode<CodeDataContainer> code) {
// Compute the Code object pointer from the code entry point.
// TODO(v8:11880): remove once InstructionStream::bytecode_or_interpreter_data
// field is cached in or moved to CodeDataContainer.
TNode<InstructionStream> FromCodeDataContainerNonBuiltin(
TNode<CodeDataContainer> code) {
// Compute the InstructionStream object pointer from the code entry point.
TNode<RawPtrT> code_entry = Load<RawPtrT>(
code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset -
kHeapObjectTag));
TNode<Object> o = BitcastWordToTagged(IntPtrSub(
code_entry, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
code_entry,
IntPtrConstant(InstructionStream::kHeaderSize - kHeapObjectTag)));
return CAST(o);
}
TNode<CodeDataContainer> ToCodeDataContainer(TNode<Code> code) {
return LoadObjectField<CodeDataContainer>(code,
Code::kCodeDataContainerOffset);
TNode<CodeDataContainer> ToCodeDataContainer(TNode<InstructionStream> code) {
return LoadObjectField<CodeDataContainer>(
code, InstructionStream::kCodeDataContainerOffset);
}
TNode<RawPtrT> GetCodeEntry(TNode<CodeDataContainer> code);
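
FromCodeDataContainerNonBuiltin above relies on the entry point sitting exactly kHeaderSize bytes past the object start, while tagged pointers carry a kHeapObjectTag bias. A standalone sketch of that arithmetic; the constants are placeholders, not the real V8 layout values:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kHeapObjectTag = 1;  // placeholder tag bias
    constexpr uintptr_t kHeaderSize = 64;    // placeholder header size

    // entry point -> tagged InstructionStream pointer, as in the body above.
    uintptr_t TaggedObjectFromEntryPoint(uintptr_t code_entry) {
      return code_entry - (kHeaderSize - kHeapObjectTag);
    }

    // The inverse direction, as used when computing an entry point.
    uintptr_t EntryPointFromTaggedObject(uintptr_t tagged) {
      return tagged + (kHeaderSize - kHeapObjectTag);
    }

    int main() {
      uintptr_t object_start = 0x10000;              // untagged object address
      uintptr_t entry = object_start + kHeaderSize;  // first instruction
      uintptr_t tagged = TaggedObjectFromEntryPoint(entry);
      assert(tagged == object_start + kHeapObjectTag);
      assert(EntryPointFromTaggedObject(tagged) == entry);
    }
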
@ -857,7 +859,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// The following Call wrappers call an object according to the semantics that
// one finds in the ECMAScript spec, operating on a Callable (e.g. a
// JSFunction or proxy) rather than a Code object.
// JSFunction or proxy) rather than an InstructionStream object.
template <class... TArgs>
TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
TNode<JSReceiver> receiver, TArgs... args) {

@ -644,7 +644,7 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate,
Handle<BytecodeArray> bytecode_array(shared_info->GetBytecodeArray(isolate),
isolate);
Handle<Code> code =
Handle<InstructionStream> code =
Builtins::CreateInterpreterEntryTrampolineForProfiling(isolate);
Handle<InterpreterData> interpreter_data =
@ -1177,7 +1177,8 @@ void RecordMaglevFunctionCompilation(Isolate* isolate,
Handle<JSFunction> function) {
PtrComprCageBase cage_base(isolate);
// TODO(v8:13261): We should be able to pass a CodeDataContainer AbstractCode
// in here, but LinuxPerfJitLogger only supports Code AbstractCode.
// in here, but LinuxPerfJitLogger only supports InstructionStream
// AbstractCode.
Handle<AbstractCode> abstract_code(
AbstractCode::cast(FromCodeDataContainer(function->code(cage_base))),
isolate);
@ -1731,13 +1732,16 @@ class MergeAssumptionChecker final : public ObjectVisitor {
}
// The object graph for a newly compiled Script shouldn't yet contain any
// Code. If any of these functions are called, then that would indicate that
// the graph was not disjoint from the rest of the heap as expected.
// InstructionStream. If any of these functions are called, then that would
// indicate that the graph was not disjoint from the rest of the heap as
// expected.
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
UNREACHABLE();
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
void VisitCodeTarget(InstructionStream host, RelocInfo* rinfo) override {
UNREACHABLE();
}
void VisitEmbeddedPointer(InstructionStream host, RelocInfo* rinfo) override {
UNREACHABLE();
}
@ -2637,7 +2641,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
}
CompilerTracer::TraceStartBaselineCompile(isolate, shared);
Handle<Code> code;
Handle<InstructionStream> code;
base::TimeDelta time_taken;
{
ScopedTimer timer(&time_taken);
@ -3929,7 +3933,7 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
// 2) The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
// 3) The code may have already been invalidated due to dependency change.
// 4) Code generation may have failed.
// 4) InstructionStream generation may have failed.
if (job->state() == CompilationJob::State::kReadyToFinalize) {
if (shared->optimization_disabled()) {
job->RetryOptimization(BailoutReason::kOptimizationDisabled);
@ -3991,8 +3995,8 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
ResetTieringState(*function, osr_offset);
if (status == CompilationJob::SUCCEEDED) {
// Note the finalized Code object has already been installed on the
// function by MaglevCompilationJob::FinalizeJobImpl.
// Note the finalized InstructionStream object has already been installed on
// the function by MaglevCompilationJob::FinalizeJobImpl.
OptimizedCodeCache::Insert(isolate, *function, BytecodeOffset::None(),
function->code(),

@ -749,7 +749,7 @@ namespace {
static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj,
int bytecode_offset,
Address raw_bytecode_array) {
Code code_obj = Code::cast(Object(raw_code_obj));
InstructionStream code_obj = InstructionStream::cast(Object(raw_code_obj));
BytecodeArray bytecode_array =
BytecodeArray::cast(Object(raw_bytecode_array));
return code_obj.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
@ -759,7 +759,7 @@ static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj,
static uintptr_t BaselinePCForNextExecutedBytecode(Address raw_code_obj,
int bytecode_offset,
Address raw_bytecode_array) {
Code code_obj = Code::cast(Object(raw_code_obj));
InstructionStream code_obj = InstructionStream::cast(Object(raw_code_obj));
BytecodeArray bytecode_array =
BytecodeArray::cast(Object(raw_bytecode_array));
return code_obj.GetBaselinePCForNextExecutedBytecode(bytecode_offset,

@ -19,7 +19,7 @@
namespace v8 {
namespace internal {
HandlerTable::HandlerTable(Code code)
HandlerTable::HandlerTable(InstructionStream code)
: HandlerTable(code.HandlerTableAddress(), code.handler_table_size(),
kReturnAddressBasedEncoding) {}
@ -151,7 +151,7 @@ int HandlerTable::LengthForRange(int entries) {
// static
int HandlerTable::EmitReturnTableStart(Assembler* masm) {
masm->DataAlign(Code::kMetadataAlignment);
masm->DataAlign(InstructionStream::kMetadataAlignment);
masm->RecordComment(";;; Exception handler table.");
int table_start = masm->pc_offset();
return table_start;

@ -15,7 +15,7 @@ namespace internal {
class Assembler;
class ByteArray;
class BytecodeArray;
class Code;
class InstructionStream;
class CodeDataContainer;
namespace wasm {
@ -30,8 +30,9 @@ class WasmCode;
// Layout looks as follows:
// [ range-start , range-end , handler-offset , handler-data ]
// 2) Based on return addresses: Used for turbofanned code. Stored directly in
// the instruction stream of the {Code} object. Contains one entry per
// call-site that could throw an exception. Layout looks as follows:
// the instruction stream of the {InstructionStream} object. Contains one
// entry per call-site that could throw an exception. Layout looks as
// follows:
// [ return-address-offset , handler-offset ]
class V8_EXPORT_PRIVATE HandlerTable {
public:
@ -54,7 +55,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
// Constructors for the various encodings.
explicit HandlerTable(Code code);
explicit HandlerTable(InstructionStream code);
explicit HandlerTable(CodeDataContainer code);
explicit HandlerTable(ByteArray byte_array);
#if V8_ENABLE_WEBASSEMBLY
@ -121,8 +122,8 @@ class V8_EXPORT_PRIVATE HandlerTable {
#endif
// Direct pointer into the encoded data. This pointer potentially points into
// objects on the GC heap (either {ByteArray} or {Code}) and could become
// stale during a collection. Hence we disallow any allocation.
// objects on the GC heap (either {ByteArray} or {InstructionStream}) and
// could become stale during a collection. Hence we disallow any allocation.
const Address raw_encoded_data_;
DISALLOW_GARBAGE_COLLECTION(no_gc_)
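
The two encodings documented above differ only in the lookup key: bytecode ranges for unoptimized code, exact return-address offsets for turbofanned code. A standalone sketch of both entry shapes and lookups, using plain structs rather than the packed V8 layout:

    #include <optional>
    #include <vector>

    // Range-based: [ range-start , range-end , handler-offset , handler-data ]
    struct RangeEntry {
      int range_start, range_end, handler_offset, handler_data;
    };

    // Return-address-based: [ return-address-offset , handler-offset ]
    struct ReturnEntry {
      int return_address_offset, handler_offset;
    };

    std::optional<int> LookupRange(const std::vector<RangeEntry>& table, int pc) {
      for (const RangeEntry& e : table) {  // pc is a bytecode offset here
        if (e.range_start <= pc && pc < e.range_end) return e.handler_offset;
      }
      return std::nullopt;
    }

    std::optional<int> LookupReturn(const std::vector<ReturnEntry>& table, int pc) {
      for (const ReturnEntry& e : table) {  // pc is a return-address offset
        if (e.return_address_offset == pc) return e.handler_offset;
      }
      return std::nullopt;
    }

    int main() {
      std::vector<RangeEntry> ranges = {{10, 50, 200, 0}};
      std::vector<ReturnEntry> returns = {{120, 300}};
      return (LookupRange(ranges, 30) == 200 && LookupReturn(returns, 120) == 300)
                 ? 0 : 1;
    }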

@ -188,7 +188,7 @@ void Assembler::emit(const Immediate& x) {
void Assembler::emit_code_relative_offset(Label* label) {
if (label->is_bound()) {
int32_t pos;
pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
pos = label->pos() + InstructionStream::kHeaderSize - kHeapObjectTag;
emit(pos);
} else {
emit_disp(label, Displacement::CODE_RELATIVE);
@ -222,7 +222,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
Address instruction_payload, InstructionStream code, Address target) {
set_target_address_at(instruction_payload,
!code.is_null() ? code.constant_pool() : kNullAddress,
target);
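
In emit_code_relative_offset above, a bound label is emitted relative to the tagged InstructionStream pointer rather than the code start, hence the kHeaderSize - kHeapObjectTag bias. A standalone check of that bias, with placeholder constants:

    #include <cassert>
    #include <cstdint>

    constexpr int32_t kHeapObjectTag = 1;  // placeholder tag bias
    constexpr int32_t kHeaderSize = 64;    // placeholder header size

    // Value emitted for a label bound at label_pos bytes from the code start,
    // as in emit_code_relative_offset above.
    int32_t CodeRelativeOffset(int32_t label_pos) {
      return label_pos + kHeaderSize - kHeapObjectTag;
    }

    int main() {
      // Adding the emitted offset to the tagged object pointer must land on
      // the label's instruction.
      uintptr_t object_start = 0x20000;
      uintptr_t tagged = object_start + kHeapObjectTag;
      int32_t label_pos = 128;
      assert(tagged + CodeRelativeOffset(label_pos) ==
             object_start + kHeaderSize + label_pos);
    }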

@ -320,13 +320,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
// raw workflow to create Code objects (mostly in tests), add another Align
// call here. It does no harm - the end of the Code object is aligned to the
// (larger) kCodeAlignment anyways.
// raw workflow to create InstructionStream objects (mostly in tests), add
// another Align call here. It does no harm - the end of the InstructionStream
// object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
DataAlign(Code::kMetadataAlignment);
DataAlign(InstructionStream::kMetadataAlignment);
const int code_comments_size = WriteCodeComments();
@ -1537,8 +1537,9 @@ void Assembler::bind_to(Label* L, int pos) {
long_at_put(fixup_pos, reinterpret_cast<int>(buffer_start_ + pos));
internal_reference_positions_.push_back(fixup_pos);
} else if (disp.type() == Displacement::CODE_RELATIVE) {
// Relative to Code heap object pointer.
long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
// Relative to InstructionStream heap object pointer.
long_at_put(fixup_pos,
pos + InstructionStream::kHeaderSize - kHeapObjectTag);
} else {
if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
DCHECK_EQ(byte_at(fixup_pos - 1), 0xE9); // jmp expected
@ -3406,7 +3407,8 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
InstructionStream());
reloc_info_writer.Write(&rinfo);
}
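
The DataAlign call pads the instruction stream so the metadata tables start at kMetadataAlignment. The rounding is the usual power-of-two align-up; a standalone sketch with an invented alignment value:

    #include <cassert>

    // Round pc_offset up to alignment (a power of two); this is the padding
    // that DataAlign(InstructionStream::kMetadataAlignment) inserts.
    int AlignUp(int pc_offset, int alignment) {
      return (pc_offset + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      constexpr int kMetadataAlignment = 32;  // placeholder value
      assert(AlignUp(100, kMetadataAlignment) == 128);
      assert(AlignUp(128, kMetadataAlignment) == 128);  // already aligned
    }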

@ -405,7 +405,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target);
Address instruction_payload, InstructionStream code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@ -433,7 +433,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;
// ---------------------------------------------------------------------------
// Code generation
// InstructionStream generation
//
// - function names correspond one-to-one to ia32 instruction mnemonics
// - unless specified otherwise, instructions operate on 32bit operands

@ -710,7 +710,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container) {
test(FieldOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
}
Immediate MacroAssembler::ClearedValue() const {
@ -2058,13 +2058,13 @@ void TurboAssembler::LoadCodeDataContainerEntry(
CodeDataContainer::kCodeEntryPointOffset));
}
void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object) {
ASM_CODE_COMMENT(this);
// Compute the Code object pointer from the code entry point.
// Compute the InstructionStream object pointer from the code entry point.
mov(destination, FieldOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
sub(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
}
void TurboAssembler::CallCodeDataContainerObject(

@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
class Code;
class InstructionStream;
class ExternalReference;
class StatsCounter;
@ -162,11 +162,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
void LoadCodeDataContainerEntry(Register destination,
Register code_data_container_object);
// Load code entry point from the CodeDataContainer object and compute
// Code object pointer out of it. Must not be used for CodeDataContainers
// corresponding to builtins, because their entry points values point to
// the embedded instruction stream in .text section.
void LoadCodeDataContainerCodeNonBuiltin(Register destination,
Register code_data_container_object);
// InstructionStream object pointer out of it. Must not be used for
// CodeDataContainers corresponding to builtins, because their entry point
// values point to the embedded instruction stream in the .text section.
void LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object);
void CallCodeDataContainerObject(Register code_data_container_object);
void JumpCodeDataContainerObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);

@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
MaglevSafepointTable::MaglevSafepointTable(Isolate* isolate, Address pc,
Code code)
InstructionStream code)
: MaglevSafepointTable(code.InstructionStart(isolate, pc),
code.SafepointTableAddress()) {
DCHECK(code.is_maglevved());
@ -160,7 +160,7 @@ void MaglevSafepointTableBuilder::Emit(Assembler* assembler) {
#endif
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(Code::kMetadataAlignment);
assembler->Align(InstructionStream::kMetadataAlignment);
assembler->RecordComment(";;; Maglev safepoint table.");
set_safepoint_table_offset(assembler->pc_offset());

@ -65,13 +65,14 @@ class MaglevSafepointEntry : public SafepointEntryBase {
uint32_t tagged_register_indexes_ = 0;
};
// A wrapper class for accessing the safepoint table embedded into the Code
// object.
// A wrapper class for accessing the safepoint table embedded into the
// InstructionStream object.
class MaglevSafepointTable {
public:
// The isolate and pc arguments are used for figuring out whether pc
// belongs to the embedded or un-embedded code blob.
explicit MaglevSafepointTable(Isolate* isolate, Address pc, Code code);
explicit MaglevSafepointTable(Isolate* isolate, Address pc,
InstructionStream code);
explicit MaglevSafepointTable(Isolate* isolate, Address pc,
CodeDataContainer code);
MaglevSafepointTable(const MaglevSafepointTable&) = delete;

@ -169,7 +169,7 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
}
}
void OptimizedCompilationInfo::SetCode(Handle<Code> code) {
void OptimizedCompilationInfo::SetCode(Handle<InstructionStream> code) {
DCHECK_EQ(code->kind(), code_kind());
code_ = code;
}

@ -118,7 +118,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
Handle<InstructionStream> code() const { return code_; }
CodeKind code_kind() const { return code_kind_; }
Builtin builtin() const { return builtin_; }
void set_builtin(Builtin builtin) { builtin_ = builtin; }
@ -129,9 +129,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
}
compiler::NodeObserver* node_observer() const { return node_observer_; }
// Code getters and setters.
// InstructionStream getters and setters.
void SetCode(Handle<Code> code);
void SetCode(Handle<InstructionStream> code);
#if V8_ENABLE_WEBASSEMBLY
void SetWasmCompilationResult(std::unique_ptr<wasm::WasmCompilationResult>);
@ -260,7 +260,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Handle<JSFunction> closure_;
// The compiled code.
Handle<Code> code_;
Handle<InstructionStream> code_;
// Basic block profiling support.
BasicBlockProfilerData* profiler_data_ = nullptr;

@ -253,23 +253,23 @@ void RelocIterator::next() {
done_ = true;
}
RelocIterator::RelocIterator(Code code, int mode_mask)
RelocIterator::RelocIterator(InstructionStream code, int mode_mask)
: RelocIterator(code, code.unchecked_relocation_info(), mode_mask) {}
RelocIterator::RelocIterator(Code code, ByteArray relocation_info,
RelocIterator::RelocIterator(InstructionStream code, ByteArray relocation_info,
int mode_mask)
: RelocIterator(code, code.raw_instruction_start(), code.constant_pool(),
relocation_info.GetDataEndAddress(),
relocation_info.GetDataStartAddress(), mode_mask) {}
RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
: RelocIterator(Code(), code_reference.instruction_start(),
: RelocIterator(InstructionStream(), code_reference.instruction_start(),
code_reference.constant_pool(),
code_reference.relocation_end(),
code_reference.relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code,
int mode_mask)
RelocIterator::RelocIterator(EmbeddedData* embedded_data,
InstructionStream code, int mode_mask)
: RelocIterator(code,
embedded_data->InstructionStartOfBuiltin(code.builtin_id()),
code.constant_pool(),
@ -277,20 +277,22 @@ RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code,
code.relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
: RelocIterator(Code(), reinterpret_cast<Address>(desc.buffer), 0,
desc.buffer + desc.buffer_size,
: RelocIterator(InstructionStream(), reinterpret_cast<Address>(desc.buffer),
0, desc.buffer + desc.buffer_size,
desc.buffer + desc.buffer_size - desc.reloc_size,
mode_mask) {}
RelocIterator::RelocIterator(base::Vector<byte> instructions,
base::Vector<const byte> reloc_info,
Address const_pool, int mode_mask)
: RelocIterator(Code(), reinterpret_cast<Address>(instructions.begin()),
const_pool, reloc_info.begin() + reloc_info.size(),
reloc_info.begin(), mode_mask) {}
: RelocIterator(InstructionStream(),
reinterpret_cast<Address>(instructions.begin()), const_pool,
reloc_info.begin() + reloc_info.size(), reloc_info.begin(),
mode_mask) {}
RelocIterator::RelocIterator(Code host, Address pc, Address constant_pool,
const byte* pos, const byte* end, int mode_mask)
RelocIterator::RelocIterator(InstructionStream host, Address pc,
Address constant_pool, const byte* pos,
const byte* end, int mode_mask)
: pos_(pos), end_(end), mode_mask_(mode_mask) {
// Relocation info is read backwards.
DCHECK_GE(pos_, end_);
@ -350,7 +352,8 @@ void RelocInfo::set_target_address(Address target,
icache_flush_mode);
if (!host().is_null() && IsCodeTargetMode(rmode_) &&
!v8_flags.disable_write_barriers) {
Code target_code = Code::GetCodeFromTargetAddress(target);
InstructionStream target_code =
InstructionStream::GetCodeFromTargetAddress(target);
WriteBarrierForCode(host(), this, target_code, write_barrier_mode);
}
}
@ -385,7 +388,7 @@ bool RelocInfo::RequiresRelocationAfterCodegen(const CodeDesc& desc) {
return !it.done();
}
bool RelocInfo::RequiresRelocation(Code code) {
bool RelocInfo::RequiresRelocation(InstructionStream code) {
RelocIterator it(code, RelocInfo::kApplyMask);
return !it.done();
}
@ -462,8 +465,9 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) {
<< ")";
} else if (IsCodeTargetMode(rmode_)) {
const Address code_target = target_address();
Code code = Code::GetCodeFromTargetAddress(code_target);
DCHECK(code.IsCode());
InstructionStream code =
InstructionStream::GetCodeFromTargetAddress(code_target);
DCHECK(code.IsInstructionStream());
os << " (" << CodeKindToString(code.kind());
if (Builtins::IsBuiltin(code)) {
os << " " << Builtins::name(code.builtin_id());
@ -492,10 +496,11 @@ void RelocInfo::Verify(Isolate* isolate) {
Address addr = target_address();
CHECK_NE(addr, kNullAddress);
// Check that we can find the right code object.
Code code = Code::GetCodeFromTargetAddress(addr);
InstructionStream code =
InstructionStream::GetCodeFromTargetAddress(addr);
CodeLookupResult lookup_result = isolate->FindCodeObject(addr);
CHECK(lookup_result.IsFound());
CHECK_EQ(code.address(), lookup_result.code().address());
CHECK_EQ(code.address(), lookup_result.instruction_stream().address());
break;
}
case INTERNAL_REFERENCE:
@ -504,7 +509,7 @@ void RelocInfo::Verify(Isolate* isolate) {
Address pc = target_internal_reference_address();
CodeLookupResult lookup_result = isolate->FindCodeObject(pc);
CHECK(lookup_result.IsFound());
Code code = lookup_result.code();
InstructionStream code = lookup_result.instruction_stream();
CHECK(target >= code.InstructionStart(isolate, pc));
CHECK(target <= code.InstructionEnd(isolate, pc));
break;

@ -114,7 +114,7 @@ class RelocInfo {
RelocInfo() = default;
RelocInfo(Address pc, Mode rmode, intptr_t data, Code host,
RelocInfo(Address pc, Mode rmode, intptr_t data, InstructionStream host,
Address constant_pool = kNullAddress)
: pc_(pc),
rmode_(rmode),
@ -213,7 +213,7 @@ class RelocInfo {
Address pc() const { return pc_; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
Code host() const { return host_; }
InstructionStream host() const { return host_; }
Address constant_pool() const { return constant_pool_; }
// Apply a relocation by delta bytes. When the code object is moved, PC
@ -332,7 +332,7 @@ class RelocInfo {
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
static bool RequiresRelocationAfterCodegen(const CodeDesc& desc);
static bool RequiresRelocation(Code code);
static bool RequiresRelocation(InstructionStream code);
#ifdef ENABLE_DISASSEMBLER
// Printing
@ -359,7 +359,7 @@ class RelocInfo {
// In addition to modes covered by the apply mask (which is applied at GC
// time, among others), this covers all modes that are relocated by
// Code::CopyFromNoFlush after code generation.
// InstructionStream::CopyFromNoFlush after code generation.
static int PostCodegenRelocationMask() {
return ModeMask(RelocInfo::CODE_TARGET) |
ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
@ -374,7 +374,7 @@ class RelocInfo {
Address pc_;
Mode rmode_;
intptr_t data_ = 0;
Code host_;
InstructionStream host_;
Address constant_pool_ = kNullAddress;
friend class RelocIterator;
};
@ -432,9 +432,11 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced {
// the beginning of the reloc info.
// Relocation information with mode k is included in the
// iteration iff bit k of mode_mask is set.
explicit RelocIterator(Code code, int mode_mask = -1);
explicit RelocIterator(Code code, ByteArray relocation_info, int mode_mask);
explicit RelocIterator(EmbeddedData* embedded_data, Code code, int mode_mask);
explicit RelocIterator(InstructionStream code, int mode_mask = -1);
explicit RelocIterator(InstructionStream code, ByteArray relocation_info,
int mode_mask);
explicit RelocIterator(EmbeddedData* embedded_data, InstructionStream code,
int mode_mask);
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
explicit RelocIterator(const CodeReference code_reference,
int mode_mask = -1);
@ -457,8 +459,8 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced {
}
private:
RelocIterator(Code host, Address pc, Address constant_pool, const byte* pos,
const byte* end, int mode_mask);
RelocIterator(InstructionStream host, Address pc, Address constant_pool,
const byte* pos, const byte* end, int mode_mask);
// Advance* moves the position before/after reading.
// *Read* reads from current byte(s) into rinfo_.
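
The mode_mask contract documented above is a plain bitset test: relocation mode k is visited iff bit k of the mask is set, and -1 selects every mode. A standalone sketch of that filter; the mode names are borrowed for illustration and the values invented:

    #include <cassert>

    enum Mode { CODE_TARGET = 0, FULL_EMBEDDED_OBJECT = 1, WASM_STUB_CALL = 2 };

    constexpr int ModeMask(Mode mode) { return 1 << mode; }

    // The documented rule: include mode k iff bit k of mode_mask is set.
    bool IncludedInIteration(Mode mode, int mode_mask) {
      return (mode_mask & ModeMask(mode)) != 0;
    }

    int main() {
      int mask = ModeMask(CODE_TARGET) | ModeMask(WASM_STUB_CALL);
      assert(IncludedInIteration(CODE_TARGET, mask));
      assert(!IncludedInIteration(FULL_EMBEDDED_OBJECT, mask));
      assert(IncludedInIteration(FULL_EMBEDDED_OBJECT, -1));  // -1: all modes
    }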

@ -20,7 +20,8 @@
namespace v8 {
namespace internal {
SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
SafepointTable::SafepointTable(Isolate* isolate, Address pc,
InstructionStream code)
: SafepointTable(code.InstructionStart(isolate, pc),
code.SafepointTableAddress()) {}
@ -169,7 +170,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
#endif
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(Code::kMetadataAlignment);
assembler->Align(InstructionStream::kMetadataAlignment);
assembler->RecordComment(";;; Safepoint table.");
set_safepoint_table_offset(assembler->pc_offset());

@ -54,13 +54,13 @@ class SafepointEntry : public SafepointEntryBase {
base::Vector<uint8_t> tagged_slots_;
};
// A wrapper class for accessing the safepoint table embedded into the Code
// object.
// A wrapper class for accessing the safepoint table embedded into the
// InstructionStream object.
class SafepointTable {
public:
// The isolate and pc arguments are used for figuring out whether pc
// belongs to the embedded or un-embedded code blob.
explicit SafepointTable(Isolate* isolate, Address pc, Code code);
explicit SafepointTable(Isolate* isolate, Address pc, InstructionStream code);
explicit SafepointTable(Isolate* isolate, Address pc, CodeDataContainer code);
#if V8_ENABLE_WEBASSEMBLY
explicit SafepointTable(const wasm::WasmCode* code);
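
Conceptually the table maps return pcs inside the code to the stack slots holding tagged values at that pc; the (isolate, pc) pair only picks the right instruction start. A minimal standalone model of the lookup, not the packed V8 encoding:

    #include <cstdint>
    #include <optional>
    #include <vector>

    // One safepoint: a pc offset from the instruction start plus a bitmap of
    // stack slots holding tagged values at that pc.
    struct SafepointEntryModel {
      uint32_t pc_offset;
      std::vector<uint8_t> tagged_slots;
    };

    struct SafepointTableModel {
      uintptr_t instruction_start;  // derived from (isolate, pc) in real code
      std::vector<SafepointEntryModel> entries;

      std::optional<SafepointEntryModel> FindEntry(uintptr_t pc) const {
        const uint32_t offset = static_cast<uint32_t>(pc - instruction_start);
        for (const SafepointEntryModel& e : entries) {
          if (e.pc_offset == offset) return e;
        }
        return std::nullopt;
      }
    };

    int main() {
      SafepointTableModel table{0x1000, {{0x20, {1, 0, 1}}}};
      return table.FindEntry(0x1020).has_value() ? 0 : 1;
    }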

@ -60,7 +60,7 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
}
std::vector<SourcePositionInfo> SourcePosition::InliningStack(
Handle<Code> code) const {
Handle<InstructionStream> code) const {
Isolate* isolate = code->GetIsolate();
DeoptimizationData deopt_data =
DeoptimizationData::cast(code->deoptimization_data());
@ -79,7 +79,8 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
return stack;
}
SourcePositionInfo SourcePosition::FirstInfo(Handle<Code> code) const {
SourcePositionInfo SourcePosition::FirstInfo(
Handle<InstructionStream> code) const {
DisallowGarbageCollection no_gc;
Isolate* isolate = code->GetIsolate();
DeoptimizationData deopt_data =
@ -127,7 +128,7 @@ void SourcePosition::PrintJson(std::ostream& out) const {
}
}
void SourcePosition::Print(std::ostream& out, Code code) const {
void SourcePosition::Print(std::ostream& out, InstructionStream code) const {
DeoptimizationData deopt_data =
DeoptimizationData::cast(code.deoptimization_data());
if (!isInlined()) {

@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
class Code;
class InstructionStream;
class OptimizedCompilationInfo;
class Script;
class SharedFunctionInfo;
@ -79,12 +79,13 @@ class SourcePosition final {
}
// Assumes that the code object is optimized
std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
std::vector<SourcePositionInfo> InliningStack(
Handle<InstructionStream> code) const;
std::vector<SourcePositionInfo> InliningStack(
OptimizedCompilationInfo* cinfo) const;
SourcePositionInfo FirstInfo(Handle<Code> code) const;
SourcePositionInfo FirstInfo(Handle<InstructionStream> code) const;
void Print(std::ostream& out, Code code) const;
void Print(std::ostream& out, InstructionStream code) const;
void PrintJson(std::ostream& out) const;
int ScriptOffset() const {

@ -215,7 +215,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
Address instruction_payload, InstructionStream code, Address target) {
set_target_address_at(instruction_payload,
!code.is_null() ? code.constant_pool() : kNullAddress,
target);
@ -285,8 +285,9 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(!HAS_SMI_TAG(compressed));
Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
compressed));
// Embedding of compressed Code objects must not happen when external code
// space is enabled, because CodeDataContainers must be used instead.
// Embedding of compressed InstructionStream objects must not happen when
// external code space is enabled, because CodeDataContainers must be used
// instead.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
!IsCodeSpaceObject(HeapObject::cast(obj)));
return HeapObject::cast(obj);

@ -367,13 +367,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilderBase* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
// raw workflow to create Code objects (mostly in tests), add another Align
// call here. It does no harm - the end of the Code object is aligned to the
// (larger) kCodeAlignment anyways.
// raw workflow to create InstructionStream objects (mostly in tests), add
// another Align call here. It does no harm - the end of the InstructionStream
// object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
DataAlign(Code::kMetadataAlignment);
DataAlign(InstructionStream::kMetadataAlignment);
PatchConstPool();
DCHECK(constpool_.IsEmpty());
@ -4492,7 +4492,8 @@ void Assembler::dq(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
InstructionStream());
reloc_info_writer.Write(&rinfo);
}

@ -447,12 +447,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Read/Modify the code target in the relative branch/call instruction at pc.
// On the x64 architecture, we use relative jumps with a 32-bit displacement
// to jump to other Code objects in the Code space in the heap.
// Jumps to C functions are done indirectly through a 64-bit register holding
// the absolute address of the target.
// These functions convert between absolute Addresses of Code objects and
// the relative displacements stored in the code.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
// to jump to other InstructionStream objects in the InstructionStream space
// in the heap. Jumps to C functions are done indirectly through a 64-bit
// register holding the absolute address of the target. These functions
// convert between absolute Addresses of InstructionStream objects and the
// relative displacements stored in the code. The isolate argument is unused
// (and may be nullptr) when skipping flushing.
static inline Address target_address_at(Address pc, Address constant_pool);
static inline void set_target_address_at(
Address pc, Address constant_pool, Address target,
@ -467,7 +467,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target);
Address instruction_payload, InstructionStream code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@ -505,7 +505,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
// ---------------------------------------------------------------------------
// Code generation
// InstructionStream generation
//
// Function names correspond one-to-one to x64 instruction mnemonics.
// Unless specified otherwise, instructions operate on 64-bit operands.
@ -2130,7 +2130,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
WriteUnalignedValue(addr_at(pos), x);
}
// Code emission.
// InstructionStream emission.
V8_NOINLINE V8_PRESERVE_MOST void GrowBuffer();
template <typename T>
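
The displacement described in the comment above is relative to the end of the instruction, i.e. pc + 4 when pc points at the 32-bit field. A standalone sketch of converting between absolute targets and stored rel32 values, simplified from the real accessors:

    #include <cassert>
    #include <cstdint>

    // pc points at the 32-bit displacement field; the displacement is
    // relative to the end of that field on x64.
    int32_t Rel32ForTarget(uintptr_t pc, uintptr_t target) {
      return static_cast<int32_t>(target - pc - 4);
    }

    uintptr_t TargetFromRel32(uintptr_t pc, int32_t rel32) {
      return pc + 4 + rel32;
    }

    int main() {
      uintptr_t pc = 0x100000, target = 0x100200;
      int32_t disp = Rel32ForTarget(pc, target);
      assert(disp == 0x1FC);  // 0x200 - 4
      assert(TargetFromRel32(pc, disp) == target);
    }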

@ -2308,13 +2308,13 @@ void TurboAssembler::LoadCodeDataContainerEntry(
CodeDataContainer::kCodeEntryPointOffset));
}
void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object) {
ASM_CODE_COMMENT(this);
// Compute the Code object pointer from the code entry point.
// Compute the InstructionStream object pointer from the code entry point.
movq(destination, FieldOperand(code_data_container_object,
CodeDataContainer::kCodeEntryPointOffset));
subq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
subq(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
}
void TurboAssembler::CallCodeDataContainerObject(
@ -2610,7 +2610,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
Register code_data_container) {
testl(FieldOperand(code_data_container,
CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
}
Immediate MacroAssembler::ClearedValue() const {
@ -3391,11 +3391,12 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
int offset = InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
LoadTaggedPointerField(scratch,
Operand(kJavaScriptCallCodeStartRegister, offset));
testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, not_zero);
}
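
In C-like terms the three steps reduce to one flag test on the CodeDataContainer reached from the code-start register. A standalone model of the decision; the flag layout is invented for the example:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kMarkedForDeoptimizationBit = 0;  // placeholder index

    struct CodeDataContainerModel {
      uint32_t kind_specific_flags;
    };

    // Mirrors BailoutIfDeoptimized above: true means control should jump to
    // the CompileLazyDeoptimizedCode builtin instead of running stale code.
    bool ShouldBailout(const CodeDataContainerModel& container) {
      return (container.kind_specific_flags &
              (1u << kMarkedForDeoptimizationBit)) != 0;
    }

    int main() {
      assert(!ShouldBailout({0u}));
      assert(ShouldBailout({1u << kMarkedForDeoptimizationBit}));
    }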

@ -404,11 +404,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
void LoadCodeDataContainerEntry(Register destination,
Register code_data_container_object);
// Load code entry point from the CodeDataContainer object and compute
// Code object pointer out of it. Must not be used for CodeDataContainers
// corresponding to builtins, because their entry points values point to
// the embedded instruction stream in .text section.
void LoadCodeDataContainerCodeNonBuiltin(Register destination,
Register code_data_container_object);
// InstructionStream object pointer out of it. Must not be used for
// CodeDataContainers corresponding to builtins, because their entry point
// values point to the embedded instruction stream in the .text section.
void LoadCodeDataContainerInstructionStreamNonBuiltin(
Register destination, Register code_data_container_object);
void CallCodeDataContainerObject(Register code_data_container_object);
void JumpCodeDataContainerObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);

@ -867,7 +867,7 @@ using RuntimeArguments = Arguments<ArgumentsType::kRuntime>;
using JavaScriptArguments = Arguments<ArgumentsType::kJS>;
class Assembler;
class ClassScope;
class Code;
class InstructionStream;
class CodeDataContainer;
class CodeSpace;
class Context;
@ -989,9 +989,10 @@ using HeapObjectSlot = SlotTraits::THeapObjectSlot;
using OffHeapObjectSlot = SlotTraits::TOffHeapObjectSlot;
// A CodeObjectSlot instance describes a kTaggedSize-sized field ("slot")
// holding a strong pointer to a Code object. The Code object slots might be
// compressed and since code space might be allocated off the main heap
// the load operations require explicit cage base value for code space.
// holding a strong pointer to an InstructionStream object. The InstructionStream
// object slots might be compressed and since code space might be allocated off
// the main heap the load operations require explicit cage base value for code
// space.
using CodeObjectSlot = SlotTraits::TCodeObjectSlot;
using WeakSlotCallback = bool (*)(FullObjectSlot pointer);
@ -1028,10 +1029,10 @@ constexpr int kSpaceTagSize = 4;
static_assert(FIRST_SPACE == 0);
enum class AllocationType : uint8_t {
kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
kOld, // Regular object allocated in OLD_SPACE or LO_SPACE
kCode, // Code object allocated in CODE_SPACE or CODE_LO_SPACE
kMap, // Map object allocated in OLD_SPACE
kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
kOld, // Regular object allocated in OLD_SPACE or LO_SPACE
kCode, // InstructionStream object allocated in CODE_SPACE or CODE_LO_SPACE
kMap, // Map object allocated in OLD_SPACE
kReadOnly, // Object allocated in RO_SPACE
kSharedOld, // Regular object allocated in OLD_SPACE in the shared heap
kSharedMap, // Map object in OLD_SPACE in the shared heap
@ -2056,7 +2057,8 @@ enum class IcCheckType { kElement, kProperty };
// Helper stubs can be called in different ways depending on where the target
// code is located and how the call sequence is expected to look:
// - CodeObject: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
// - CodeObject: Call on-heap {Code} object via
// {RelocInfo::CODE_TARGET}.
// - WasmRuntimeStub: Call native {WasmCode} stub via
// {RelocInfo::WASM_STUB_CALL}.
// - BuiltinPointer: Call a builtin based on a builtin pointer with dynamic

@ -60,9 +60,9 @@ class V8HeapCompressionScheme {
#ifdef V8_EXTERNAL_CODE_SPACE
// Compression scheme used for fields containing Code objects (namely for the
// CodeDataContainer::code field).
// Same as V8HeapCompressionScheme but with a different base value.
// Compression scheme used for fields containing InstructionStream objects
// (namely for the CodeDataContainer::code field). Same as
// V8HeapCompressionScheme but with a different base value.
class ExternalCodeCompressionScheme {
public:
V8_INLINE static Address PrepareCageBaseAddress(Address on_heap_addr);
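
"Same scheme, different base" means compressed code pointers decode against the code cage base instead of the main heap cage base. A standalone sketch of base-relative compression, simplified from the real scheme:

    #include <cassert>
    #include <cstdint>

    using Tagged_t = uint32_t;  // compressed on-heap representation

    // Illustrative base-relative scheme; the real V8 schemes differ in detail.
    struct CompressionSchemeModel {
      uintptr_t cage_base;  // per-scheme base: heap cage vs. code cage

      Tagged_t Compress(uintptr_t full) const {
        return static_cast<Tagged_t>(full - cage_base);
      }
      uintptr_t Decompress(Tagged_t compressed) const {
        return cage_base + compressed;
      }
    };

    int main() {
      CompressionSchemeModel heap{0x100000000000};  // invented base values
      CompressionSchemeModel code{0x200000000000};
      uintptr_t code_object = code.cage_base + 0x4242;
      Tagged_t c = code.Compress(code_object);
      assert(code.Decompress(c) == code_object);
      assert(heap.Decompress(c) != code_object);  // wrong base, wrong address
    }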

@ -647,11 +647,12 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
int offset = InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
__ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ tst(scratch, Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne);
}

@ -3376,8 +3376,8 @@ void CodeGenerator::PrepareForDeoptimizationExits(
false, false,
static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
// Check which deopt kinds exist in this Code object, to avoid emitting jumps
// to unused entries.
// Check which deopt kinds exist in this InstructionStream object, to avoid
// emitting jumps to unused entries.
bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
for (auto exit : *exits) {
saw_deopt_kind[static_cast<int>(exit->kind())] = true;

@ -411,7 +411,7 @@ void CodeGenerator::AssembleCode() {
unwinding_info_writer_.Finish(tasm()->pc_offset());
// Final alignment before starting on the metadata section.
tasm()->Align(Code::kMetadataAlignment);
tasm()->Align(InstructionStream::kMetadataAlignment);
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
@ -467,10 +467,10 @@ base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
#endif // V8_ENABLE_WEBASSEMBLY
}
MaybeHandle<Code> CodeGenerator::FinalizeCode() {
MaybeHandle<InstructionStream> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) {
tasm()->AbortedCodeGeneration();
return MaybeHandle<Code>();
return MaybeHandle<InstructionStream>();
}
// Allocate the source position table.
@ -494,7 +494,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
}
MaybeHandle<Code> maybe_code =
MaybeHandle<InstructionStream> maybe_code =
Factory::CodeBuilder(isolate(), desc, info()->code_kind())
.set_builtin(info()->builtin())
.set_inlined_bytecode_size(info()->inlined_bytecode_size())
@ -506,10 +506,10 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
.set_osr_offset(info()->osr_offset())
.TryBuild();
Handle<Code> code;
Handle<InstructionStream> code;
if (!maybe_code.ToHandle(&code)) {
tasm()->AbortedCodeGeneration();
return MaybeHandle<Code>();
return MaybeHandle<InstructionStream>();
}
LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent(

@ -159,7 +159,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// produce the actual code object. If an error occurs during either phase,
// FinalizeCode returns an empty MaybeHandle.
void AssembleCode(); // Does not need to run on main thread.
MaybeHandle<Code> FinalizeCode();
MaybeHandle<InstructionStream> FinalizeCode();
base::OwnedVector<byte> GetSourcePositionTable();
base::OwnedVector<byte> GetProtectedInstructionsData();
@ -466,8 +466,8 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// with function size. {jump_deoptimization_entry_labels_} is an optimization
// to that effect, which extracts the (potentially large) instruction
// sequence for the final jump to the deoptimization entry into a single spot
// per Code object. All deopt exits can then near-call to this label. Note:
// not used on all architectures.
// per InstructionStream object. All deopt exits can then near-call to this
// label. Note: not used on all architectures.
Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
// The maximal combined height of all frames produced upon deoptimization, and

@ -662,11 +662,12 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
int offset = InstructionStream::kCodeDataContainerOffset -
InstructionStream::kHeaderSize;
__ push(eax); // Push eax so we can use it as a scratch register.
__ mov(eax, Operand(kJavaScriptCallCodeStartRegister, offset));
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
__ pop(eax); // Restore eax.
Label skip;
@ -827,7 +828,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ PushPC();
int pc = __ pc_offset();
__ pop(scratch);
__ sub(scratch, Immediate(pc + Code::kHeaderSize - kHeapObjectTag));
__ sub(scratch,
Immediate(pc + InstructionStream::kHeaderSize - kHeapObjectTag));
__ add(scratch, Immediate::CodeRelativeOffset(&return_location));
__ mov(MemOperand(ebp, WasmExitFrameConstants::kCallingPCOffset),
scratch);

@ -162,14 +162,14 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
}
// static
Handle<Code> CodeAssembler::GenerateCode(
Handle<InstructionStream> CodeAssembler::GenerateCode(
CodeAssemblerState* state, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data) {
DCHECK(!state->code_generated_);
RawMachineAssembler* rasm = state->raw_assembler_.get();
Handle<Code> code;
Handle<InstructionStream> code;
Graph* graph = rasm->ExportForOptimization();
code = Pipeline::GenerateCodeForCodeStub(

@ -387,9 +387,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CodeAssembler(const CodeAssembler&) = delete;
CodeAssembler& operator=(const CodeAssembler&) = delete;
static Handle<Code> GenerateCode(CodeAssemblerState* state,
const AssemblerOptions& options,
const ProfileDataFromFile* profile_data);
static Handle<InstructionStream> GenerateCode(
CodeAssemblerState* state, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data);
bool Is64() const;
bool Is32() const;
bool IsFloat64RoundUpSupported() const;

@ -115,16 +115,16 @@ class PendingDependencies final {
void Register(Handle<HeapObject> object,
DependentCode::DependencyGroup group) {
// Code, which are per-local Isolate, cannot depend on objects in the shared
// heap. Shared heap dependencies are designed to never invalidate
// assumptions. E.g., maps for shared structs do not have transitions or
// change the shape of their fields. See
// InstructionStream objects, which are per-local Isolate, cannot depend on
// objects in the shared heap. Shared heap dependencies are designed to never
// invalidate assumptions. E.g., maps for shared structs do not have
// transitions or change the shape of their fields. See
// DependentCode::DeoptimizeDependencyGroups for corresponding DCHECK.
if (object->InSharedWritableHeap()) return;
deps_[object] |= group;
}
void InstallAll(Isolate* isolate, Handle<Code> code) {
void InstallAll(Isolate* isolate, Handle<InstructionStream> code) {
if (V8_UNLIKELY(v8_flags.predictable)) {
InstallAllPredictable(isolate, code);
return;
@ -139,7 +139,7 @@ class PendingDependencies final {
}
}
void InstallAllPredictable(Isolate* isolate, Handle<Code> code) {
void InstallAllPredictable(Isolate* isolate, Handle<InstructionStream> code) {
CHECK(v8_flags.predictable);
// First, guarantee predictable iteration order.
using HandleAndGroup =
@ -1189,7 +1189,7 @@ V8_INLINE void TraceInvalidCompilationDependency(
PrintF("Compilation aborted due to invalid dependency: %s\n", d->ToString());
}
bool CompilationDependencies::Commit(Handle<Code> code) {
bool CompilationDependencies::Commit(Handle<InstructionStream> code) {
if (!PrepareInstall()) return false;
{

@ -31,7 +31,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
public:
CompilationDependencies(JSHeapBroker* broker, Zone* zone);
V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
V8_WARN_UNUSED_RESULT bool Commit(Handle<InstructionStream> code);
// Return the initial map of {function} and record the assumption that it
// stays the initial map.

@ -134,30 +134,30 @@ class Reducer;
V(Uint64Div) \
V(Uint64Mod)
#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
V(AllocateInOldGenerationStub, Code) \
V(AllocateInYoungGenerationStub, Code) \
V(AllocateRegularInOldGenerationStub, Code) \
V(AllocateRegularInYoungGenerationStub, Code) \
V(BigIntMap, Map) \
V(BooleanMap, Map) \
V(EmptyString, String) \
V(ExternalObjectMap, Map) \
V(False, Boolean) \
V(FixedArrayMap, Map) \
V(FixedDoubleArrayMap, Map) \
V(WeakFixedArrayMap, Map) \
V(HeapNumberMap, Map) \
V(MinusOne, Number) \
V(NaN, Number) \
V(NoContext, Object) \
V(Null, Oddball) \
V(One, Number) \
V(TheHole, Oddball) \
V(ToNumberBuiltin, Code) \
V(PlainPrimitiveToNumberBuiltin, Code) \
V(True, Boolean) \
V(Undefined, Oddball) \
#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
V(AllocateInOldGenerationStub, InstructionStream) \
V(AllocateInYoungGenerationStub, InstructionStream) \
V(AllocateRegularInOldGenerationStub, InstructionStream) \
V(AllocateRegularInYoungGenerationStub, InstructionStream) \
V(BigIntMap, Map) \
V(BooleanMap, Map) \
V(EmptyString, String) \
V(ExternalObjectMap, Map) \
V(False, Boolean) \
V(FixedArrayMap, Map) \
V(FixedDoubleArrayMap, Map) \
V(WeakFixedArrayMap, Map) \
V(HeapNumberMap, Map) \
V(MinusOne, Number) \
V(NaN, Number) \
V(NoContext, Object) \
V(Null, Oddball) \
V(One, Number) \
V(TheHole, Oddball) \
V(ToNumberBuiltin, InstructionStream) \
V(PlainPrimitiveToNumberBuiltin, InstructionStream) \
V(True, Boolean) \
V(Undefined, Oddball) \
V(Zero, Number)
class GraphAssembler;

@ -71,7 +71,8 @@ bool IsReadOnlyHeapObjectForCompiler(PtrComprCageBase cage_base,
// TODO(jgruber): Remove this compiler-specific predicate and use the plain
// heap predicate instead. This would involve removing the special cases for
// builtins.
return (object.IsCode(cage_base) && Code::cast(object).is_builtin()) ||
return (object.IsInstructionStream(cage_base) &&
InstructionStream::cast(object).is_builtin()) ||
ReadOnlyHeap::Contains(object);
}
@ -2286,7 +2287,7 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
namespace {
unsigned GetInlinedBytecodeSizeImpl(Code code) {
unsigned GetInlinedBytecodeSizeImpl(InstructionStream code) {
unsigned value = code.inlined_bytecode_size();
if (value > 0) {
// Don't report inlined bytecode size if the code object was already
@ -2298,7 +2299,7 @@ unsigned GetInlinedBytecodeSizeImpl(Code code) {
} // namespace
unsigned CodeRef::GetInlinedBytecodeSize() const {
unsigned InstructionStreamRef::GetInlinedBytecodeSize() const {
return GetInlinedBytecodeSizeImpl(*object());
}
@ -2308,9 +2309,10 @@ unsigned CodeDataContainerRef::GetInlinedBytecodeSize() const {
return 0;
}
// Safe to do a relaxed conversion to Code here since CodeDataContainer::code
// field is modified only by GC and the CodeDataContainer was acquire-loaded.
Code code = code_data_container.code(kRelaxedLoad);
// Safe to do a relaxed conversion to InstructionStream here since
// CodeDataContainer::code field is modified only by GC and the
// CodeDataContainer was acquire-loaded.
InstructionStream code = code_data_container.instruction_stream(kRelaxedLoad);
return GetInlinedBytecodeSizeImpl(code);
}

@ -111,7 +111,7 @@ enum class RefSerializationKind {
BACKGROUND_SERIALIZED(BigInt) \
NEVER_SERIALIZED(CallHandlerInfo) \
NEVER_SERIALIZED(Cell) \
NEVER_SERIALIZED(Code) \
NEVER_SERIALIZED(InstructionStream) \
NEVER_SERIALIZED(CodeDataContainer) \
NEVER_SERIALIZED(Context) \
NEVER_SERIALIZED(DescriptorArray) \
@ -1010,11 +1010,11 @@ class JSGlobalProxyRef : public JSObjectRef {
Handle<JSGlobalProxy> object() const;
};
class CodeRef : public HeapObjectRef {
class InstructionStreamRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef)
DEFINE_REF_CONSTRUCTOR(InstructionStream, HeapObjectRef)
Handle<Code> object() const;
Handle<InstructionStream> object() const;
unsigned GetInlinedBytecodeSize() const;
};

@ -118,7 +118,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Isolate* isolate() const { return isolate_; }
// The pointer compression cage base value used for decompression of all
// tagged values except references to Code objects.
// tagged values except references to InstructionStream objects.
PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;

@ -167,9 +167,10 @@ Reduction MemoryLowering::ReduceAllocateRaw(
if (v8_flags.single_generation && allocation_type == AllocationType::kYoung) {
allocation_type = AllocationType::kOld;
}
// Code objects may have a maximum size smaller than kMaxHeapObjectSize due to
// guard pages. If we need to support allocating code here we would need to
// call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
// InstructionStream objects may have a maximum size smaller than
// kMaxHeapObjectSize due to guard pages. If we need to support allocating
// code here we would need to call
// MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
DCHECK_NE(allocation_type, AllocationType::kCode);
Node* value;
Node* size = node->InputAt(0);
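
The DCHECK holds because code pages reserve guard pages, which shrinks the largest regular code object below the generic heap limit; inlining a kCode allocation with the generic limit would therefore be unsound. A sketch of that size relationship, with invented numbers:

    #include <cassert>

    int main() {
      // Invented page layout, for illustration only.
      const int kPageSize = 256 * 1024;
      const int kGuardPages = 2, kOsPageSize = 4 * 1024;
      const int kMaxRegularHeapObjectSize = kPageSize / 2;

      // Guard pages eat into the usable code area, so the maximum regular
      // code object is strictly smaller than the generic heap-object limit.
      const int usable_code_area = kPageSize - kGuardPages * kOsPageSize;
      const int max_regular_code_object_size = usable_code_area / 2;
      assert(max_regular_code_object_size < kMaxRegularHeapObjectSize);
    }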

@ -360,8 +360,8 @@ class PipelineData {
bool verify_graph() const { return verify_graph_; }
void set_verify_graph(bool value) { verify_graph_ = value; }
MaybeHandle<Code> code() { return code_; }
void set_code(MaybeHandle<Code> code) {
MaybeHandle<InstructionStream> code() { return code_; }
void set_code(MaybeHandle<InstructionStream> code) {
DCHECK(code_.is_null());
code_ = code;
}
@ -655,7 +655,7 @@ class PipelineData {
bool verify_graph_ = false;
int start_source_position_ = kNoSourcePosition;
base::Optional<OsrHelper> osr_helper_;
MaybeHandle<Code> code_;
MaybeHandle<InstructionStream> code_;
CodeGenerator* code_generator_ = nullptr;
Typer* typer_ = nullptr;
Typer::Flags typer_flags_ = Typer::kNoFlags;
@ -750,15 +750,15 @@ class PipelineImpl final {
void AssembleCode(Linkage* linkage);
// Step D. Run the code finalization pass.
MaybeHandle<Code> FinalizeCode(bool retire_broker = true);
MaybeHandle<InstructionStream> FinalizeCode(bool retire_broker = true);
// Step E. Install any code dependencies.
bool CommitDependencies(Handle<Code> code);
bool CommitDependencies(Handle<InstructionStream> code);
void VerifyGeneratedCodeIsIdempotent();
void RunPrintAndVerify(const char* phase, bool untyped = false);
bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
MaybeHandle<InstructionStream> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegistersForTopTier(const RegisterConfiguration* config,
CallDescriptor* call_descriptor,
bool run_verifier);
@ -945,7 +945,7 @@ void PrintParticipatingSource(OptimizedCompilationInfo* info,
}
// Print the code after compiling it.
void PrintCode(Isolate* isolate, Handle<Code> code,
void PrintCode(Isolate* isolate, Handle<InstructionStream> code,
OptimizedCompilationInfo* info) {
if (v8_flags.print_opt_source && info->IsOptimizing()) {
PrintParticipatingSource(info, isolate);
@ -1145,7 +1145,7 @@ class PipelineCompilationJob final : public TurbofanCompilationJob {
// Registers weak object to optimized code dependencies.
void RegisterWeakObjectsInOptimizedCode(Isolate* isolate,
Handle<NativeContext> context,
Handle<Code> code);
Handle<InstructionStream> code);
private:
Zone zone_;
@ -1286,8 +1286,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
// phases happening during PrepareJob.
PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
Handle<Code> code;
MaybeHandle<InstructionStream> maybe_code = pipeline_.FinalizeCode();
Handle<InstructionStream> code;
if (!maybe_code.ToHandle(&code)) {
if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
return AbortOptimization(BailoutReason::kCodeGenerationFailed);
@ -1305,7 +1305,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
}
void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
Isolate* isolate, Handle<NativeContext> context, Handle<Code> code) {
Isolate* isolate, Handle<NativeContext> context,
Handle<InstructionStream> code) {
std::vector<Handle<Map>> maps;
DCHECK(code->is_optimized_code());
{
@ -2916,7 +2917,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
Handle<Code> code;
Handle<InstructionStream> code;
if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
V8::FatalProcessOutOfMemory(isolate,
"WasmHeapStubCompilationJob::FinalizeJobImpl");
@ -3246,7 +3247,7 @@ int HashGraphForPGO(Graph* graph) {
} // namespace
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
MaybeHandle<InstructionStream> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
const char* debug_name, Builtin builtin, const AssemblerOptions& options,
@ -3741,7 +3742,7 @@ void Pipeline::GenerateCodeForWasmFunction(
#endif // V8_ENABLE_WEBASSEMBLY
// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
MaybeHandle<InstructionStream> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
std::unique_ptr<JSHeapBroker>* out_broker) {
ZoneStats zone_stats(isolate->allocator());
@ -3764,9 +3765,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
{
LocalIsolateScope local_isolate_scope(data.broker(), info,
isolate->main_thread_local_isolate());
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
if (!pipeline.CreateGraph()) return MaybeHandle<InstructionStream>();
// We selectively Unpark inside OptimizeGraph.
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
if (!pipeline.OptimizeGraph(&linkage))
return MaybeHandle<InstructionStream>();
pipeline.AssembleCode(&linkage);
}
@ -3780,17 +3782,17 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
info->DetachPersistentHandles(), info->DetachCanonicalHandles());
}
Handle<Code> code;
Handle<InstructionStream> code;
if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
pipeline.CommitDependencies(code)) {
if (!will_retire_broker) *out_broker = data.ReleaseBroker();
return code;
}
return MaybeHandle<Code>();
return MaybeHandle<InstructionStream>();
}
// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
MaybeHandle<InstructionStream> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
CallDescriptor* call_descriptor, Graph* graph,
const AssemblerOptions& options, Schedule* schedule) {
@ -3822,12 +3824,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
pipeline.ComputeScheduledGraph();
}
Handle<Code> code;
Handle<InstructionStream> code;
if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
pipeline.CommitDependencies(code)) {
return code;
}
return MaybeHandle<Code>();
return MaybeHandle<InstructionStream>();
}
// static
@ -4110,7 +4112,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage) {
data->EndPhaseKind();
}
MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
MaybeHandle<InstructionStream> PipelineImpl::FinalizeCode(bool retire_broker) {
PipelineData* data = this->data_;
data->BeginPhaseKind("V8.TFFinalizeCode");
if (data->broker() && retire_broker) {
@ -4118,8 +4120,8 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
}
Run<FinalizeCodePhase>();
MaybeHandle<Code> maybe_code = data->code();
Handle<Code> code;
MaybeHandle<InstructionStream> maybe_code = data->code();
Handle<InstructionStream> code;
if (!maybe_code.ToHandle(&code)) {
return maybe_code;
}
@ -4174,14 +4176,15 @@ bool PipelineImpl::SelectInstructionsAndAssemble(
return true;
}
MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
MaybeHandle<InstructionStream> PipelineImpl::GenerateCode(
CallDescriptor* call_descriptor) {
if (!SelectInstructionsAndAssemble(call_descriptor)) {
return MaybeHandle<Code>();
return MaybeHandle<InstructionStream>();
}
return FinalizeCode();
}
bool PipelineImpl::CommitDependencies(Handle<Code> code) {
bool PipelineImpl::CommitDependencies(Handle<InstructionStream> code) {
return data_->dependencies() == nullptr ||
data_->dependencies()->Commit(code);
}
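
Every caller in the hunks above follows the same empty-handle protocol. As a minimal sketch of that pattern (assuming a PipelineImpl instance named pipeline, as in the functions above; this is illustration, not code from the CL):

    MaybeHandle<InstructionStream> maybe_code = pipeline.FinalizeCode();
    Handle<InstructionStream> code;
    if (!maybe_code.ToHandle(&code)) {
      // Empty handle: code generation failed or bailed out.
      return MaybeHandle<InstructionStream>();
    }
    return code;  // Handle<T> converts implicitly to MaybeHandle<T>.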


@ -75,7 +75,7 @@ class Pipeline : public AllStatic {
SourcePositionTable* source_positions = nullptr);
// Run the pipeline on a machine graph and generate code.
static MaybeHandle<Code> GenerateCodeForCodeStub(
static MaybeHandle<InstructionStream> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
const char* debug_name, Builtin builtin, const AssemblerOptions& options,
@ -88,16 +88,17 @@ class Pipeline : public AllStatic {
// Run the pipeline on JavaScript bytecode and generate code. If requested,
// hands out the heap broker on success, transferring its ownership to the
// caller.
V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
std::unique_ptr<JSHeapBroker>* out_broker = nullptr);
V8_EXPORT_PRIVATE static MaybeHandle<InstructionStream>
GenerateCodeForTesting(OptimizedCompilationInfo* info, Isolate* isolate,
std::unique_ptr<JSHeapBroker>* out_broker = nullptr);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
CallDescriptor* call_descriptor, Graph* graph,
const AssemblerOptions& options, Schedule* schedule = nullptr);
V8_EXPORT_PRIVATE static MaybeHandle<InstructionStream>
GenerateCodeForTesting(OptimizedCompilationInfo* info, Isolate* isolate,
CallDescriptor* call_descriptor, Graph* graph,
const AssemblerOptions& options,
Schedule* schedule = nullptr);
// Run just the register allocator phases.
V8_EXPORT_PRIVATE static void AllocateRegistersForTesting(


@ -368,7 +368,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
case INSTRUCTION_STREAM_TYPE:
case CODE_DATA_CONTAINER_TYPE:
case PROPERTY_CELL_TYPE:
case SOURCE_TEXT_MODULE_TYPE:
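
For orientation in the remaining hunks, the rename reduces to a handful of mechanical substitutions (a summary distilled from this diff, not an exhaustive list from the CL):

    // Code                    ->  InstructionStream
    // CODE_TYPE               ->  INSTRUCTION_STREAM_TYPE
    // Code::cast(obj)         ->  InstructionStream::cast(obj)
    // obj.IsCode(cage_base)   ->  obj.IsInstructionStream(cage_base)
    // raw_code()              ->  raw_instruction_stream()
    // lookup_result.code()    ->  lookup_result.instruction_stream()
    // CodeVerify / CodePrint  ->  InstructionStreamVerify / InstructionStreamPrint

CodeDataContainer and CODE_DATA_CONTAINER_TYPE keep their names in this CL.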


@ -8312,11 +8312,9 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module,
}
}
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
WasmImportCallKind kind,
int expected_arity,
wasm::Suspend suspend) {
MaybeHandle<InstructionStream> CompileWasmToJSWrapper(
Isolate* isolate, const wasm::FunctionSig* sig, WasmImportCallKind kind,
int expected_arity, wasm::Suspend suspend) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@ -8359,15 +8357,15 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
CompilationJob::FAILED ||
job->FinalizeJob(isolate) == CompilationJob::FAILED) {
return Handle<Code>();
return Handle<InstructionStream>();
}
Handle<Code> code = job->compilation_info()->code();
Handle<InstructionStream> code = job->compilation_info()->code();
return code;
}
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
const wasm::WasmModule* module) {
MaybeHandle<InstructionStream> CompileJSToJSWrapper(
Isolate* isolate, const wasm::FunctionSig* sig,
const wasm::WasmModule* module) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
isolate->allocator(), ZONE_NAME, kCompressGraphZone);
Graph* graph = zone->New<Graph>(zone.get());
@ -8409,7 +8407,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
job->FinalizeJob(isolate) == CompilationJob::FAILED) {
return {};
}
Handle<Code> code = job->compilation_info()->code();
Handle<InstructionStream> code = job->compilation_info()->code();
return code;
}
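
Both wrapper compilers above share the same synchronous two-phase job protocol: ExecuteJob runs the TurboFan phases, and FinalizeJob materializes the result on the main thread. A condensed sketch (job stands in for the TurbofanCompilationJob created by either helper; hypothetical surrounding code):

    if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
            CompilationJob::FAILED ||
        job->FinalizeJob(isolate) == CompilationJob::FAILED) {
      return {};  // An empty MaybeHandle<InstructionStream> signals failure.
    }
    Handle<InstructionStream> code = job->compilation_info()->code();
    return code;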


@ -142,17 +142,15 @@ std::unique_ptr<TurbofanCompilationJob> NewJSToWasmCompilationJob(
const wasm::WasmModule* module, bool is_import,
const wasm::WasmFeatures& enabled_features);
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
WasmImportCallKind kind,
int expected_arity,
wasm::Suspend suspend);
MaybeHandle<InstructionStream> CompileWasmToJSWrapper(
Isolate* isolate, const wasm::FunctionSig* sig, WasmImportCallKind kind,
int expected_arity, wasm::Suspend suspend);
// Compiles a stub with JS linkage that serves as an adapter for function
// objects constructed via {WebAssembly.Function}. It performs a round-trip
// simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
MaybeHandle<Code> CompileJSToJSWrapper(Isolate*, const wasm::FunctionSig*,
const wasm::WasmModule* module);
MaybeHandle<InstructionStream> CompileJSToJSWrapper(
Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);
enum CWasmEntryParameters {
kCodeEntry,


@ -1231,7 +1231,8 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
for (Builtin caller = Builtins::kFirst; caller <= Builtins::kLast; ++caller) {
DebugInfo::SideEffectState state = BuiltinGetSideEffectState(caller);
if (state != DebugInfo::kHasNoSideEffect) continue;
Code code = FromCodeDataContainer(isolate->builtins()->code(caller));
InstructionStream code =
FromCodeDataContainer(isolate->builtins()->code(caller));
int mode = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);


@ -161,7 +161,7 @@ void BreakLocation::AllAtCurrentStatement(
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
PtrComprCageBase cage_base = GetPtrComprCageBase(*debug_info);
if (abstract_code->IsCode(cage_base)) offset = offset - 1;
if (abstract_code->IsInstructionStream(cage_base)) offset = offset - 1;
int statement_position;
{
BreakIterator it(debug_info);
@ -1941,7 +1941,8 @@ bool Debug::FindSharedFunctionInfosIntersectingRange(
for (const auto& candidate : candidates) {
IsCompiledScope is_compiled_scope(candidate->is_compiled_scope(isolate_));
if (!is_compiled_scope.is_compiled()) {
// Code that cannot be compiled lazily are internal and not debuggable.
// InstructionStream objects that cannot be compiled lazily are internal
// and not debuggable.
DCHECK(candidate->allows_lazy_compilation());
if (!Compiler::Compile(isolate_, candidate, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
@ -2006,7 +2007,8 @@ Handle<Object> Debug::FindInnermostContainingFunctionInfo(Handle<Script> script,
}
// If not, compile to reveal inner functions.
HandleScope scope(isolate_);
// Code that cannot be compiled lazily are internal and not debuggable.
// InstructionStream objects that cannot be compiled lazily are internal
// and not debuggable.
DCHECK(shared.allows_lazy_compilation());
if (!Compiler::Compile(isolate_, handle(shared, isolate_),
Compiler::CLEAR_EXCEPTION, &is_compiled_scope)) {


@ -340,8 +340,9 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
// Mark all code, then deoptimize.
{
Code::OptimizedCodeIterator it(isolate);
for (Code code = it.Next(); !code.is_null(); code = it.Next()) {
InstructionStream::OptimizedCodeIterator it(isolate);
for (InstructionStream code = it.Next(); !code.is_null();
code = it.Next()) {
code.set_marked_for_deoptimization(true);
}
}
@ -385,8 +386,9 @@ void Deoptimizer::DeoptimizeAllOptimizedCodeWithFunction(
// Mark all code that inlines this function, then deoptimize.
bool any_marked = false;
{
Code::OptimizedCodeIterator it(isolate);
for (Code code = it.Next(); !code.is_null(); code = it.Next()) {
InstructionStream::OptimizedCodeIterator it(isolate);
for (InstructionStream code = it.Next(); !code.is_null();
code = it.Next()) {
if (code.Inlines(*function)) {
code.set_marked_for_deoptimization(true);
any_marked = true;
@ -500,17 +502,17 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
}
}
Code Deoptimizer::FindOptimizedCode() {
InstructionStream Deoptimizer::FindOptimizedCode() {
CodeLookupResult lookup_result = isolate_->FindCodeObject(from_);
return lookup_result.code();
return lookup_result.instruction_stream();
}
Handle<JSFunction> Deoptimizer::function() const {
return Handle<JSFunction>(function_, isolate());
}
Handle<Code> Deoptimizer::compiled_code() const {
return Handle<Code>(compiled_code_, isolate());
Handle<InstructionStream> Deoptimizer::compiled_code() const {
return Handle<InstructionStream>(compiled_code_, isolate());
}
Deoptimizer::~Deoptimizer() {
@ -626,7 +628,8 @@ void Deoptimizer::TraceDeoptEnd(double deopt_duration) {
}
// static
void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
void Deoptimizer::TraceMarkForDeoptimization(InstructionStream code,
const char* reason) {
if (!v8_flags.trace_deopt && !v8_flags.log_deopt) return;
DisallowGarbageCollection no_gc;
@ -1940,7 +1943,8 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
return parameter_slots * kSystemPointerSize;
}
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(InstructionStream code,
Address pc) {
CHECK(code.InstructionStart() <= pc && pc <= code.InstructionEnd());
SourcePosition last_position = SourcePosition::Unknown();
DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;


@ -43,7 +43,7 @@ class Deoptimizer : public Malloced {
const int deopt_id;
};
static DeoptInfo GetDeoptInfo(Code code, Address from);
static DeoptInfo GetDeoptInfo(InstructionStream code, Address from);
DeoptInfo GetDeoptInfo() const {
return Deoptimizer::GetDeoptInfo(compiled_code_, from_);
}
@ -55,7 +55,7 @@ class Deoptimizer : public Malloced {
static const char* MessageFor(DeoptimizeKind kind);
Handle<JSFunction> function() const;
Handle<Code> compiled_code() const;
Handle<InstructionStream> compiled_code() const;
DeoptimizeKind deopt_kind() const { return deopt_kind_; }
// Where the deopt exit occurred *in the outermost frame*, i.e in the
@ -115,7 +115,7 @@ class Deoptimizer : public Malloced {
static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind* type_out);
// Code generation support.
// InstructionStream generation support.
static int input_offset() { return offsetof(Deoptimizer, input_); }
static int output_count_offset() {
return offsetof(Deoptimizer, output_count_);
@ -140,7 +140,8 @@ class Deoptimizer : public Malloced {
V8_EXPORT_PRIVATE static const int kLazyDeoptExitSize;
// Tracing.
static void TraceMarkForDeoptimization(Code code, const char* reason);
static void TraceMarkForDeoptimization(InstructionStream code,
const char* reason);
static void TraceEvictFromOptimizedCodeCache(SharedFunctionInfo sfi,
const char* reason);
@ -150,7 +151,7 @@ class Deoptimizer : public Malloced {
Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
Address from, int fp_to_sp_delta);
Code FindOptimizedCode();
InstructionStream FindOptimizedCode();
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
@ -180,10 +181,10 @@ class Deoptimizer : public Malloced {
static void MarkAllCodeForContext(NativeContext native_context);
static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
// Searches the list of known deoptimizing code for a Code object
// Searches the list of known deoptimizing code for an InstructionStream object
// containing the given address (which is supposedly faster than
// searching all code objects).
Code FindDeoptimizingCode(Address addr);
InstructionStream FindDeoptimizingCode(Address addr);
// Tracing.
bool tracing_enabled() const { return trace_scope_ != nullptr; }
@ -206,7 +207,7 @@ class Deoptimizer : public Malloced {
Isolate* isolate_;
JSFunction function_;
Code compiled_code_;
InstructionStream compiled_code_;
unsigned deopt_exit_index_;
BytecodeOffset bytecode_offset_in_outermost_frame_ = BytecodeOffset::None();
DeoptimizeKind deopt_kind_;
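
A hedged usage sketch for the renamed lookup helper (deopt and pc are placeholders for a live Deoptimizer instance and a return address from surrounding code; not part of the CL):

    // compiled_code() now hands out the InstructionStream being deoptimized.
    Handle<InstructionStream> code = deopt->compiled_code();
    // The static helper maps a pc inside that code back to the recorded
    // deopt metadata, e.g. the deopt_id field of DeoptInfo shown above.
    Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(*code, pc);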


@ -382,9 +382,9 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
const CodeReference& host = code;
Address constant_pool =
host.is_null() ? kNullAddress : host.constant_pool();
Code code_pointer;
if (host.is_code()) {
code_pointer = *host.as_code();
InstructionStream code_pointer;
if (host.is_instruction_stream()) {
code_pointer = *host.as_instruction_stream();
}
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], code_pointer,
@ -404,7 +404,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// by IsInConstantPool() below.
if (pcs.empty() && !code.is_null() && !decoding_constant_pool) {
RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc),
RelocInfo::NO_INFO, 0, Code());
RelocInfo::NO_INFO, 0, InstructionStream());
if (dummy_rinfo.IsInConstantPool()) {
Address constant_pool_entry_address =
dummy_rinfo.constant_pool_entry_address();


@ -37,8 +37,8 @@ namespace GDBJITInterface {
void EventHandler(const v8::JitCodeEvent* event);
// Expose some functions for unittests. These only exercise the logic to add
// AddressRegion to CodeMap, and checking for overlap. It does not touch the
// actual JITCodeEntry at all.
// AddressRegion to InstructionStreamMap, and to check for overlap. They do not
// touch the actual JITCodeEntry at all.
V8_EXPORT_PRIVATE void AddRegionForTesting(const base::AddressRegion region);
V8_EXPORT_PRIVATE void ClearCodeMapForTesting();
V8_EXPORT_PRIVATE size_t


@ -243,8 +243,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
TransitionArray::cast(*this).TransitionArrayVerify(isolate);
break;
case CODE_TYPE:
Code::cast(*this).CodeVerify(isolate);
case INSTRUCTION_STREAM_TYPE:
InstructionStream::cast(*this).InstructionStreamVerify(isolate);
break;
case JS_API_OBJECT_TYPE:
case JS_ARRAY_ITERATOR_PROTOTYPE_TYPE:
@ -335,7 +335,7 @@ void HeapObject::VerifyHeapPointer(Isolate* isolate, Object p) {
// If you crashed here and {isolate->is_shared()}, there is a bug causing the
// host of {p} to point to a non-shared object.
CHECK(IsValidHeapObject(isolate->heap(), HeapObject::cast(p)));
CHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !p.IsCode());
CHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !p.IsInstructionStream());
}
// static
@ -343,7 +343,7 @@ void HeapObject::VerifyCodePointer(Isolate* isolate, Object p) {
CHECK(p.IsHeapObject());
CHECK(IsValidCodeObject(isolate->heap(), HeapObject::cast(p)));
PtrComprCageBase cage_base(isolate);
CHECK(HeapObject::cast(p).IsCode(cage_base));
CHECK(HeapObject::cast(p).IsInstructionStream(cage_base));
}
void Symbol::SymbolVerify(Isolate* isolate) {
@ -1091,36 +1091,38 @@ void PropertyCell::PropertyCellVerify(Isolate* isolate) {
void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
CHECK(IsCodeDataContainer());
if (raw_code() != Smi::zero()) {
Code code = this->code();
if (raw_instruction_stream() != Smi::zero()) {
InstructionStream code = this->instruction_stream();
CHECK_EQ(code.kind(), kind());
CHECK_EQ(code.builtin_id(), builtin_id());
// When v8_flags.interpreted_frames_native_stack is enabled each
// interpreted function gets its own copy of the
// InterpreterEntryTrampoline. Thus, there could be Code'ful builtins.
// InterpreterEntryTrampoline. Thus, there could be InstructionStream'ful
// builtins.
CHECK_IMPLIES(isolate->embedded_blob_code() && is_off_heap_trampoline(),
builtin_id() == Builtin::kInterpreterEntryTrampoline);
CHECK_EQ(code.code_data_container(kAcquireLoad), *this);
// Ensure the cached code entry point corresponds to the Code object
// associated with this CodeDataContainer.
// Ensure the cached code entry point corresponds to the InstructionStream
// object associated with this CodeDataContainer.
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
if (V8_SHORT_BUILTIN_CALLS_BOOL) {
if (code.InstructionStart() == code_entry_point()) {
// Most common case, all good.
} else {
// When shared pointer compression cage is enabled and it has the
// embedded code blob copy then the Code::InstructionStart() might
// return the address of the remapped builtin regardless of whether
// the builtins copy existed when the code_entry_point value was
// cached in the CodeDataContainer (see
// Code::OffHeapInstructionStart()). So, do a reverse Code object
// lookup via code_entry_point value to ensure it corresponds to the
// same Code object associated with this CodeDataContainer.
// embedded code blob copy then the
// InstructionStream::InstructionStart() might return the address of the
// remapped builtin regardless of whether the builtins copy existed when
// the code_entry_point value was cached in the CodeDataContainer (see
// InstructionStream::OffHeapInstructionStart()). So, do a reverse
// InstructionStream object lookup via code_entry_point value to ensure
// it corresponds to the same InstructionStream object associated with
// this CodeDataContainer.
CodeLookupResult lookup_result =
isolate->heap()->GcSafeFindCodeForInnerPointer(code_entry_point());
CHECK(lookup_result.IsFound());
CHECK_EQ(lookup_result.ToCode(), code);
CHECK_EQ(lookup_result.ToInstructionStream(), code);
}
} else {
CHECK_EQ(code.InstructionStart(), code_entry_point());
@ -1131,9 +1133,10 @@ void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
}
}
void Code::CodeVerify(Isolate* isolate) {
CHECK(IsAligned(InstructionSize(),
static_cast<unsigned>(Code::kMetadataAlignment)));
void InstructionStream::InstructionStreamVerify(Isolate* isolate) {
CHECK(
IsAligned(InstructionSize(),
static_cast<unsigned>(InstructionStream::kMetadataAlignment)));
CHECK_EQ(safepoint_table_offset(), 0);
CHECK_LE(safepoint_table_offset(), handler_table_offset());
CHECK_LE(handler_table_offset(), constant_pool_offset());
@ -1147,11 +1150,11 @@ void Code::CodeVerify(Isolate* isolate) {
#endif // !defined(_MSC_VER) || defined(__clang__)
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(raw_instruction_start(), kCodeAlignment));
CHECK_EQ(*this, code_data_container(kAcquireLoad).code());
CHECK_EQ(*this, code_data_container(kAcquireLoad).instruction_stream());
// TODO(delphick): Refactor Factory::CodeBuilder::BuildInternal, so that the
// following CHECK works for builtin trampolines. It currently fails because
// CodeVerify is called halfway through constructing the trampoline and so not
// everything is set up.
// InstructionStreamVerify is called halfway through constructing the
// trampoline and so not everything is set up.
// CHECK_EQ(ReadOnlyHeap::Contains(*this), !IsExecutable());
relocation_info().ObjectVerify(isolate);
CHECK(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ||
@ -1577,7 +1580,8 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
FixedArray arr = FixedArray::cast(data());
Object one_byte_data = arr.get(JSRegExp::kIrregexpLatin1CodeIndex);
// Smi : Not compiled yet (-1).
// Code: Compiled irregexp code or trampoline to the interpreter.
// InstructionStream: Compiled irregexp code or trampoline to the
// interpreter.
CHECK((one_byte_data.IsSmi() &&
Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
one_byte_data.IsCodeDataContainer());


@ -210,8 +210,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
WasmExceptionPackage::cast(*this).WasmExceptionPackagePrint(os);
break;
#endif // V8_ENABLE_WEBASSEMBLY
case CODE_TYPE:
Code::cast(*this).CodePrint(os);
case INSTRUCTION_STREAM_TYPE:
InstructionStream::cast(*this).InstructionStreamPrint(os);
break;
case CODE_DATA_CONTAINER_TYPE:
CodeDataContainer::cast(*this).CodeDataContainerPrint(os);
@ -1792,8 +1792,8 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) {
os << "\n";
}
void Code::CodePrint(std::ostream& os) {
PrintHeader(os, "Code");
void InstructionStream::InstructionStreamPrint(std::ostream& os) {
PrintHeader(os, "InstructionStream");
os << "\n - code_data_container: "
<< Brief(code_data_container(kAcquireLoad));
if (is_builtin()) {
@ -1812,7 +1812,7 @@ void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {
os << "\n - builtin: " << Builtins::name(builtin_id());
}
os << "\n - is_off_heap_trampoline: " << is_off_heap_trampoline();
os << "\n - code: " << Brief(raw_code());
os << "\n - instruction_stream: " << Brief(raw_instruction_stream());
os << "\n - code_entry_point: "
<< reinterpret_cast<void*>(code_entry_point());
os << "\n - kind_specific_flags: " << kind_specific_flags(kRelaxedLoad);
@ -3052,13 +3052,14 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
i::CodeDataContainer::cast(lookup_result.code_data_container());
code.Disassemble(nullptr, os, isolate, address);
} else {
lookup_result.code().Disassemble(nullptr, os, isolate, address);
lookup_result.instruction_stream().Disassemble(nullptr, os, isolate,
address);
}
#else // ENABLE_DISASSEMBLER
if (lookup_result.IsCodeDataContainer()) {
lookup_result.code_data_container().Print();
} else {
lookup_result.code().Print();
lookup_result.instruction_stream().Print();
}
#endif // ENABLE_DISASSEMBLER
}
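
The print helper above keeps the dual representation intact: an inner pointer resolves either to a CodeDataContainer or to an on-heap InstructionStream. A condensed sketch of that dispatch (address is assumed to point into a code object):

    CodeLookupResult lookup =
        isolate->heap()->GcSafeFindCodeForInnerPointer(address);
    if (lookup.IsCodeDataContainer()) {
      lookup.code_data_container().Print();
    } else {
      lookup.instruction_stream().Print();
    }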


@ -231,9 +231,11 @@ void LinuxPerfJitLogger::LogRecordedBuffer(
if (perf_output_handle_ == nullptr) return;
// We only support non-interpreted functions.
if (!abstract_code->IsCode(isolate_)) return;
Handle<Code> code = Handle<Code>::cast(abstract_code);
DCHECK(code->raw_instruction_start() == code->address() + Code::kHeaderSize);
if (!abstract_code->IsInstructionStream(isolate_)) return;
Handle<InstructionStream> code =
Handle<InstructionStream>::cast(abstract_code);
DCHECK(code->raw_instruction_start() ==
code->address() + InstructionStream::kHeaderSize);
// Debug info has to be emitted first.
Handle<SharedFunctionInfo> shared;
@ -320,7 +322,7 @@ base::Vector<const char> GetScriptName(Object maybeScript,
} // namespace
SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
SourcePositionInfo GetSourcePositionInfo(Handle<InstructionStream> code,
Handle<SharedFunctionInfo> function,
SourcePosition pos) {
DisallowGarbageCollection disallow;
@ -333,7 +335,7 @@ SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
} // namespace
void LinuxPerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
void LinuxPerfJitLogger::LogWriteDebugInfo(Handle<InstructionStream> code,
Handle<SharedFunctionInfo> shared) {
// Line ends of all scripts have been initialized prior to this.
DisallowGarbageCollection no_gc;
@ -484,7 +486,7 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(const wasm::WasmCode* code) {
}
#endif // V8_ENABLE_WEBASSEMBLY
void LinuxPerfJitLogger::LogWriteUnwindingInfo(Code code) {
void LinuxPerfJitLogger::LogWriteUnwindingInfo(InstructionStream code) {
PerfJitCodeUnwindingInfo unwinding_info_header;
unwinding_info_header.event_ = PerfJitCodeLoad::kUnwindingInfo;
unwinding_info_header.time_stamp_ = GetTimestamp();


@ -76,11 +76,12 @@ class LinuxPerfJitLogger : public CodeEventLogger {
void LogWriteBytes(const char* bytes, int size);
void LogWriteHeader();
void LogWriteDebugInfo(Handle<Code> code, Handle<SharedFunctionInfo> shared);
void LogWriteDebugInfo(Handle<InstructionStream> code,
Handle<SharedFunctionInfo> shared);
#if V8_ENABLE_WEBASSEMBLY
void LogWriteDebugInfo(const wasm::WasmCode* code);
#endif // V8_ENABLE_WEBASSEMBLY
void LogWriteUnwindingInfo(Code code);
void LogWriteUnwindingInfo(InstructionStream code);
static const uint32_t kElfMachIA32 = 3;
static const uint32_t kElfMachX64 = 62;


@ -311,7 +311,7 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
}
CodeLookupResult interpreter_entry_trampoline =
isolate->heap()->GcSafeFindCodeForInnerPointer(pc);
return interpreter_entry_trampoline.code()
return interpreter_entry_trampoline.instruction_stream()
.is_interpreter_trampoline_builtin();
} else {
return false;
@ -571,8 +571,8 @@ CodeLookupResult StackFrame::LookupCodeDataContainer() const {
CodeLookupResult result = GetContainingCode(isolate(), pc());
if (DEBUG_BOOL) {
CHECK(result.IsFound());
if (result.IsCode()) {
Code code = result.code();
if (result.IsInstructionStream()) {
InstructionStream code = result.instruction_stream();
CHECK_GE(pc(), code.InstructionStart(isolate(), pc()));
CHECK_LT(pc(), code.InstructionEnd(isolate(), pc()));
} else {
@ -594,7 +594,7 @@ void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
v->VisitRunningCode(FullObjectSlot(&code));
return;
}
Code holder = lookup_result.code();
InstructionStream holder = lookup_result.instruction_stream();
Address old_pc = ReadPC(pc_address);
DCHECK(ReadOnlyHeap::Contains(holder) ||
holder.GetHeap()->GcSafeCodeContains(holder, old_pc));
@ -602,7 +602,7 @@ void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Object code = holder;
v->VisitRunningCode(FullObjectSlot(&code));
if (code == holder) return;
holder = Code::unchecked_cast(code);
holder = InstructionStream::unchecked_cast(code);
Address pc = holder.InstructionStart(isolate_, old_pc) + pc_offset;
// TODO(v8:10026): avoid replacing a signed pointer.
PointerAuthentication::ReplacePC(pc_address, pc, kSystemPointerSize);
@ -631,7 +631,7 @@ inline StackFrame::Type ComputeBuiltinFrameType(CodeOrCodeDataContainer code) {
return StackFrame::BASELINE;
}
if (code.is_turbofanned()) {
// TODO(bmeurer): We treat frames for BUILTIN Code objects as
// TODO(bmeurer): We treat frames for BUILTIN InstructionStream objects as
// OptimizedFrame for now (all the builtins with JavaScript
// linkage are actually generated with TurboFan currently, so
// this is sound).
@ -709,7 +709,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return ComputeBuiltinFrameType(
CodeDataContainer::cast(lookup_result.code_data_container()));
}
return ComputeBuiltinFrameType(lookup_result.code());
return ComputeBuiltinFrameType(lookup_result.instruction_stream());
}
case CodeKind::BASELINE:
return BASELINE;
@ -740,7 +740,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return WASM_TO_JS_FUNCTION;
case CodeKind::WASM_FUNCTION:
case CodeKind::WASM_TO_CAPI_FUNCTION:
// Never appear as on-heap {Code} objects.
// Never appear as on-heap {InstructionStream} objects.
UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
default:
@ -1092,10 +1092,10 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
// FullMaybeObjectSlots here.
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// When external code space is enabled the spill slot could contain both
// Code and non-Code references, which have different cage bases. So
// unconditional decompression of the value might corrupt Code pointers.
// However, given that
// 1) the Code pointers are never compressed by design (because
// InstructionStream and non-InstructionStream references, which have
// different cage bases. So unconditional decompression of the value might
// corrupt InstructionStream pointers. However, given that 1) the
// InstructionStream pointers are never compressed by design (because
// otherwise we wouldn't know which cage base to apply for
// decompression, see respective DCHECKs in
// RelocInfo::target_object()),
@ -1104,7 +1104,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
// we can avoid updating upper part of the spill slot if it already
// contains full value.
// TODO(v8:11880): Remove this special handling by enforcing builtins
// to use CodeTs instead of Code objects.
// to use CodeTs instead of InstructionStream objects.
Address value = *spill_slot.location();
if (!HAS_SMI_TAG(value) && value <= 0xffffffff) {
// We don't need to update smi values or full pointers.
@ -1551,8 +1551,8 @@ HeapObject TurbofanStubWithContextFrame::unchecked_code() const {
if (code_lookup.IsCodeDataContainer()) {
return code_lookup.code_data_container();
}
if (code_lookup.IsCode()) {
return code_lookup.code();
if (code_lookup.IsInstructionStream()) {
return code_lookup.instruction_stream();
}
return {};
}
@ -1649,8 +1649,8 @@ HeapObject StubFrame::unchecked_code() const {
if (code_lookup.IsCodeDataContainer()) {
return code_lookup.code_data_container();
}
if (code_lookup.IsCode()) {
return code_lookup.code();
if (code_lookup.IsInstructionStream()) {
return code_lookup.instruction_stream();
}
return {};
}
@ -2451,12 +2451,12 @@ void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
}
int BaselineFrame::GetBytecodeOffset() const {
Code code = LookupCodeDataContainer().code();
InstructionStream code = LookupCodeDataContainer().instruction_stream();
return code.GetBytecodeOffsetForBaselinePC(this->pc(), GetBytecodeArray());
}
intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
Code code = LookupCodeDataContainer().code();
InstructionStream code = LookupCodeDataContainer().instruction_stream();
return code.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
GetBytecodeArray());
}
@ -2984,7 +2984,8 @@ InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
// the code has been computed.
entry->code =
isolate_->heap()->GcSafeFindCodeForInnerPointer(inner_pointer);
if (entry->code.IsCode() && entry->code.code().is_maglevved()) {
if (entry->code.IsInstructionStream() &&
entry->code.instruction_stream().is_maglevved()) {
entry->maglev_safepoint_entry.Reset();
} else {
entry->safepoint_entry.Reset();
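
As the baseline-frame hunks above show, a frame first resolves its CodeLookupResult and only then extracts the InstructionStream. A sketch of the pc-to-bytecode mapping after the rename (frame stands in for a BaselineFrame*; illustration only):

    InstructionStream code =
        frame->LookupCodeDataContainer().instruction_stream();
    int bytecode_offset = code.GetBytecodeOffsetForBaselinePC(
        frame->pc(), frame->GetBytecodeArray());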


@ -297,8 +297,8 @@ class StackFrame {
// Get the type of this frame.
virtual Type type() const = 0;
// Get the code associated with this frame. The result might be a Code object,
// a CodeDataContainer object or an empty value.
// Get the code associated with this frame. The result might be an
// InstructionStream object, a CodeDataContainer object or an empty value.
// This method is used by Isolate::PushStackTraceAndDie() for collecting a
// stack trace on fatal error and thus it might be called in the middle of GC
// and should be as safe as possible.


@ -243,8 +243,9 @@ class IsolateData final {
ThreadLocalTop thread_local_top_;
// The entry points for builtins. This corresponds to
// Code::InstructionStart() for each Code object in the builtins table below.
// The entry table is in IsolateData for easy access through kRootRegister.
// InstructionStream::InstructionStart() for each InstructionStream object in
// the builtins table below. The entry table is in IsolateData for easy access
// through kRootRegister.
Address builtin_entry_table_[Builtins::kBuiltinCount] = {};
// The entries in this array are tagged pointers to CodeDataContainer objects.


@ -115,8 +115,8 @@ V8_INLINE PtrComprCageBase GetPtrComprCageBaseSlow(HeapObject object) {
return PtrComprCageBase{isolate};
}
// If the Isolate can't be obtained then the heap object is a read-only
// one and therefore not a Code object, so fallback to auto-computing cage
// base value.
// one and therefore not an InstructionStream object, so fall back to
// auto-computing cage base value.
}
return GetPtrComprCageBase(object);
}


@ -181,12 +181,13 @@ uint32_t DefaultEmbeddedBlobDataSize() {
namespace {
// These variables provide access to the current embedded blob without requiring
// an isolate instance. This is needed e.g. by Code::InstructionStart, which may
// not have access to an isolate but still needs to access the embedded blob.
// The variables are initialized by each isolate in Init(). Writes and reads are
// relaxed since we can guarantee that the current thread has initialized these
// variables before accessing them. Different threads may race, but this is fine
// since they all attempt to set the same values of the blob pointer and size.
// an isolate instance. This is needed e.g. by
// InstructionStream::InstructionStart, which may not have access to an isolate
// but still needs to access the embedded blob. The variables are initialized by
// each isolate in Init(). Writes and reads are relaxed since we can guarantee
// that the current thread has initialized these variables before accessing
// them. Different threads may race, but this is fine since they all attempt to
// set the same values of the blob pointer and size.
std::atomic<const uint8_t*> current_embedded_blob_code_(nullptr);
std::atomic<uint32_t> current_embedded_blob_code_size_(0);
@ -2000,7 +2001,8 @@ Object Isolate::UnwindAndFindHandler() {
CHECK(frame->is_java_script());
if (frame->is_turbofan()) {
Code code = frame->LookupCodeDataContainer().code();
InstructionStream code =
frame->LookupCodeDataContainer().instruction_stream();
// The debugger triggers lazy deopt for the "to-be-restarted" frame
// immediately when the CDP event arrives while paused.
CHECK(code.marked_for_deoptimization());
@ -2052,7 +2054,8 @@ Object Isolate::UnwindAndFindHandler() {
case StackFrame::C_WASM_ENTRY: {
StackHandler* handler = frame->top_handler();
thread_local_top()->handler_ = handler->next_address();
Code code = frame->LookupCodeDataContainer().code();
InstructionStream code =
frame->LookupCodeDataContainer().instruction_stream();
HandlerTable table(code);
Address instruction_start = code.InstructionStart(this, frame->pc());
int return_offset = static_cast<int>(frame->pc() - instruction_start);
@ -2198,7 +2201,8 @@ Object Isolate::UnwindAndFindHandler() {
if (frame->is_baseline()) {
BaselineFrame* sp_frame = BaselineFrame::cast(js_frame);
Code code = sp_frame->LookupCodeDataContainer().code();
InstructionStream code =
sp_frame->LookupCodeDataContainer().instruction_stream();
DCHECK(!code.is_off_heap_trampoline());
intptr_t pc_offset = sp_frame->GetPCForBytecodeOffset(offset);
// Patch the context register directly on the frame, so that we don't
@ -4822,7 +4826,7 @@ bool Isolate::use_optimizer() {
void Isolate::IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code) {
PtrComprCageBase cage_base(this);
DCHECK(code->IsCode(cage_base) || code->IsByteArray(cage_base));
DCHECK(code->IsInstructionStream(cage_base) || code->IsByteArray(cage_base));
total_regexp_code_generated_ += code->Size(cage_base);
}
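
The hunk above also shows the cage-base plumbing the rename touches everywhere: type predicates take an explicit PtrComprCageBase so they work without an Isolate. A sketch (code is a Handle<HeapObject> as above; total is a hypothetical accumulator):

    PtrComprCageBase cage_base = GetPtrComprCageBase(*code);
    if (code->IsInstructionStream(cage_base)) {
      total += code->Size(cage_base);
    }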


@ -1715,8 +1715,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
// Hashes bits of the Isolate that are relevant for embedded builtins. In
// particular, the embedded blob requires builtin Code object layout and the
// builtins constants table to remain unchanged from build-time.
// particular, the embedded blob requires builtin InstructionStream object
// layout and the builtins constants table to remain unchanged from
// build-time.
size_t HashIsolateForEmbeddedBlob();
static const uint8_t* CurrentEmbeddedBlobCode();


@ -145,8 +145,8 @@ void StatisticsExtension::GetCounters(
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
Object maybe_source_positions;
if (obj.IsCode()) {
Code code = Code::cast(obj);
if (obj.IsInstructionStream()) {
InstructionStream code = InstructionStream::cast(obj);
reloc_info_total += code.relocation_info().Size();
// Baseline code doesn't have source positions since it uses
// interpreter code positions.


@ -105,10 +105,10 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
// When V8_EXTERNAL_CODE_SPACE_BOOL is enabled the allocatable region must
// not cross the 4Gb boundary and thus the default compression scheme of
// truncating the Code pointers to 32-bits still works. It's achieved by
// specifying base_alignment parameter.
// Note that the alignment is calculated before adjusting the requested size
// for GetWritableReservedAreaSize(). The reasons are:
// truncating the InstructionStream pointers to 32-bits still works. It's
// achieved by specifying the base_alignment parameter. Note that the alignment is
// calculated before adjusting the requested size for
// GetWritableReservedAreaSize(). The reasons are:
// - this extra page is used by breakpad on Windows and it's allowed to cross
// the 4Gb boundary,
// - rounding up the adjusted size would result in requesting unnecessarily


@ -32,7 +32,7 @@ void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
// Record code+metadata statistics.
AbstractCode abstract_code = AbstractCode::cast(object);
int size = abstract_code.SizeIncludingMetadata(cage_base);
if (abstract_code.IsCode(cage_base)) {
if (abstract_code.IsInstructionStream(cage_base)) {
size += isolate->code_and_metadata_size();
isolate->set_code_and_metadata_size(size);
} else {
@ -204,9 +204,9 @@ void CodeStatistics::CollectCodeCommentStatistics(AbstractCode obj,
// them in the stats.
// Only process code objects for code comment statistics.
PtrComprCageBase cage_base(isolate);
if (!obj.IsCode(cage_base)) return;
if (!obj.IsInstructionStream(cage_base)) return;
Code code = Code::cast(obj);
InstructionStream code = InstructionStream::cast(obj);
CodeCommentsIterator cit(code.code_comments(), code.code_comments_size());
int delta = 0;
int prev_pc_offset = 0;


@ -122,7 +122,8 @@ void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
if (space_->identity() == CODE_SPACE) {
optional_rwx_write_scope.emplace(
"Marking Code objects requires write access to the Code page header");
"Marking InstructionStream objects requires write access to the "
"Code page header");
}
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
}
@ -136,7 +137,8 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
if (space_->identity() == CODE_SPACE) {
optional_rwx_write_scope.emplace(
"Marking Code objects requires write access to the Code page header");
"Marking InstructionStream objects requires write access to the "
"Code page header");
}
Page::FromAllocationAreaAddress(top)->DestroyBlackAreaBackground(top,
limit);
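
Both allocator hunks gate the page-header write behind the same optional scope; as a sketch (space_ as in ConcurrentAllocator above):

    base::Optional<CodePageHeaderModificationScope> rwx_write_scope;
    if (space_->identity() == CODE_SPACE) {
      rwx_write_scope.emplace(
          "Marking InstructionStream objects requires write access to the "
          "Code page header");
    }
    // The emplaced scope is destroyed with the enclosing block (RAII).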

Some files were not shown because too many files have changed in this diff.