From 2c7272fddee50a161a97c753fcf8cfa6ae7f920f Mon Sep 17 00:00:00 2001
From: Jakob Linke
Date: Tue, 17 Jan 2023 10:59:51 +0100
Subject: [PATCH] Rename Code to InstructionStream

.. as part of the big Code/CodeDataContainer name shuffle. In the next
step, CodeDataContainer will be renamed to Code.

Bug: v8:13654
Change-Id: Ia80ac984d46dd6c2a108098055a5cd60e22a837c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4171628
Auto-Submit: Jakob Linke
Reviewed-by: Michael Lippautz
Commit-Queue: Michael Lippautz
Cr-Commit-Position: refs/heads/main@{#85337}
---
 src/baseline/baseline-batch-compiler.cc | 6 +-
 src/baseline/baseline-compiler.cc | 3 +-
 src/baseline/baseline-compiler.h | 2 +-
 src/baseline/baseline.cc | 10 +-
 src/baseline/baseline.h | 6 +-
 src/builtins/arm/builtins-arm.cc | 32 +-
 src/builtins/arm64/builtins-arm64.cc | 27 +-
 src/builtins/base.tq | 2 +-
 src/builtins/builtins-lazy-gen.cc | 7 +-
 src/builtins/builtins.cc | 27 +-
 src/builtins/builtins.h | 20 +-
 src/builtins/cast.tq | 4 +-
 src/builtins/constants-table-builder.cc | 11 +-
 src/builtins/constants-table-builder.h | 6 +-
 src/builtins/ia32/builtins-ia32.cc | 29 +-
 src/builtins/setup-builtins-internal.cc | 81 ++-
 src/builtins/wasm.tq | 2 +-
 src/builtins/x64/builtins-x64.cc | 27 +-
 src/codegen/arm/assembler-arm-inl.h | 2 +-
 src/codegen/arm/assembler-arm.cc | 17 +-
 src/codegen/arm/assembler-arm.h | 6 +-
 src/codegen/arm/macro-assembler-arm.cc | 15 +-
 src/codegen/arm/macro-assembler-arm.h | 12 +-
 src/codegen/arm64/assembler-arm64-inl.h | 7 +-
 src/codegen/arm64/assembler-arm64.cc | 16 +-
 src/codegen/arm64/assembler-arm64.h | 18 +-
 src/codegen/arm64/macro-assembler-arm64.cc | 21 +-
 src/codegen/arm64/macro-assembler-arm64.h | 12 +-
 src/codegen/callable.h | 2 +-
 src/codegen/code-comments.h | 2 +-
 src/codegen/code-desc.h | 2 +-
 src/codegen/code-reference.cc | 6 +-
 src/codegen/code-reference.h | 19 +-
 src/codegen/code-stub-assembler.cc | 4 +-
 src/codegen/code-stub-assembler.h | 20 +-
 src/codegen/compiler.cc | 24 +-
 src/codegen/external-reference.cc | 4 +-
 src/codegen/handler-table.cc | 4 +-
 src/codegen/handler-table.h | 13 +-
 src/codegen/ia32/assembler-ia32-inl.h | 4 +-
 src/codegen/ia32/assembler-ia32.cc | 16 +-
 src/codegen/ia32/assembler-ia32.h | 4 +-
 src/codegen/ia32/macro-assembler-ia32.cc | 8 +-
 src/codegen/ia32/macro-assembler-ia32.h | 12 +-
 src/codegen/maglev-safepoint-table.cc | 4 +-
 src/codegen/maglev-safepoint-table.h | 7 +-
 src/codegen/optimized-compilation-info.cc | 2 +-
 src/codegen/optimized-compilation-info.h | 8 +-
 src/codegen/reloc-info.cc | 43 +-
 src/codegen/reloc-info.h | 22 +-
 src/codegen/safepoint-table.cc | 5 +-
 src/codegen/safepoint-table.h | 6 +-
 src/codegen/source-position.cc | 7 +-
 src/codegen/source-position.h | 9 +-
 src/codegen/x64/assembler-x64-inl.h | 7 +-
 src/codegen/x64/assembler-x64.cc | 11 +-
 src/codegen/x64/assembler-x64.h | 18 +-
 src/codegen/x64/macro-assembler-x64.cc | 13 +-
 src/codegen/x64/macro-assembler-x64.h | 10 +-
 src/common/globals.h | 20 +-
 src/common/ptr-compr.h | 6 +-
 .../backend/arm/code-generator-arm.cc | 5 +-
 .../backend/arm64/code-generator-arm64.cc | 4 +-
 src/compiler/backend/code-generator.cc | 12 +-
 src/compiler/backend/code-generator.h | 6 +-
 .../backend/ia32/code-generator-ia32.cc | 8 +-
 src/compiler/code-assembler.cc | 4 +-
 src/compiler/code-assembler.h | 6 +-
 src/compiler/compilation-dependencies.cc | 14 +-
 src/compiler/compilation-dependencies.h | 2 +-
 src/compiler/graph-assembler.h | 48 +-
 src/compiler/heap-refs.cc | 14 +-
 src/compiler/heap-refs.h | 8 +-
 src/compiler/js-heap-broker.h | 2 +-
 src/compiler/memory-lowering.cc | 7 +-
 src/compiler/pipeline.cc | 57 +-
 src/compiler/pipeline.h | 17 +-
 src/compiler/types.cc | 2 +-
 src/compiler/wasm-compiler.cc | 20 +-
 src/compiler/wasm-compiler.h | 12 +-
 src/debug/debug-evaluate.cc | 3 +-
 src/debug/debug.cc | 8 +-
 src/deoptimizer/deoptimizer.cc | 24 +-
 src/deoptimizer/deoptimizer.h | 17 +-
 src/diagnostics/disassembler.cc | 8 +-
 src/diagnostics/gdb-jit.h | 4 +-
 src/diagnostics/objects-debug.cc | 52 +-
 src/diagnostics/objects-printer.cc | 15 +-
 src/diagnostics/perf-jit.cc | 14 +-
 src/diagnostics/perf-jit.h | 5 +-
 src/execution/frames.cc | 41 +-
 src/execution/frames.h | 4 +-
 src/execution/isolate-data.h | 5 +-
 src/execution/isolate-utils-inl.h | 4 +-
 src/execution/isolate.cc | 24 +-
 src/execution/isolate.h | 5 +-
 src/extensions/statistics-extension.cc | 4 +-
 src/heap/code-range.cc | 8 +-
 src/heap/code-stats.cc | 6 +-
 src/heap/concurrent-allocator.cc | 6 +-
 src/heap/concurrent-marking.cc | 13 +-
 src/heap/evacuation-verifier.cc | 18 +-
 src/heap/evacuation-verifier.h | 8 +-
 src/heap/factory-base.cc | 12 +-
 src/heap/factory-base.h | 2 +-
 src/heap/factory.cc | 55 +-
 src/heap/factory.h | 18 +-
 src/heap/heap-inl.h | 6 +-
 src/heap/heap-verifier.cc | 29 +-
 src/heap/heap-write-barrier-inl.h | 43 +-
 src/heap/heap-write-barrier.cc | 7 +-
 src/heap/heap-write-barrier.h | 20 +-
 src/heap/heap.cc | 45 +-
 src/heap/heap.h | 44 +-
 src/heap/local-heap.h | 2 +-
 src/heap/mark-compact-inl.h | 2 +-
 src/heap/mark-compact.cc | 130 ++--
 src/heap/mark-compact.h | 11 +-
 src/heap/marking-barrier.cc | 11 +-
 src/heap/marking-barrier.h | 5 +-
 src/heap/marking-state.h | 2 +-
 src/heap/marking-visitor-inl.h | 16 +-
 src/heap/marking-visitor.h | 22 +-
 src/heap/object-stats.cc | 17 +-
 src/heap/object-stats.h | 4 +-
 src/heap/objects-visiting.h | 2 +-
 src/heap/paged-spaces-inl.h | 4 +-
 src/heap/read-only-spaces.cc | 4 +-
 src/heap/remembered-set-inl.h | 21 +-
 src/heap/remembered-set.h | 13 +-
 src/heap/scavenger-inl.h | 26 +-
 src/heap/scavenger.cc | 21 +-
 src/heap/setup-heap-internal.cc | 2 +-
 src/heap/sweeper.cc | 12 +-
 src/heap/weak-object-worklists.h | 2 +-
 src/ic/ic.cc | 2 +-
 src/init/setup-isolate.h | 5 +-
 src/interpreter/interpreter-generator.cc | 11 +-
 src/interpreter/interpreter-generator.h | 10 +-
 src/logging/code-events.h | 13 +-
 src/logging/log.cc | 42 +-
 src/logging/log.h | 23 +-
 src/maglev/maglev-code-generator.cc | 12 +-
 src/maglev/maglev-code-generator.h | 4 +-
 src/maglev/maglev-compiler.cc | 2 +-
 src/maglev/maglev-concurrent-dispatcher.h | 2 +-
 src/maglev/maglev-ir.h | 9 +-
 src/objects/call-site-info-inl.h | 8 +-
 src/objects/code-inl.h | 564 ++++++++-------
 src/objects/code.cc | 77 ++-
 src/objects/code.h | 267 +++----
 src/objects/compilation-cache-table.cc | 4 +-
 src/objects/instance-type-inl.h | 2 +-
 src/objects/instance-type.h | 2 +-
 src/objects/js-function-inl.h | 11 +-
 src/objects/js-function.cc | 2 +-
 src/objects/js-function.h | 8 +-
 src/objects/js-regexp.cc | 2 +-
 src/objects/js-regexp.h | 8 +-
 src/objects/map.cc | 4 +-
 src/objects/map.h | 2 +-
 src/objects/object-list-macros.h | 2 +-
 src/objects/objects-body-descriptors-inl.h | 20 +-
 src/objects/objects-inl.h | 18 +-
 src/objects/objects.cc | 15 +-
 src/objects/objects.h | 16 +-
 src/objects/tagged-impl.cc | 9 +-
 src/objects/tagged-impl.h | 16 +-
 src/objects/visitors.h | 35 +-
 src/parsing/parser-base.h | 7 +-
 src/parsing/parser.cc | 2 +-
 src/profiler/cpu-profiler-inl.h | 41 +-
 src/profiler/cpu-profiler.cc | 3 +-
 src/profiler/cpu-profiler.h | 30 +-
 src/profiler/heap-snapshot-generator.cc | 43 +-
 src/profiler/heap-snapshot-generator.h | 2 +-
 src/profiler/profile-generator.cc | 23 +-
 src/profiler/profile-generator.h | 20 +-
 src/profiler/profiler-listener.cc | 17 +-
 src/profiler/profiler-listener.h | 9 +-
 src/profiler/symbolizer.cc | 3 +-
 src/profiler/symbolizer.h | 12 +-
 src/profiler/weak-code-registry.h | 5 +-
 src/regexp/arm/regexp-macro-assembler-arm.cc | 11 +-
 .../arm64/regexp-macro-assembler-arm64.cc | 12 +-
 .../ia32/regexp-macro-assembler-ia32.cc | 9 +-
 src/regexp/regexp-compiler-tonode.cc | 3 +-
 src/regexp/regexp-compiler.cc | 4 +-
 src/regexp/regexp-macro-assembler.cc | 4 +-
 src/regexp/regexp-macro-assembler.h | 5 +-
 src/regexp/regexp.cc | 5 +-
 src/regexp/x64/regexp-macro-assembler-x64.cc | 36 +-
 src/regexp/x64/regexp-macro-assembler-x64.h | 9 +-
 src/roots/roots.h | 2 +-
 src/roots/static-roots.h | 4 +-
 src/runtime/runtime-compiler.cc | 12 +-
 src/runtime/runtime-regexp.cc | 9 +-
 src/runtime/runtime-test.cc | 9 +-
 src/snapshot/code-serializer.cc | 4 +-
 src/snapshot/context-serializer.cc | 10 +-
 src/snapshot/deserializer.cc | 56 +-
 src/snapshot/deserializer.h | 4 +-
 src/snapshot/embedded/embedded-data.cc | 32 +-
 src/snapshot/embedded/embedded-data.h | 14 +-
 .../embedded/embedded-file-writer-interface.h | 2 +-
 src/snapshot/embedded/embedded-file-writer.cc | 2 +-
 .../platform-embedded-file-writer-aix.cc | 2 +-
 .../platform-embedded-file-writer-generic.cc | 2 +-
 .../platform-embedded-file-writer-mac.cc | 2 +-
 src/snapshot/serializer-deserializer.h | 3 +-
 src/snapshot/serializer-inl.h | 14 +-
 src/snapshot/serializer.cc | 98 +--
 src/snapshot/serializer.h | 16 +-
 src/snapshot/startup-serializer.cc | 4 +-
 src/wasm/function-compiler.cc | 2 +-
 src/wasm/module-compiler.cc | 2 +-
 src/wasm/wasm-code-manager.cc | 17 +-
 src/wasm/wasm-code-manager.h | 18 +-
 src/web-snapshot/web-snapshot.cc | 4 +-
 test/cctest/assembler-helper-arm.cc | 6 +-
 test/cctest/assembler-helper-arm.h | 4 +-
 test/cctest/compiler/codegen-tester.h | 4 +-
 test/cctest/compiler/function-tester.cc | 9 +-
 test/cctest/compiler/function-tester.h | 4 +-
 test/cctest/compiler/test-code-assembler.cc | 2 +-
 test/cctest/compiler/test-code-generator.cc | 47 +-
 .../test-concurrent-shared-function-info.cc | 2 +-
 test/cctest/compiler/test-multiple-return.cc | 35 +-
 test/cctest/compiler/test-run-native-calls.cc | 9 +-
 .../cctest/heap/test-concurrent-allocation.cc | 10 +-
 test/cctest/heap/test-heap.cc | 54 +-
 test/cctest/test-accessor-assembler.cc | 25 +-
 test/cctest/test-api.cc | 22 +-
 test/cctest/test-assembler-arm.cc | 91 +--
 test/cctest/test-assembler-arm64.cc | 4 +-
 test/cctest/test-assembler-ia32.cc | 46 +-
 test/cctest/test-cpu-profiler.cc | 94 +--
 test/cctest/test-disasm-regex-helper.cc | 2 +-
 test/cctest/test-field-type-tracking.cc | 34 +-
 test/cctest/test-heap-profiler.cc | 6 +-
 test/cctest/test-macro-assembler-arm.cc | 4 +-
 test/cctest/test-profile-generator.cc | 159 +++--
 test/cctest/test-ptr-compr-cage.cc | 4 +-
 test/cctest/test-serialize.cc | 4 +-
 test/cctest/test-sync-primitives-arm64.cc | 4 +-
 test/cctest/test-unwinder-code-pages.cc | 4 +-
 test/cctest/wasm/wasm-run-utils.cc | 5 +-
 test/cctest/wasm/wasm-run-utils.h | 8 +-
 test/common/call-tester.h | 5 +-
 test/common/code-assembler-tester.h | 6 +-
 test/fuzzer/multi-return.cc | 6 +-
 test/mjsunit/tools/tickprocessor.mjs | 4 +-
 .../assembler/assembler-x64-unittest.cc | 44 +-
 .../assembler/disasm-ia32-unittest.cc | 2 +-
 .../assembler/disasm-x64-unittest.cc | 4 +-
 .../macro-assembler-arm64-unittest.cc | 2 +-
 .../assembler/macro-assembler-x64-unittest.cc | 2 +-
 .../assembler/turbo-assembler-arm-unittest.cc | 2 +-
 .../turbo-assembler-arm64-unittest.cc | 2 +-
 .../unittests/codegen/code-layout-unittest.cc | 4 +-
 test/unittests/codegen/code-pages-unittest.cc | 12 +-
 test/unittests/codegen/factory-unittest.cc | 6 +-
 test/unittests/compiler/codegen-tester.h | 4 +-
 test/unittests/compiler/function-tester.cc | 6 +-
 test/unittests/compiler/function-tester.h | 5 +-
 .../run-bytecode-graph-builder-unittest.cc | 2 +-
 .../compiler/run-tail-calls-unittest.cc | 16 +-
 test/unittests/parser/parsing-unittest.cc | 12 +-
 test/unittests/regexp/regexp-unittest.cc | 25 +-
 test/unittests/torque/torque-unittest.cc | 2 +-
 tools/gen-postmortem-metadata.py | 652 ++++++++++++------
 tools/torque/vim-torque/syntax/torque.vim | 2 +-
 tools/v8heapconst.py | 34 +-
 273 files changed, 2997 insertions(+), 2376 deletions(-)

diff --git a/src/baseline/baseline-batch-compiler.cc b/src/baseline/baseline-batch-compiler.cc
index b84df2f3f5..edd9a23588 100644
--- a/src/baseline/baseline-batch-compiler.cc
+++ b/src/baseline/baseline-batch-compiler.cc
@@ -53,7 +53,7 @@ class BaselineCompilerTask {
     compiler.GenerateCode();
     maybe_code_ = local_isolate->heap()->NewPersistentMaybeHandle(
         compiler.Build(local_isolate));
-    Handle<Code> code;
+    Handle<InstructionStream> code;
     if (maybe_code_.ToHandle(&code)) {
       local_isolate->heap()->RegisterCodeObject(code);
     }
@@ -63,7 +63,7 @@ class BaselineCompilerTask {
   // Executed in the main thread.
   void Install(Isolate* isolate) {
     shared_function_info_->set_is_sparkplug_compiling(false);
-    Handle<Code> code;
+    Handle<InstructionStream> code;
     if (!maybe_code_.ToHandle(&code)) return;
     if (v8_flags.print_code) {
       code->Print();
@@ -97,7 +97,7 @@ class BaselineCompilerTask {
  private:
   Handle<SharedFunctionInfo> shared_function_info_;
   Handle<BytecodeArray> bytecode_;
-  MaybeHandle<Code> maybe_code_;
+  MaybeHandle<InstructionStream> maybe_code_;
   double time_taken_ms_;
 };

diff --git a/src/baseline/baseline-compiler.cc b/src/baseline/baseline-compiler.cc
index 25123cb7cd..29a46eebd8 100644
--- a/src/baseline/baseline-compiler.cc
+++ b/src/baseline/baseline-compiler.cc
@@ -337,7 +337,8 @@ void BaselineCompiler::GenerateCode() {
   }
 }

-MaybeHandle<Code> BaselineCompiler::Build(LocalIsolate* local_isolate) {
+MaybeHandle<InstructionStream> BaselineCompiler::Build(
+    LocalIsolate* local_isolate) {
   CodeDesc desc;
   __ GetCode(local_isolate->GetMainThreadIsolateUnsafe(), &desc);

diff --git a/src/baseline/baseline-compiler.h b/src/baseline/baseline-compiler.h
index 7e8a5725d5..40b5469074 100644
--- a/src/baseline/baseline-compiler.h
+++ b/src/baseline/baseline-compiler.h
@@ -58,7 +58,7 @@ class BaselineCompiler {
                    Handle<BytecodeArray> bytecode);

   void GenerateCode();
-  MaybeHandle<Code> Build(LocalIsolate* local_isolate);
+  MaybeHandle<InstructionStream> Build(LocalIsolate* local_isolate);
   static int EstimateInstructionSize(BytecodeArray bytecode);

  private:

diff --git a/src/baseline/baseline.cc b/src/baseline/baseline.cc
index 03ed08a6d9..640170297c 100644
--- a/src/baseline/baseline.cc
+++ b/src/baseline/baseline.cc
@@ -56,14 +56,14 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
   return true;
 }

-MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
-                                       Handle<SharedFunctionInfo> shared) {
+MaybeHandle<InstructionStream> GenerateBaselineCode(
+    Isolate* isolate, Handle<SharedFunctionInfo> shared) {
   RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline);
   Handle<BytecodeArray> bytecode(shared->GetBytecodeArray(isolate), isolate);
   LocalIsolate* local_isolate = isolate->main_thread_local_isolate();
   baseline::BaselineCompiler compiler(local_isolate, shared, bytecode);
   compiler.GenerateCode();
-  MaybeHandle<Code> code = compiler.Build(local_isolate);
+  MaybeHandle<InstructionStream> code = compiler.Build(local_isolate);
   if (v8_flags.print_code && !code.is_null()) {
     code.ToHandleChecked()->Print();
   }
@@ -86,8 +86,8 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
   return false;
 }

-MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
-                                       Handle<SharedFunctionInfo> shared) {
+MaybeHandle<InstructionStream> GenerateBaselineCode(
+    Isolate* isolate, Handle<SharedFunctionInfo> shared) {
   UNREACHABLE();
 }

diff --git a/src/baseline/baseline.h b/src/baseline/baseline.h
index 10a6e25e4f..d36d71752d 100644
--- a/src/baseline/baseline.h
+++ b/src/baseline/baseline.h
@@ -10,14 +10,14 @@ namespace v8 {
 namespace internal {

-class Code;
+class InstructionStream;
 class SharedFunctionInfo;
 class MacroAssembler;

 bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared);

-MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
-                                       Handle<SharedFunctionInfo> shared);
+MaybeHandle<InstructionStream> GenerateBaselineCode(
+    Isolate* isolate, Handle<SharedFunctionInfo> shared);

 void EmitReturnBaseline(MacroAssembler* masm);

diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc
index d6680be5e2..adb6081f4b 100644
--- a/src/builtins/arm/builtins-arm.cc
+++ b/src/builtins/arm/builtins-arm.cc
@@ -1715,8 +1715,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream
+    // object, it must NOT be marked_for_deoptimization (callers must ensure this).
     __ cmp(maybe_target_code, Operand(Smi::zero()));
     __ b(ne, &jump_to_optimized_code);
   }
@@ -1759,16 +1759,20 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ LeaveFrame(StackFrame::STUB);
   }

-  __ LoadCodeDataContainerCodeNonBuiltin(r0, r0);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(r0, r0);

   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ ldr(r1,
-         FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
+  __ ldr(
+      r1,
+      FieldMemOperand(
+          r0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));

   {
     ConstantPoolUnavailableScope constant_pool_unavailable(masm);
-    __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
+    __ add(r0, r0,
+           Operand(InstructionStream::kHeaderSize -
+                   kHeapObjectTag));  // InstructionStream start

     // Load the OSR entrypoint offset from the deoptimization data.
     // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2000,7 +2004,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 }  // namespace

 // static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
@@ -3262,8 +3267,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {

 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
-  // purpose Code object) to be able to call into C functions that may trigger
-  // GC and thus move the caller.
+  // purpose InstructionStream object) to be able to call into C functions that
+  // may trigger GC and thus move the caller.
   //
   // DirectCEntry places the return address on the stack (updated by the GC),
   // making the call GC safe. The irregexp backend relies on this.
@@ -3557,7 +3562,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register closure = r1;
   __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

-  // Get the Code object from the shared function info.
+  // Get the InstructionStream object from the shared function info.
   Register code_obj = r4;
   __ ldr(code_obj,
          FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3588,7 +3593,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     AssertCodeDataContainerIsBaseline(masm, code_obj, r3);
   }
-  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);

   // Load the feedback vector.
   Register feedback_vector = r2;
@@ -3663,9 +3668,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     UseScratchRegisterScope temps(masm);
     ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, temps.Acquire());
     Generate_OSREntry(masm, code_obj,
-                      Operand(Code::kHeaderSize - kHeapObjectTag));
+                      Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
   } else {
-    __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ add(code_obj, code_obj,
+           Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
     __ Jump(code_obj);
   }
   __ Trap();  // Unreachable.

diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc
index 8abe1e749c..fdaf1c7bce 100644
--- a/src/builtins/arm64/builtins-arm64.cc
+++ b/src/builtins/arm64/builtins-arm64.cc
@@ -1952,8 +1952,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream
+    // object, it must NOT be marked_for_deoptimization (callers must ensure this).
     __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code);
   }
@@ -1994,13 +1994,14 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ LeaveFrame(StackFrame::STUB);
   }

-  __ LoadCodeDataContainerCodeNonBuiltin(x0, x0);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(x0, x0);

   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
   __ LoadTaggedPointerField(
       x1,
-      FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
+      FieldMemOperand(
+          x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));

   // Load the OSR entrypoint offset from the deoptimization data.
   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2011,7 +2012,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   // Compute the target address = code_obj + header_size + osr_offset
   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
   __ Add(x0, x0, x1);
-  Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
+  Generate_OSREntry(masm, x0, InstructionStream::kHeaderSize - kHeapObjectTag);
 }

 }  // namespace
@@ -2333,7 +2334,8 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
 }  // namespace

 // static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
@@ -5377,8 +5379,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {

 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
-  // purpose Code object) to be able to call into C functions that may trigger
-  // GC and thus move the caller.
+  // purpose InstructionStream object) to be able to call into C functions that
+  // may trigger GC and thus move the caller.
   //
   // DirectCEntry places the return address on the stack (updated by the GC),
   // making the call GC safe. The irregexp backend relies on this.
@@ -5693,7 +5695,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register closure = x1;
   __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

-  // Get the Code object from the shared function info.
+  // Get the InstructionStream object from the shared function info.
   Register code_obj = x22;
   __ LoadTaggedPointerField(
       code_obj,
@@ -5726,7 +5728,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     AssertCodeDataContainerIsBaseline(masm, code_obj, x3);
   }
-  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);

   // Load the feedback vector.
   Register feedback_vector = x2;
@@ -5799,9 +5801,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   if (is_osr) {
     ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
-    Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+    Generate_OSREntry(masm, code_obj,
+                      InstructionStream::kHeaderSize - kHeapObjectTag);
   } else {
-    __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+    __ Add(code_obj, code_obj, InstructionStream::kHeaderSize - kHeapObjectTag);
     __ Jump(code_obj);
   }
   __ Trap();  // Unreachable.
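The arm, arm64, ia32, and x64 OSR hunks above all implement the same address arithmetic. The following standalone C++ sketch restates it; it is illustrative only and not part of the patch, and the helper name, namespace, and plain-integer parameter types are assumptions made here for clarity.

// Illustrative sketch (not part of the patch): the OSR entry computation
// performed by the OnStackReplacement builtins above.
#include <cstdint>

namespace osr_sketch {

constexpr uintptr_t kHeapObjectTag = 1;  // V8 tags heap object pointers.

// tagged_istream: tagged pointer to the InstructionStream object.
// header_size:    InstructionStream::kHeaderSize in the patch.
// osr_pc_offset:  value loaded from DeoptimizationData's kOsrPcOffsetIndex.
inline uintptr_t OsrEntryAddress(uintptr_t tagged_istream,
                                 uintptr_t header_size,
                                 uintptr_t osr_pc_offset) {
  // Adding (header_size - kHeapObjectTag) both strips the tag and skips the
  // object header, yielding the first instruction; then add the OSR offset.
  return tagged_istream + (header_size - kHeapObjectTag) + osr_pc_offset;
}

}  // namespace osr_sketch

The same three quantities appear on every port; only the register allocation and addressing modes differ.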
diff --git a/src/builtins/base.tq b/src/builtins/base.tq
index 970c8362eb..f2c22763cd 100644
--- a/src/builtins/base.tq
+++ b/src/builtins/base.tq
@@ -229,7 +229,7 @@ type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
 type RawPtr<To: type> extends RawPtr;
 type ExternalPointer generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
-extern class Code extends HeapObject;
+extern class InstructionStream extends HeapObject;
 type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';

 type Number = Smi|HeapNumber;

diff --git a/src/builtins/builtins-lazy-gen.cc b/src/builtins/builtins-lazy-gen.cc
index 400dbde9e7..1dc3f73fe9 100644
--- a/src/builtins/builtins-lazy-gen.cc
+++ b/src/builtins/builtins-lazy-gen.cc
@@ -125,9 +125,10 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
   MaybeTailCallOptimizedCodeSlot(function, CAST(feedback_cell_value));
   Goto(&maybe_use_sfi_code);

-  // At this point we have a candidate Code object. It's *not* a cached
-  // optimized Code object (we'd have tail-called it above). A usual case would
-  // be the InterpreterEntryTrampoline to start executing existing bytecode.
+  // At this point we have a candidate InstructionStream object. It's *not* a
+  // cached optimized InstructionStream object (we'd have tail-called it above).
+  // A usual case would be the InterpreterEntryTrampoline to start executing
+  // existing bytecode.
   BIND(&maybe_use_sfi_code);
   Label tailcall_code(this), baseline(this);
   TVARIABLE(CodeDataContainer, code);

diff --git a/src/builtins/builtins.cc b/src/builtins/builtins.cc
index e34044d38d..cdcd56d58d 100644
--- a/src/builtins/builtins.cc
+++ b/src/builtins/builtins.cc
@@ -285,7 +285,7 @@ Address Builtins::CppEntryOf(Builtin builtin) {
 }

 // static
-bool Builtins::IsBuiltin(const Code code) {
+bool Builtins::IsBuiltin(const InstructionStream code) {
   return Builtins::IsBuiltinId(code.builtin_id());
 }

@@ -399,7 +399,7 @@ constexpr int OffHeapTrampolineGenerator::kBufferSize;
 }  // namespace

 // static
-Handle<Code> Builtins::GenerateOffHeapTrampolineFor(
+Handle<InstructionStream> Builtins::GenerateOffHeapTrampolineFor(
     Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
     bool generate_jump_to_instruction_stream) {
   DCHECK_NOT_NULL(isolate->embedded_blob_code());
@@ -429,14 +429,14 @@ Handle<ByteArray> Builtins::GenerateOffHeapTrampolineRelocInfo(
   Handle<ByteArray> reloc_info = isolate->factory()->NewByteArray(
       desc.reloc_size, AllocationType::kReadOnly);
-  Code::CopyRelocInfoToByteArray(*reloc_info, desc);
+  InstructionStream::CopyRelocInfoToByteArray(*reloc_info, desc);

   return reloc_info;
 }

 // static
-Handle<Code> Builtins::CreateInterpreterEntryTrampolineForProfiling(
-    Isolate* isolate) {
+Handle<InstructionStream>
+Builtins::CreateInterpreterEntryTrampolineForProfiling(Isolate* isolate) {
   DCHECK_NOT_NULL(isolate->embedded_blob_code());
   DCHECK_NE(0, isolate->embedded_blob_code_size());
@@ -525,18 +525,19 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
 // static
 bool Builtins::CodeObjectIsExecutable(Builtin builtin) {
   // If the runtime/optimized code always knows when executing a given builtin
-  // that it is a builtin, then that builtin does not need an executable Code
-  // object. Such Code objects can go in read_only_space (and can even be
-  // smaller with no branch instruction), thus saving memory.
+  // that it is a builtin, then that builtin does not need an executable
+  // InstructionStream object. Such InstructionStream objects can go in
+  // read_only_space (and can even be smaller with no branch instruction), thus
+  // saving memory.
-  // Builtins with JS linkage will always have executable Code objects since
-  // they can be called directly from jitted code with no way of determining
-  // that they are builtins at generation time. E.g.
+  // Builtins with JS linkage will always have executable InstructionStream
+  // objects since they can be called directly from jitted code with no way of
+  // determining that they are builtins at generation time. E.g.
   //   f = Array.of;
   //   f(1, 2, 3);
   // TODO(delphick): This is probably too loose but for now Wasm can call any JS
-  // linkage builtin via its Code object. Once Wasm is fixed this can either be
-  // tighted or removed completely.
+  // linkage builtin via its InstructionStream object. Once Wasm is fixed this
+  // can either be tightened or removed completely.
   if (Builtins::KindOf(builtin) != BCH && HasJSLinkage(builtin)) {
     return true;
   }

diff --git a/src/builtins/builtins.h b/src/builtins/builtins.h
index ba8e393a67..d8ec589fe1 100644
--- a/src/builtins/builtins.h
+++ b/src/builtins/builtins.h
@@ -175,8 +175,8 @@ class Builtins {
   static bool IsCpp(Builtin builtin);

   // True, iff the given code object is a builtin. Note that this does not
-  // necessarily mean that its kind is Code::BUILTIN.
-  static bool IsBuiltin(const Code code);
+  // necessarily mean that its kind is InstructionStream::BUILTIN.
+  static bool IsBuiltin(const InstructionStream code);

   // As above, but safe to access off the main thread since the check is done
   // by handle location. Similar to Heap::IsRootHandle.
@@ -232,7 +232,7 @@ class Builtins {
   // function.
   // TODO(delphick): Come up with a better name since it may not generate an
   // executable trampoline.
-  static Handle<Code> GenerateOffHeapTrampolineFor(
+  static Handle<InstructionStream> GenerateOffHeapTrampolineFor(
       Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
       bool generate_jump_to_instruction_stream);

@@ -241,12 +241,12 @@ class Builtins {
   static Handle<ByteArray> GenerateOffHeapTrampolineRelocInfo(Isolate* isolate);

   // Creates a copy of InterpreterEntryTrampolineForProfiling in the code space.
-  static Handle<Code> CreateInterpreterEntryTrampolineForProfiling(
+  static Handle<InstructionStream> CreateInterpreterEntryTrampolineForProfiling(
       Isolate* isolate);

   // Only builtins with JS linkage should ever need to be called via their
-  // trampoline Code object. The remaining builtins have non-executable Code
-  // objects.
+  // trampoline InstructionStream object. The remaining builtins have
+  // non-executable InstructionStream objects.
   static bool CodeObjectIsExecutable(Builtin builtin);

   static bool IsJSEntryVariant(Builtin builtin) {
@@ -336,8 +336,8 @@ class Builtins {
 };

 V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
-  // Check for kNoBuiltinId first to abort early when the current Code object
-  // is not a builtin.
+  // Check for kNoBuiltinId first to abort early when the current
+  // InstructionStream object is not a builtin.
   return builtin_id != Builtin::kNoBuiltinId &&
          (builtin_id == Builtin::kInterpreterEntryTrampoline ||
          builtin_id == Builtin::kInterpreterEnterAtBytecode ||
@@ -345,8 +345,8 @@
 }

 V8_INLINE constexpr bool IsBaselineTrampolineBuiltin(Builtin builtin_id) {
-  // Check for kNoBuiltinId first to abort early when the current Code object
-  // is not a builtin.
+  // Check for kNoBuiltinId first to abort early when the current
+  // InstructionStream object is not a builtin.
   return builtin_id != Builtin::kNoBuiltinId &&
          (builtin_id == Builtin::kBaselineOutOfLinePrologue ||
          builtin_id == Builtin::kBaselineOutOfLinePrologueDeopt ||

diff --git a/src/builtins/cast.tq b/src/builtins/cast.tq
index 0d347e3dd3..1f26291d49 100644
--- a/src/builtins/cast.tq
+++ b/src/builtins/cast.tq
@@ -30,8 +30,8 @@ macro IsCell(o: HeapObject): bool {
 }

 @export
-macro IsCode(o: HeapObject): bool {
-  return Is<Code>(o);
+macro IsInstructionStream(o: HeapObject): bool {
+  return Is<InstructionStream>(o);
 }

 @export

diff --git a/src/builtins/constants-table-builder.cc b/src/builtins/constants-table-builder.cc
index 49002a9c31..c4177172fa 100644
--- a/src/builtins/constants-table-builder.cc
+++ b/src/builtins/constants-table-builder.cc
@@ -43,7 +43,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {

   // All code objects should be loaded through the root register or use
   // pc-relative addressing.
-  DCHECK(!object->IsCode());
+  DCHECK(!object->IsInstructionStream());
 #endif

   auto find_result = map_.FindOrInsert(object);
@@ -73,7 +73,7 @@ void CheckPreconditionsForPatching(Isolate* isolate,
 }  // namespace

 void BuiltinsConstantsTableBuilder::PatchSelfReference(
-    Handle<Object> self_reference, Handle<Code> code_object) {
+    Handle<Object> self_reference, Handle<InstructionStream> code_object) {
   CheckPreconditionsForPatching(isolate_, code_object);
   DCHECK(self_reference->IsOddball());
   DCHECK(Oddball::cast(*self_reference).kind() ==
@@ -81,7 +81,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
   uint32_t key;
   if (map_.Delete(self_reference, &key)) {
-    DCHECK(code_object->IsCode());
+    DCHECK(code_object->IsInstructionStream());
     map_.Insert(code_object, key);
   }
 }
@@ -115,12 +115,13 @@ void BuiltinsConstantsTableBuilder::Finalize() {
   for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
     uint32_t index = *it.entry();
     Object value = it.key();
-    if (value.IsCode() && Code::cast(value).kind() == CodeKind::BUILTIN) {
+    if (value.IsInstructionStream() &&
+        InstructionStream::cast(value).kind() == CodeKind::BUILTIN) {
       // Replace placeholder code objects with the real builtin.
       // See also: SetupIsolateDelegate::PopulateWithPlaceholders.
       // TODO(jgruber): Deduplicate placeholders and their corresponding
       // builtin.
-      value = builtins->code(Code::cast(value).builtin_id());
+      value = builtins->code(InstructionStream::cast(value).builtin_id());
     }
     DCHECK(value.IsHeapObject());
     table->set(index, value);

diff --git a/src/builtins/constants-table-builder.h b/src/builtins/constants-table-builder.h
index 56547a445e..e8d79202ef 100644
--- a/src/builtins/constants-table-builder.h
+++ b/src/builtins/constants-table-builder.h
@@ -33,10 +33,10 @@ class BuiltinsConstantsTableBuilder final {
   uint32_t AddObject(Handle<Object> object);

   // Self-references during code generation start out by referencing a handle
-  // with a temporary dummy object. Once the final Code object exists, such
-  // entries in the constants map must be patched up.
+  // with a temporary dummy object. Once the final InstructionStream object
+  // exists, such entries in the constants map must be patched up.
   void PatchSelfReference(Handle<Object> self_reference,
-                          Handle<Code> code_object);
+                          Handle<InstructionStream> code_object);

   // References to the array that stores basic block usage counters start out as
   // references to a unique oddball. Once the actual array has been allocated,

diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc
index 75b21fd37f..31be041290 100644
--- a/src/builtins/ia32/builtins-ia32.cc
+++ b/src/builtins/ia32/builtins-ia32.cc
@@ -2052,7 +2052,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 }  // namespace

 // static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
@@ -2683,8 +2684,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream
+    // object, it must NOT be marked_for_deoptimization (callers must ensure this).
     __ cmp(maybe_target_code, Immediate(0));
     __ j(not_equal, &jump_to_optimized_code, Label::kNear);
   }
@@ -2727,11 +2728,13 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ leave();
   }

-  __ LoadCodeDataContainerCodeNonBuiltin(eax, eax);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(eax, eax);

   // Load deoptimization data from the code object.
-  __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOrInterpreterDataOffset -
-                               kHeapObjectTag));
+  __ mov(ecx,
+         Operand(eax,
+                 InstructionStream::kDeoptimizationDataOrInterpreterDataOffset -
+                     kHeapObjectTag));

   // Load the OSR entrypoint offset from the deoptimization data.
   __ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt(
@@ -2740,7 +2743,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   __ SmiUntag(ecx);

   // Compute the target address = code_obj + header_size + osr_offset
-  __ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
+  __ lea(eax, Operand(eax, ecx, times_1,
+                      InstructionStream::kHeaderSize - kHeapObjectTag));

   Generate_OSREntry(masm, eax);
 }
@@ -4035,7 +4039,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
   __ mov(Operand(esp, 0 * kSystemPointerSize), eax);  // Function.
   __ mov(Operand(esp, 1 * kSystemPointerSize),
          Immediate(static_cast<int>(deopt_kind)));
-  __ mov(Operand(esp, 2 * kSystemPointerSize), ecx);  // Code address or 0.
+  __ mov(Operand(esp, 2 * kSystemPointerSize),
+         ecx);  // InstructionStream address or 0.
   __ mov(Operand(esp, 3 * kSystemPointerSize), edx);  // Fp-to-sp delta.
   __ Move(Operand(esp, 4 * kSystemPointerSize),
           Immediate(ExternalReference::isolate_address(masm->isolate())));
@@ -4197,7 +4202,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register closure = eax;
   __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));

-  // Get the Code object from the shared function info.
+  // Get the InstructionStream object from the shared function info.
   Register code_obj = esi;
   __ mov(code_obj,
          FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -4230,7 +4235,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     AssertCodeDataContainerIsBaseline(masm, code_obj, ecx);
   }
-  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);

   // Load the feedback vector.
   Register feedback_vector = ecx;
@@ -4296,8 +4301,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                kInterpreterBytecodeArrayRegister);
     __ CallCFunction(get_baseline_pc, 3);
   }
-  __ lea(code_obj,
-         FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
+  __ lea(code_obj, FieldOperand(code_obj, kReturnRegister0, times_1,
+                                InstructionStream::kHeaderSize));
   __ pop(kInterpreterAccumulatorRegister);

   if (is_osr) {

diff --git a/src/builtins/setup-builtins-internal.cc b/src/builtins/setup-builtins-internal.cc
index 08a71f0d39..05860d8b20 100644
--- a/src/builtins/setup-builtins-internal.cc
+++ b/src/builtins/setup-builtins-internal.cc
@@ -52,8 +52,9 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, Builtin builtin) {
   // PC-relative call/jump instructions can be used for builtin to builtin
   // calls/tail calls. The embedded builtins blob generator also ensures that.
   // However, there are serializer tests, where we force isolate creation at
-  // runtime and at this point, Code space isn't restricted to a size s.t.
-  // PC-relative calls may be used. So, we fall back to an indirect mode.
+  // runtime and at this point, Code space isn't restricted to a
+  // size s.t. PC-relative calls may be used. So, we fall back to an indirect
+  // mode.
   options.use_pc_relative_calls_and_jumps_for_mksnapshot =
       pc_relative_calls_fit_in_code_range;
@@ -75,7 +76,7 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, Builtin builtin) {
 using MacroAssemblerGenerator = void (*)(MacroAssembler*);
 using CodeAssemblerGenerator = void (*)(compiler::CodeAssemblerState*);

-Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
+Handle<InstructionStream> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
   HandleScope scope(isolate);
   byte buffer[kBufferSize];
   MacroAssembler masm(isolate, CodeObjectRequired::kYes,
@@ -90,16 +91,17 @@ Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
   }
   CodeDesc desc;
   masm.GetCode(isolate, &desc);
-  Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
-                          .set_self_reference(masm.CodeObject())
-                          .set_builtin(builtin)
-                          .Build();
+  Handle<InstructionStream> code =
+      Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
+          .set_self_reference(masm.CodeObject())
+          .set_builtin(builtin)
+          .Build();
   return scope.CloseAndEscape(code);
 }

-Code BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
-                             MacroAssemblerGenerator generator,
-                             const char* s_name) {
+InstructionStream BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
+                                          MacroAssemblerGenerator generator,
+                                          const char* s_name) {
   HandleScope scope(isolate);
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
@@ -130,18 +132,19 @@ InstructionStream BuildWithMacroAssembler(Isolate* isolate, Builtin builtin,
   masm.GetCode(isolate, &desc, MacroAssembler::kNoSafepointTable,
                handler_table_offset);
-  Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
-                          .set_self_reference(masm.CodeObject())
-                          .set_builtin(builtin)
-                          .Build();
+  Handle<InstructionStream> code =
+      Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
+          .set_self_reference(masm.CodeObject())
+          .set_builtin(builtin)
+          .Build();
 #if defined(V8_OS_WIN64)
   isolate->SetBuiltinUnwindData(builtin, masm.GetUnwindInfo());
 #endif  // V8_OS_WIN64
   return *code;
 }

-Code BuildAdaptor(Isolate* isolate, Builtin builtin, Address builtin_address,
-                  const char* name) {
+InstructionStream BuildAdaptor(Isolate* isolate, Builtin builtin,
+                               Address builtin_address, const char* name) {
   HandleScope scope(isolate);
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
@@ -155,17 +158,19 @@ InstructionStream BuildAdaptor(Isolate* isolate, Builtin builtin,
   Builtins::Generate_Adaptor(&masm, builtin_address);
   CodeDesc desc;
   masm.GetCode(isolate, &desc);
-  Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
-                          .set_self_reference(masm.CodeObject())
-                          .set_builtin(builtin)
-                          .Build();
+  Handle<InstructionStream> code =
+      Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
+          .set_self_reference(masm.CodeObject())
+          .set_builtin(builtin)
+          .Build();
   return *code;
 }

 // Builder for builtins implemented in TurboFan with JS linkage.
-Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
-                                  CodeAssemblerGenerator generator, int argc,
-                                  const char* name) {
+InstructionStream BuildWithCodeStubAssemblerJS(Isolate* isolate,
+                                               Builtin builtin,
+                                               CodeAssemblerGenerator generator,
+                                               int argc, const char* name) {
   HandleScope scope(isolate);
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
@@ -175,17 +180,16 @@ InstructionStream BuildWithCodeStubAssemblerJS(Isolate* isolate,
   compiler::CodeAssemblerState state(isolate, &zone, argc, CodeKind::BUILTIN,
                                      name, builtin);
   generator(&state);
-  Handle<Code> code = compiler::CodeAssembler::GenerateCode(
+  Handle<InstructionStream> code = compiler::CodeAssembler::GenerateCode(
       &state, BuiltinAssemblerOptions(isolate, builtin),
       ProfileDataFromFile::TryRead(name));
   return *code;
 }

 // Builder for builtins implemented in TurboFan with CallStub linkage.
-Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
-                                  CodeAssemblerGenerator generator,
-                                  CallDescriptors::Key interface_descriptor,
-                                  const char* name) {
+InstructionStream BuildWithCodeStubAssemblerCS(
+    Isolate* isolate, Builtin builtin, CodeAssemblerGenerator generator,
+    CallDescriptors::Key interface_descriptor, const char* name) {
   HandleScope scope(isolate);
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
@@ -199,7 +203,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
   compiler::CodeAssemblerState state(isolate, &zone, descriptor,
                                      CodeKind::BUILTIN, name, builtin);
   generator(&state);
-  Handle<Code> code = compiler::CodeAssembler::GenerateCode(
+  Handle<InstructionStream> code = compiler::CodeAssembler::GenerateCode(
       &state, BuiltinAssemblerOptions(isolate, builtin),
       ProfileDataFromFile::TryRead(name));
   return *code;
@@ -209,7 +213,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,

 // static
 void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin,
-                                      Code code) {
+                                      InstructionStream code) {
   DCHECK_EQ(builtin, code.builtin_id());
   builtins->set_code(builtin, ToCodeDataContainer(code));
 }
@@ -223,7 +227,7 @@ void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
   HandleScope scope(isolate);
   for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
        ++builtin) {
-    Handle<Code> placeholder = BuildPlaceholder(isolate, builtin);
+    Handle<InstructionStream> placeholder = BuildPlaceholder(isolate, builtin);
     AddBuiltin(builtins, builtin, *placeholder);
   }
 }
@@ -242,14 +246,15 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
   PtrComprCageBase cage_base(isolate);
   for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
        ++builtin) {
-    Code code = FromCodeDataContainer(builtins->code(builtin));
+    InstructionStream code = FromCodeDataContainer(builtins->code(builtin));
     isolate->heap()->UnprotectAndRegisterMemoryChunk(
         code, UnprotectMemoryOrigin::kMainThread);
     bool flush_icache = false;
     for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
       RelocInfo* rinfo = it.rinfo();
       if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) {
-        Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+        InstructionStream target = InstructionStream::GetCodeFromTargetAddress(
+            rinfo->target_address());
         DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
                        Builtins::IsIsolateIndependent(target.builtin_id()));
         if (!target.is_builtin()) continue;
@@ -277,11 +282,11 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {

 namespace {

-Code GenerateBytecodeHandler(Isolate* isolate, Builtin builtin,
-                             interpreter::OperandScale operand_scale,
-                             interpreter::Bytecode bytecode) {
+InstructionStream GenerateBytecodeHandler(
+    Isolate* isolate, Builtin builtin, interpreter::OperandScale operand_scale,
+    interpreter::Bytecode bytecode) {
   DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
-  Handle<Code> code = interpreter::GenerateBytecodeHandler(
+  Handle<InstructionStream> code = interpreter::GenerateBytecodeHandler(
       isolate, Builtins::name(builtin), bytecode, operand_scale, builtin,
       BuiltinAssemblerOptions(isolate, builtin));
   return *code;
@@ -300,7 +305,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
   HandleScope scope(isolate);

   int index = 0;
-  Code code;
+  InstructionStream code;
 #define BUILD_CPP(Name)                                      \
   code = BuildAdaptor(isolate, Builtin::k##Name,             \
                       FUNCTION_ADDR(Builtin_##Name), #Name); \

diff --git a/src/builtins/wasm.tq b/src/builtins/wasm.tq
index a56fc22efc..27184f49e3 100644
--- a/src/builtins/wasm.tq
+++ b/src/builtins/wasm.tq
@@ -504,7 +504,7 @@ builtin WasmI64AtomicWait(

 // Type feedback collection support for `call_ref`.
-extern macro GetCodeEntry(Code): RawPtr;
+extern macro GetCodeEntry(InstructionStream): RawPtr;
 extern macro GetCodeEntry(CodeDataContainer): RawPtr;

 struct TargetAndInstance {

diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
index 9cf3eeba2a..b53b103876 100644
--- a/src/builtins/x64/builtins-x64.cc
+++ b/src/builtins/x64/builtins-x64.cc
@@ -2046,7 +2046,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
 }  // namespace

 // static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<CodeDataContainer> code) {
   // ----------- S t a t e -------------
@@ -2621,8 +2622,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream
+    // object, it must NOT be marked_for_deoptimization (callers must ensure this).
     __ testq(maybe_target_code, maybe_target_code);
     __ j(not_equal, &jump_to_optimized_code, Label::kNear);
   }
@@ -2673,13 +2674,14 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
     __ leave();
   }

-  __ LoadCodeDataContainerCodeNonBuiltin(rax, rax);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(rax, rax);

   // Load deoptimization data from the code object.
   const TaggedRegister deopt_data(rbx);
   __ LoadTaggedPointerField(
       deopt_data,
-      FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
+      FieldOperand(
+          rax, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));

   // Load the OSR entrypoint offset from the deoptimization data.
   __ SmiUntagField(
@@ -2688,7 +2690,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
                      DeoptimizationData::kOsrPcOffsetIndex)));

   // Compute the target address = code_obj + header_size + osr_offset
-  __ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
+  __ leaq(rax, FieldOperand(rax, rbx, times_1, InstructionStream::kHeaderSize));

   Generate_OSREntry(masm, rax);
 }
@@ -2772,13 +2774,14 @@ void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
   // before deoptimizing.
   {
     static constexpr int kCodeStartToCodeDataContainerOffset =
-        Code::kCodeDataContainerOffset - Code::kHeaderSize;
+        InstructionStream::kCodeDataContainerOffset -
+        InstructionStream::kHeaderSize;
     __ LoadTaggedPointerField(scratch0,
                               Operand(kJavaScriptCallCodeStartRegister,
                                       kCodeStartToCodeDataContainerOffset));
     __ testl(
         FieldOperand(scratch0, CodeDataContainer::kKindSpecificFlagsOffset),
-        Immediate(1 << Code::kMarkedForDeoptimizationBit));
+        Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
     __ j(not_zero, &deoptimize);
   }
@@ -5330,7 +5333,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register closure = rdi;
   __ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));

-  // Get the Code object from the shared function info.
+  // Get the InstructionStream object from the shared function info.
   Register code_obj = rbx;
   TaggedRegister shared_function_info(code_obj);
   __ LoadTaggedPointerField(
       code_obj,
@@ -5364,7 +5367,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (v8_flags.debug_code) {
     AssertCodeDataContainerIsBaseline(masm, code_obj, r11);
   }
-  __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+  __ LoadCodeDataContainerInstructionStreamNonBuiltin(code_obj, code_obj);

   // Load the feedback vector.
   Register feedback_vector = r11;
@@ -5431,8 +5434,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     __ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
     __ CallCFunction(get_baseline_pc, 3);
   }
-  __ leaq(code_obj,
-          FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
+  __ leaq(code_obj, FieldOperand(code_obj, kReturnRegister0, times_1,
+                                 InstructionStream::kHeaderSize));
   __ popq(kInterpreterAccumulatorRegister);

   if (is_osr) {

diff --git a/src/codegen/arm/assembler-arm-inl.h b/src/codegen/arm/assembler-arm-inl.h
index c92eb18466..945acfb187 100644
--- a/src/codegen/arm/assembler-arm-inl.h
+++ b/src/codegen/arm/assembler-arm-inl.h
@@ -190,7 +190,7 @@ void Assembler::emit(Instr x) {
 }

 void Assembler::deserialization_set_special_target_at(
-    Address constant_pool_entry, Code code, Address target) {
+    Address constant_pool_entry, InstructionStream code, Address target) {
   DCHECK(!Builtins::IsIsolateIndependentBuiltin(code));
   Memory<Address>(constant_pool_entry) = target;
 }

diff --git a/src/codegen/arm/assembler-arm.cc b/src/codegen/arm/assembler-arm.cc
index 3fe769a0ec..8e4e1171e9 100644
--- a/src/codegen/arm/assembler-arm.cc
+++ b/src/codegen/arm/assembler-arm.cc
@@ -553,13 +553,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                         SafepointTableBuilder* safepoint_table_builder,
                         int handler_table_offset) {
   // As a crutch to avoid having to add manual Align calls wherever we use a
-  // raw workflow to create Code objects (mostly in tests), add another Align
-  // call here. It does no harm - the end of the Code object is aligned to the
-  // (larger) kCodeAlignment anyways.
+  // raw workflow to create InstructionStream objects (mostly in tests), add
+  // another Align call here. It does no harm - the end of the InstructionStream
+  // object is aligned to the (larger) kCodeAlignment anyways.
   // TODO(jgruber): Consider moving responsibility for proper alignment to
   // metadata table builders (safepoint, handler, constant pool, code
   // comments).
-  DataAlign(Code::kMetadataAlignment);
+  DataAlign(InstructionStream::kMetadataAlignment);

   // Emit constant pool if necessary.
   CheckConstPool(true, false);
@@ -831,7 +831,8 @@ void Assembler::target_at_put(int pos, int target_pos) {
     //   orr dst, dst, #target8_1 << 8
     //   orr dst, dst, #target8_2 << 16

-    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+    uint32_t target24 =
+        target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
     CHECK(is_uint24(target24));
     if (is_uint8(target24)) {
       // If the target fits in a byte then only patch with a mov
@@ -1635,7 +1636,8 @@ void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {

 void Assembler::mov_label_offset(Register dst, Label* label) {
   if (label->is_bound()) {
-    mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
+    mov(dst, Operand(label->pos() +
+                     (InstructionStream::kHeaderSize - kHeapObjectTag)));
   } else {
     // Emit the link to the label in the code stream followed by extra nop
     // instructions.
@@ -5252,7 +5254,8 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   if (!ShouldRecordRelocInfo(rmode)) return;
   DCHECK_GE(buffer_space(), kMaxRelocSize);  // too late to grow buffer here
-  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data,
+                  InstructionStream());
   reloc_info_writer.Write(&rinfo);
 }

diff --git a/src/codegen/arm/assembler-arm.h b/src/codegen/arm/assembler-arm.h
index 2c45c48a1c..6e65ca4f89 100644
--- a/src/codegen/arm/assembler-arm.h
+++ b/src/codegen/arm/assembler-arm.h
@@ -367,7 +367,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   // This sets the branch destination (which is in the constant pool on ARM).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
-      Address constant_pool_entry, Code code, Address target);
+      Address constant_pool_entry, InstructionStream code, Address target);

   // Get the size of the special target encoded at 'location'.
   inline static int deserialization_special_target_size(Address location);
@@ -388,7 +388,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   }

   // ---------------------------------------------------------------------------
-  // Code generation
+  // InstructionStream generation

   // Insert the smallest number of nop instructions
   // possible to align the pc offset to a multiple
@@ -1252,7 +1252,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {

   inline void emit(Instr x);

-  // Code generation
+  // InstructionStream generation
   // The relocation writer's position is at least kGap bytes below the end of
   // the generated instructions. This is so that multi-instruction sequences do
   // not have to check for overflow. The same is true for writes of large

diff --git a/src/codegen/arm/macro-assembler-arm.cc b/src/codegen/arm/macro-assembler-arm.cc
index 79738e006c..d7653c1ad9 100644
--- a/src/codegen/arm/macro-assembler-arm.cc
+++ b/src/codegen/arm/macro-assembler-arm.cc
@@ -348,13 +348,14 @@ void TurboAssembler::LoadCodeDataContainerEntry(
                                CodeDataContainer::kCodeEntryPointOffset));
 }

-void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin(
     Register destination, Register code_data_container_object) {
   ASM_CODE_COMMENT(this);
-  // Compute the Code object pointer from the code entry point.
+  // Compute the InstructionStream object pointer from the code entry point.
   ldr(destination, FieldMemOperand(code_data_container_object,
                                    CodeDataContainer::kCodeEntryPointOffset));
-  sub(destination, destination, Operand(Code::kHeaderSize - kHeapObjectTag));
+  sub(destination, destination,
+      Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
 }

 void TurboAssembler::CallCodeDataContainerObject(
@@ -379,9 +380,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
   // This generates the final instruction sequence for calls to C functions
   // once an exit frame has been constructed.
   //
-  // Note that this assumes the caller code (i.e. the Code object currently
-  // being generated) is immovable or that the callee function cannot trigger
-  // GC, since the callee function will return to it.
+  // Note that this assumes the caller code (i.e. the InstructionStream object
+  // currently being generated) is immovable or that the callee function cannot
+  // trigger GC, since the callee function will return to it.

   // Compute the return address in lr to return to after the jump below. The pc
   // is already at '+ 8' from the current instruction; but return is after three
@@ -408,7 +409,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization(
     Register code_data_container, Register scratch) {
   ldr(scratch, FieldMemOperand(code_data_container,
                                CodeDataContainer::kKindSpecificFlagsOffset));
-  tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+  tst(scratch, Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
 }

 Operand MacroAssembler::ClearedValue() const {

diff --git a/src/codegen/arm/macro-assembler-arm.h b/src/codegen/arm/macro-assembler-arm.h
index dce5db1a5a..eb111aa358 100644
--- a/src/codegen/arm/macro-assembler-arm.h
+++ b/src/codegen/arm/macro-assembler-arm.h
@@ -327,11 +327,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void LoadCodeDataContainerEntry(Register destination,
                                   Register code_data_container_object);
   // Load code entry point from the CodeDataContainer object and compute
-  // Code object pointer out of it. Must not be used for CodeDataContainers
-  // corresponding to builtins, because their entry points values point to
-  // the embedded instruction stream in .text section.
-  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
-                                           Register code_data_container_object);
+  // InstructionStream object pointer out of it. Must not be used for
+  // CodeDataContainers corresponding to builtins, because their entry points
+  // values point to the embedded instruction stream in .text section.
+  void LoadCodeDataContainerInstructionStreamNonBuiltin(
+      Register destination, Register code_data_container_object);
   void CallCodeDataContainerObject(Register code_data_container_object);
   void JumpCodeDataContainerObject(Register code_data_container_object,
                                    JumpMode jump_mode = JumpMode::kJump);
@@ -912,7 +912,7 @@ struct MoveCycleState {
   VfpRegList scratch_v_reglist = 0;
   // Available scratch registers during the move cycle resolution scope.
   base::Optional<UseScratchRegisterScope> temps;
-  // Code of the scratch register picked by {MoveToTempLocation}.
+  // Register code of the scratch register picked by {MoveToTempLocation}.
   int scratch_reg_code = -1;
 };

diff --git a/src/codegen/arm64/assembler-arm64-inl.h b/src/codegen/arm64/assembler-arm64-inl.h
index deacdbeb23..3486606ee8 100644
--- a/src/codegen/arm64/assembler-arm64-inl.h
+++ b/src/codegen/arm64/assembler-arm64-inl.h
@@ -548,7 +548,7 @@ int Assembler::deserialization_special_target_size(Address location) {
 }

 void Assembler::deserialization_set_special_target_at(Address location,
-                                                      Code code,
+                                                      InstructionStream code,
                                                       Address target) {
   Instruction* instr = reinterpret_cast<Instruction*>(location);
   if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
@@ -661,8 +661,9 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
     DCHECK(!HAS_SMI_TAG(compressed));
     Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
                                                                 compressed));
-    // Embedding of compressed Code objects must not happen when external code
-    // space is enabled, because CodeDataContainers must be used instead.
+    // Embedding of compressed InstructionStream objects must not happen when
+    // external code space is enabled, because CodeDataContainers must be used
+    // instead.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(HeapObject::cast(obj))); return HeapObject::cast(obj); diff --git a/src/codegen/arm64/assembler-arm64.cc b/src/codegen/arm64/assembler-arm64.cc index e73c8be5a8..335566fb39 100644 --- a/src/codegen/arm64/assembler-arm64.cc +++ b/src/codegen/arm64/assembler-arm64.cc @@ -377,13 +377,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, SafepointTableBuilderBase* safepoint_table_builder, int handler_table_offset) { // As a crutch to avoid having to add manual Align calls wherever we use a - // raw workflow to create Code objects (mostly in tests), add another Align - // call here. It does no harm - the end of the Code object is aligned to the - // (larger) kCodeAlignment anyways. + // raw workflow to create InstructionStream objects (mostly in tests), add + // another Align call here. It does no harm - the end of the InstructionStream + // object is aligned to the (larger) kCodeAlignment anyways. // TODO(jgruber): Consider moving responsibility for proper alignment to // metadata table builders (safepoint, handler, constant pool, code // comments). - DataAlign(Code::kMetadataAlignment); + DataAlign(InstructionStream::kMetadataAlignment); // Emit constant pool if necessary. ForceConstantPoolEmissionWithoutJump(); @@ -3577,7 +3577,7 @@ Instr Assembler::ImmNEONFP(double imm) { return ImmNEONabcdefgh(FPToImm8(imm)); } -// Code generation helpers. +// InstructionStream generation helpers. void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift, MoveWideImmediateOp mov_op) { // Ignore the top 32 bits of an immediate if we're moving to a W register. @@ -4360,7 +4360,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, DCHECK(constpool_.IsBlocked()); // We do not try to reuse pool constants. - RelocInfo rinfo(reinterpret_cast
<Address>(pc_), rmode, data, Code()); + RelocInfo rinfo(reinterpret_cast<Address>
(pc_), rmode, data, + InstructionStream()); DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here reloc_info_writer.Write(&rinfo); @@ -4486,7 +4487,8 @@ intptr_t Assembler::MaxPCOffsetAfterVeneerPoolIfEmittedNow(size_t margin) { void Assembler::RecordVeneerPool(int location_offset, int size) { Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip); RelocInfo rinfo(reinterpret_cast<Address>
(buffer_start_) + location_offset, - RelocInfo::VENEER_POOL, static_cast(size), Code()); + RelocInfo::VENEER_POOL, static_cast(size), + InstructionStream()); reloc_info_writer.Write(&rinfo); } diff --git a/src/codegen/arm64/assembler-arm64.h b/src/codegen/arm64/assembler-arm64.h index 35cf8f8b13..c05834ca15 100644 --- a/src/codegen/arm64/assembler-arm64.h +++ b/src/codegen/arm64/assembler-arm64.h @@ -277,9 +277,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // This sets the branch destination. 'location' here can be either the pc of // an immediate branch or the address of an entry in the constant pool. // This is for calls and branches within generated code. - inline static void deserialization_set_special_target_at(Address location, - Code code, - Address target); + inline static void deserialization_set_special_target_at( + Address location, InstructionStream code, Address target); // Get the size of the special target encoded at 'location'. inline static int deserialization_special_target_size(Address location); @@ -780,12 +779,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void clz(const Register& rd, const Register& rn); void cls(const Register& rd, const Register& rn); - // Pointer Authentication Code for Instruction address, using key B, with - // address in x17 and modifier in x16 [Armv8.3]. + // Pointer Authentication InstructionStream for Instruction address, using key + // B, with address in x17 and modifier in x16 [Armv8.3]. void pacib1716(); - // Pointer Authentication Code for Instruction address, using key B, with - // address in LR and modifier in SP [Armv8.3]. + // Pointer Authentication InstructionStream for Instruction address, using key + // B, with address in LR and modifier in SP [Armv8.3]. void pacibsp(); // Authenticate Instruction address, using key B, with address in x17 and @@ -2088,7 +2087,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { dc64(data); } - // Code generation helpers -------------------------------------------------- + // InstructionStream generation helpers + // -------------------------------------------------- Instruction* pc() const { return Instruction::Cast(pc_); } @@ -2663,7 +2663,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { std::deque internal_reference_positions_; protected: - // Code generation + // InstructionStream generation // The relocation writer's position is at least kGap bytes below the end of // the generated instructions. This is so that multi-instruction sequences do // not have to check for overflow. The same is true for writes of large diff --git a/src/codegen/arm64/macro-assembler-arm64.cc b/src/codegen/arm64/macro-assembler-arm64.cc index fc4234868b..36c0127326 100644 --- a/src/codegen/arm64/macro-assembler-arm64.cc +++ b/src/codegen/arm64/macro-assembler-arm64.cc @@ -2360,13 +2360,14 @@ void TurboAssembler::LoadCodeDataContainerEntry( CodeDataContainer::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin( +void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin( Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); - // Compute the Code object pointer from the code entry point. + // Compute the InstructionStream object pointer from the code entry point. 
Ldr(destination, FieldMemOperand(code_data_container_object, CodeDataContainer::kCodeEntryPointOffset)); - Sub(destination, destination, Immediate(Code::kHeaderSize - kHeapObjectTag)); + Sub(destination, destination, + Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } void TurboAssembler::CallCodeDataContainerObject( @@ -2396,9 +2397,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. // - // Note that this assumes the caller code (i.e. the Code object currently - // being generated) is immovable or that the callee function cannot trigger - // GC, since the callee function will return to it. + // Note that this assumes the caller code (i.e. the InstructionStream object + // currently being generated) is immovable or that the callee function cannot + // trigger GC, since the callee function will return to it. UseScratchRegisterScope temps(this); temps.Exclude(x16, x17); @@ -2447,13 +2448,15 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) { void TurboAssembler::BailoutIfDeoptimized() { UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); - int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; + int offset = InstructionStream::kCodeDataContainerOffset - + InstructionStream::kHeaderSize; LoadTaggedPointerField(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset)); Ldr(scratch.W(), FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); Label not_deoptimized; - Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, ¬_deoptimized); + Tbz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit, + ¬_deoptimized); Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), RelocInfo::CODE_TARGET); Bind(¬_deoptimized); @@ -2691,7 +2694,7 @@ void MacroAssembler::JumpIfCodeDataContainerIsMarkedForDeoptimization( Ldr(scratch.W(), FieldMemOperand(code_data_container, CodeDataContainer::kKindSpecificFlagsOffset)); - Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit, + Tbnz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit, if_marked_for_deoptimization); } diff --git a/src/codegen/arm64/macro-assembler-arm64.h b/src/codegen/arm64/macro-assembler-arm64.h index 091e61d238..f0c07c9a5d 100644 --- a/src/codegen/arm64/macro-assembler-arm64.h +++ b/src/codegen/arm64/macro-assembler-arm64.h @@ -1000,11 +1000,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void LoadCodeDataContainerEntry(Register destination, Register code_data_container_object); // Load code entry point from the CodeDataContainer object and compute - // Code object pointer out of it. Must not be used for CodeDataContainers - // corresponding to builtins, because their entry points values point to - // the embedded instruction stream in .text section. - void LoadCodeDataContainerCodeNonBuiltin(Register destination, - Register code_data_container_object); + // InstructionStream object pointer out of it. Must not be used for + // CodeDataContainers corresponding to builtins, because their entry points + // values point to the embedded instruction stream in .text section. 
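The LoadCodeDataContainerInstructionStreamNonBuiltin variants in this patch all implement the same arithmetic: the code entry point sits kHeaderSize bytes past the start of the InstructionStream object, so subtracting kHeaderSize - kHeapObjectTag turns the entry point back into a tagged object pointer. The builtin caveat exists because builtin entry points address the embedded .text blob, where no heap object precedes the instructions. A standalone sketch with an assumed header size:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // tagged pointers are offset by one
constexpr uintptr_t kHeaderSize = 64;    // assumed InstructionStream header size

uintptr_t TaggedObjectFromEntryPoint(uintptr_t entry_point) {
  // entry_point == object_start + kHeaderSize, so this yields
  // object_start + kHeapObjectTag, i.e. the tagged pointer.
  return entry_point - (kHeaderSize - kHeapObjectTag);
}

int main() {
  uintptr_t object_start = 0x10000;
  uintptr_t entry_point = object_start + kHeaderSize;
  assert(TaggedObjectFromEntryPoint(entry_point) ==
         object_start + kHeapObjectTag);
  return 0;
}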
+ void LoadCodeDataContainerInstructionStreamNonBuiltin( + Register destination, Register code_data_container_object); void CallCodeDataContainerObject(Register code_data_container_object); void JumpCodeDataContainerObject(Register code_data_container_object, JumpMode jump_mode = JumpMode::kJump); @@ -1989,7 +1989,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type); - // ---- Code generation helpers ---- + // ---- InstructionStream generation helpers ---- // --------------------------------------------------------------------------- // Support functions. diff --git a/src/codegen/callable.h b/src/codegen/callable.h index dee7029d37..7348d04a0f 100644 --- a/src/codegen/callable.h +++ b/src/codegen/callable.h @@ -11,7 +11,7 @@ namespace v8 { namespace internal { -class Code; +class InstructionStream; // Associates a body of code with an interface descriptor. class Callable final { diff --git a/src/codegen/code-comments.h b/src/codegen/code-comments.h index 1c5189aa9a..330b2bb0d6 100644 --- a/src/codegen/code-comments.h +++ b/src/codegen/code-comments.h @@ -17,7 +17,7 @@ namespace internal { class Assembler; -// Code comments section layout: +// InstructionStream comments section layout: // byte count content // ------------------------------------------------------------------------ // 4 size as uint32_t (only for a check) diff --git a/src/codegen/code-desc.h b/src/codegen/code-desc.h index e051bb459c..7aed2eb962 100644 --- a/src/codegen/code-desc.h +++ b/src/codegen/code-desc.h @@ -63,7 +63,7 @@ class CodeDesc { int code_comments_size = 0; // TODO(jgruber,v8:11036): Remove these functions once CodeDesc fields have - // been made consistent with Code layout. + // been made consistent with InstructionStream layout. 
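For readers skimming the CodeDesc change: the accessors just below encode the buffer layout, where instructions are followed by metadata tables and the unwinding info is kept out of line. A standalone check of those size relations, with invented numbers:

#include <cassert>

struct Desc {                  // simplified stand-in for CodeDesc
  int instr_size;              // instruction area plus on-buffer metadata
  int unwinding_info_size;     // stored out of line
  int safepoint_table_offset;  // first metadata table starts here
};

int main() {
  Desc d{128, 16, 96};
  int body_size = d.instr_size + d.unwinding_info_size;  // 144
  int instruction_size = d.safepoint_table_offset;       // 96: pure instructions
  int metadata_size = body_size - instruction_size;      // 48: tables + unwinding
  assert(metadata_size == 48);
  return 0;
}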
int body_size() const { return instr_size + unwinding_info_size; } int instruction_size() const { return safepoint_table_offset; } int metadata_size() const { return body_size() - instruction_size(); } diff --git a/src/codegen/code-reference.cc b/src/codegen/code-reference.cc index 93fc96431e..4d5aeb586e 100644 --- a/src/codegen/code-reference.cc +++ b/src/codegen/code-reference.cc @@ -33,7 +33,7 @@ struct CodeOrCodeDataContainerOps { int code_comments_size() const { return code->code_comments_size(); } }; -using CodeOps = CodeOrCodeDataContainerOps; +using CodeOps = CodeOrCodeDataContainerOps; using CodeDataContainerOps = CodeOrCodeDataContainerOps; #if V8_ENABLE_WEBASSEMBLY @@ -96,8 +96,8 @@ struct CodeDescOps { ret CodeReference::method() const { \ DCHECK(!is_null()); \ switch (kind_) { \ - case Kind::CODE: \ - return CodeOps{code_}.method(); \ + case Kind::INSTRUCTION_STREAM: \ + return CodeOps{instruction_stream_}.method(); \ case Kind::CODE_DATA_CONTAINER: \ return CodeDataContainerOps{code_data_container_}.method(); \ case Kind::WASM_CODE: \ diff --git a/src/codegen/code-reference.h b/src/codegen/code-reference.h index ce3251e600..60f5b53ce0 100644 --- a/src/codegen/code-reference.h +++ b/src/codegen/code-reference.h @@ -12,7 +12,7 @@ namespace v8 { namespace internal { -class Code; +class InstructionStream; class CodeDataContainer; class CodeDesc; @@ -27,7 +27,8 @@ class CodeReference { : kind_(Kind::WASM_CODE), wasm_code_(wasm_code) {} explicit CodeReference(const CodeDesc* code_desc) : kind_(Kind::CODE_DESC), code_desc_(code_desc) {} - explicit CodeReference(Handle code) : kind_(Kind::CODE), code_(code) {} + explicit CodeReference(Handle code) + : kind_(Kind::INSTRUCTION_STREAM), instruction_stream_(code) {} explicit CodeReference(Handle code_data_container) : kind_(Kind::CODE_DATA_CONTAINER), code_data_container_(code_data_container) {} @@ -43,15 +44,17 @@ class CodeReference { int code_comments_size() const; bool is_null() const { return kind_ == Kind::NONE; } - bool is_code() const { return kind_ == Kind::CODE; } + bool is_instruction_stream() const { + return kind_ == Kind::INSTRUCTION_STREAM; + } bool is_code_data_container() const { return kind_ == Kind::CODE_DATA_CONTAINER; } bool is_wasm_code() const { return kind_ == Kind::WASM_CODE; } - Handle as_code() const { - DCHECK_EQ(Kind::CODE, kind_); - return code_; + Handle as_instruction_stream() const { + DCHECK_EQ(Kind::INSTRUCTION_STREAM, kind_); + return instruction_stream_; } Handle as_code_data_container() const { @@ -67,7 +70,7 @@ class CodeReference { private: enum class Kind { NONE, - CODE, + INSTRUCTION_STREAM, CODE_DATA_CONTAINER, WASM_CODE, CODE_DESC @@ -76,7 +79,7 @@ class CodeReference { std::nullptr_t null_; const wasm::WasmCode* wasm_code_; const CodeDesc* code_desc_; - Handle code_; + Handle instruction_stream_; Handle code_data_container_; }; diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc index 3681697188..145f214421 100644 --- a/src/codegen/code-stub-assembler.cc +++ b/src/codegen/code-stub-assembler.cc @@ -3158,7 +3158,7 @@ TNode CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray( #endif // DEBUG TNode baseline_data = LoadObjectField( FromCodeDataContainerNonBuiltin(code), - Code::kDeoptimizationDataOrInterpreterDataOffset); + InstructionStream::kDeoptimizationDataOrInterpreterDataOffset); var_result = baseline_data; } Goto(&check_for_interpreter_data); @@ -15614,7 +15614,7 @@ TNode CodeStubAssembler::GetCodeEntry(TNode code) { TNode 
CodeStubAssembler::IsMarkedForDeoptimization( TNode code_data_container) { - return IsSetWord32( + return IsSetWord32( LoadObjectField(code_data_container, CodeDataContainer::kKindSpecificFlagsOffset)); } diff --git a/src/codegen/code-stub-assembler.h b/src/codegen/code-stub-assembler.h index 42e17d9862..3333f180bd 100644 --- a/src/codegen/code-stub-assembler.h +++ b/src/codegen/code-stub-assembler.h @@ -834,21 +834,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void FastCheck(TNode condition); - // TODO(v8:11880): remove once Code::bytecode_or_interpreter_data field - // is cached in or moved to CodeDataContainer. - TNode FromCodeDataContainerNonBuiltin(TNode code) { - // Compute the Code object pointer from the code entry point. + // TODO(v8:11880): remove once InstructionStream::bytecode_or_interpreter_data + // field is cached in or moved to CodeDataContainer. + TNode FromCodeDataContainerNonBuiltin( + TNode code) { + // Compute the InstructionStream object pointer from the code entry point. TNode code_entry = Load( code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset - kHeapObjectTag)); TNode o = BitcastWordToTagged(IntPtrSub( - code_entry, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag))); + code_entry, + IntPtrConstant(InstructionStream::kHeaderSize - kHeapObjectTag))); return CAST(o); } - TNode ToCodeDataContainer(TNode code) { - return LoadObjectField(code, - Code::kCodeDataContainerOffset); + TNode ToCodeDataContainer(TNode code) { + return LoadObjectField( + code, InstructionStream::kCodeDataContainerOffset); } TNode GetCodeEntry(TNode code); @@ -857,7 +859,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // The following Call wrappers call an object according to the semantics that // one finds in the EcmaScript spec, operating on an Callable (e.g. a - // JSFunction or proxy) rather than a Code object. + // JSFunction or proxy) rather than a InstructionStream object. template TNode Call(TNode context, TNode callable, TNode receiver, TArgs... args) { diff --git a/src/codegen/compiler.cc b/src/codegen/compiler.cc index f3cda76725..2a029d1b0c 100644 --- a/src/codegen/compiler.cc +++ b/src/codegen/compiler.cc @@ -644,7 +644,7 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate, Handle bytecode_array(shared_info->GetBytecodeArray(isolate), isolate); - Handle code = + Handle code = Builtins::CreateInterpreterEntryTrampolineForProfiling(isolate); Handle interpreter_data = @@ -1177,7 +1177,8 @@ void RecordMaglevFunctionCompilation(Isolate* isolate, Handle function) { PtrComprCageBase cage_base(isolate); // TODO(v8:13261): We should be able to pass a CodeDataContainer AbstractCode - // in here, but LinuxPerfJitLogger only supports Code AbstractCode. + // in here, but LinuxPerfJitLogger only supports InstructionStream + // AbstractCode. Handle abstract_code( AbstractCode::cast(FromCodeDataContainer(function->code(cage_base))), isolate); @@ -1731,13 +1732,16 @@ class MergeAssumptionChecker final : public ObjectVisitor { } // The object graph for a newly compiled Script shouldn't yet contain any - // Code. If any of these functions are called, then that would indicate that - // the graph was not disjoint from the rest of the heap as expected. + // InstructionStream. If any of these functions are called, then that would + // indicate that the graph was not disjoint from the rest of the heap as + // expected. 
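A minimal sketch of the checking pattern the overrides just below rely on: every code-pointer visit is treated as a logic error, standing in for UNREACHABLE(). The types here are simplified placeholders, not V8's ObjectVisitor interface:

#include <cstdlib>
#include <iostream>

struct DisjointnessChecker {
  // Ordinary object slots are expected while walking the new Script's graph.
  void VisitDataSlot(const void* /*slot*/) {}
  // A code pointer would mean the graph reaches into the rest of the heap.
  void VisitCodePointer(const void* /*slot*/) {
    std::cerr << "object graph unexpectedly references code\n";
    std::abort();  // plays the role of UNREACHABLE()
  }
};

int main() {
  DisjointnessChecker checker;
  checker.VisitDataSlot(nullptr);  // fine; VisitCodePointer would abort
  return 0;
}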
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override { UNREACHABLE(); } - void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); } - void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override { + void VisitCodeTarget(InstructionStream host, RelocInfo* rinfo) override { + UNREACHABLE(); + } + void VisitEmbeddedPointer(InstructionStream host, RelocInfo* rinfo) override { UNREACHABLE(); } @@ -2637,7 +2641,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate, } CompilerTracer::TraceStartBaselineCompile(isolate, shared); - Handle code; + Handle code; base::TimeDelta time_taken; { ScopedTimer timer(&time_taken); @@ -3929,7 +3933,7 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job, // 2) The function may have already been optimized by OSR. Simply continue. // Except when OSR already disabled optimization for some reason. // 3) The code may have already been invalidated due to dependency change. - // 4) Code generation may have failed. + // 4) InstructionStream generation may have failed. if (job->state() == CompilationJob::State::kReadyToFinalize) { if (shared->optimization_disabled()) { job->RetryOptimization(BailoutReason::kOptimizationDisabled); @@ -3991,8 +3995,8 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job, ResetTieringState(*function, osr_offset); if (status == CompilationJob::SUCCEEDED) { - // Note the finalized Code object has already been installed on the - // function by MaglevCompilationJob::FinalizeJobImpl. + // Note the finalized InstructionStream object has already been installed on + // the function by MaglevCompilationJob::FinalizeJobImpl. OptimizedCodeCache::Insert(isolate, *function, BytecodeOffset::None(), function->code(), diff --git a/src/codegen/external-reference.cc b/src/codegen/external-reference.cc index 1f733a9d4f..e02bf9fef9 100644 --- a/src/codegen/external-reference.cc +++ b/src/codegen/external-reference.cc @@ -749,7 +749,7 @@ namespace { static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj, int bytecode_offset, Address raw_bytecode_array) { - Code code_obj = Code::cast(Object(raw_code_obj)); + InstructionStream code_obj = InstructionStream::cast(Object(raw_code_obj)); BytecodeArray bytecode_array = BytecodeArray::cast(Object(raw_bytecode_array)); return code_obj.GetBaselineStartPCForBytecodeOffset(bytecode_offset, @@ -759,7 +759,7 @@ static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj, static uintptr_t BaselinePCForNextExecutedBytecode(Address raw_code_obj, int bytecode_offset, Address raw_bytecode_array) { - Code code_obj = Code::cast(Object(raw_code_obj)); + InstructionStream code_obj = InstructionStream::cast(Object(raw_code_obj)); BytecodeArray bytecode_array = BytecodeArray::cast(Object(raw_bytecode_array)); return code_obj.GetBaselinePCForNextExecutedBytecode(bytecode_offset, diff --git a/src/codegen/handler-table.cc b/src/codegen/handler-table.cc index 43d43ccf61..aec93116ef 100644 --- a/src/codegen/handler-table.cc +++ b/src/codegen/handler-table.cc @@ -19,7 +19,7 @@ namespace v8 { namespace internal { -HandlerTable::HandlerTable(Code code) +HandlerTable::HandlerTable(InstructionStream code) : HandlerTable(code.HandlerTableAddress(), code.handler_table_size(), kReturnAddressBasedEncoding) {} @@ -151,7 +151,7 @@ int HandlerTable::LengthForRange(int entries) { // static int HandlerTable::EmitReturnTableStart(Assembler* masm) { - masm->DataAlign(Code::kMetadataAlignment); + 
masm->DataAlign(InstructionStream::kMetadataAlignment); masm->RecordComment(";;; Exception handler table."); int table_start = masm->pc_offset(); return table_start; diff --git a/src/codegen/handler-table.h b/src/codegen/handler-table.h index bbbc8ac97d..5c4543fa0e 100644 --- a/src/codegen/handler-table.h +++ b/src/codegen/handler-table.h @@ -15,7 +15,7 @@ namespace internal { class Assembler; class ByteArray; class BytecodeArray; -class Code; +class InstructionStream; class CodeDataContainer; namespace wasm { @@ -30,8 +30,9 @@ class WasmCode; // Layout looks as follows: // [ range-start , range-end , handler-offset , handler-data ] // 2) Based on return addresses: Used for turbofanned code. Stored directly in -// the instruction stream of the {Code} object. Contains one entry per -// call-site that could throw an exception. Layout looks as follows: +// the instruction stream of the {InstructionStream} object. Contains one +// entry per call-site that could throw an exception. Layout looks as +// follows: // [ return-address-offset , handler-offset ] class V8_EXPORT_PRIVATE HandlerTable { public: @@ -54,7 +55,7 @@ class V8_EXPORT_PRIVATE HandlerTable { enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding }; // Constructors for the various encodings. - explicit HandlerTable(Code code); + explicit HandlerTable(InstructionStream code); explicit HandlerTable(CodeDataContainer code); explicit HandlerTable(ByteArray byte_array); #if V8_ENABLE_WEBASSEMBLY @@ -121,8 +122,8 @@ class V8_EXPORT_PRIVATE HandlerTable { #endif // Direct pointer into the encoded data. This pointer potentially points into - // objects on the GC heap (either {ByteArray} or {Code}) and could become - // stale during a collection. Hence we disallow any allocation. + // objects on the GC heap (either {ByteArray} or {InstructionStream}) and + // could become stale during a collection. Hence we disallow any allocation. const Address raw_encoded_data_; DISALLOW_GARBAGE_COLLECTION(no_gc_) diff --git a/src/codegen/ia32/assembler-ia32-inl.h b/src/codegen/ia32/assembler-ia32-inl.h index 353010c00a..63c83118e2 100644 --- a/src/codegen/ia32/assembler-ia32-inl.h +++ b/src/codegen/ia32/assembler-ia32-inl.h @@ -188,7 +188,7 @@ void Assembler::emit(const Immediate& x) { void Assembler::emit_code_relative_offset(Label* label) { if (label->is_bound()) { int32_t pos; - pos = label->pos() + Code::kHeaderSize - kHeapObjectTag; + pos = label->pos() + InstructionStream::kHeaderSize - kHeapObjectTag; emit(pos); } else { emit_disp(label, Displacement::CODE_RELATIVE); @@ -222,7 +222,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool, } void Assembler::deserialization_set_special_target_at( - Address instruction_payload, Code code, Address target) { + Address instruction_payload, InstructionStream code, Address target) { set_target_address_at(instruction_payload, !code.is_null() ? code.constant_pool() : kNullAddress, target); diff --git a/src/codegen/ia32/assembler-ia32.cc b/src/codegen/ia32/assembler-ia32.cc index ddbe5b82c0..11b1a08f94 100644 --- a/src/codegen/ia32/assembler-ia32.cc +++ b/src/codegen/ia32/assembler-ia32.cc @@ -320,13 +320,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, SafepointTableBuilder* safepoint_table_builder, int handler_table_offset) { // As a crutch to avoid having to add manual Align calls wherever we use a - // raw workflow to create Code objects (mostly in tests), add another Align - // call here. 
It does no harm - the end of the Code object is aligned to the - // (larger) kCodeAlignment anyways. + // raw workflow to create InstructionStream objects (mostly in tests), add + // another Align call here. It does no harm - the end of the InstructionStream + // object is aligned to the (larger) kCodeAlignment anyways. // TODO(jgruber): Consider moving responsibility for proper alignment to // metadata table builders (safepoint, handler, constant pool, code // comments). - DataAlign(Code::kMetadataAlignment); + DataAlign(InstructionStream::kMetadataAlignment); const int code_comments_size = WriteCodeComments(); @@ -1537,8 +1537,9 @@ void Assembler::bind_to(Label* L, int pos) { long_at_put(fixup_pos, reinterpret_cast(buffer_start_ + pos)); internal_reference_positions_.push_back(fixup_pos); } else if (disp.type() == Displacement::CODE_RELATIVE) { - // Relative to Code heap object pointer. - long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag); + // Relative to InstructionStream heap object pointer. + long_at_put(fixup_pos, + pos + InstructionStream::kHeaderSize - kHeapObjectTag); } else { if (disp.type() == Displacement::UNCONDITIONAL_JUMP) { DCHECK_EQ(byte_at(fixup_pos - 1), 0xE9); // jmp expected @@ -3406,7 +3407,8 @@ void Assembler::dd(Label* label) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { if (!ShouldRecordRelocInfo(rmode)) return; - RelocInfo rinfo(reinterpret_cast
<Address>(pc_), rmode, data, Code()); + RelocInfo rinfo(reinterpret_cast<Address>
(pc_), rmode, data, + InstructionStream()); reloc_info_writer.Write(&rinfo); } diff --git a/src/codegen/ia32/assembler-ia32.h b/src/codegen/ia32/assembler-ia32.h index 782c30627a..0dfec91c3c 100644 --- a/src/codegen/ia32/assembler-ia32.h +++ b/src/codegen/ia32/assembler-ia32.h @@ -405,7 +405,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // This sets the branch destination (which is in the instruction on x86). // This is for calls and branches within generated code. inline static void deserialization_set_special_target_at( - Address instruction_payload, Code code, Address target); + Address instruction_payload, InstructionStream code, Address target); // Get the size of the special target encoded at 'instruction_payload'. inline static int deserialization_special_target_size( @@ -433,7 +433,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static constexpr byte kJzShortOpcode = kJccShortPrefix | zero; // --------------------------------------------------------------------------- - // Code generation + // InstructionStream generation // // - function names correspond one-to-one to ia32 instruction mnemonics // - unless specified otherwise, instructions operate on 32bit operands diff --git a/src/codegen/ia32/macro-assembler-ia32.cc b/src/codegen/ia32/macro-assembler-ia32.cc index f3507c00f1..611c92a3c6 100644 --- a/src/codegen/ia32/macro-assembler-ia32.cc +++ b/src/codegen/ia32/macro-assembler-ia32.cc @@ -710,7 +710,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization( Register code_data_container) { test(FieldOperand(code_data_container, CodeDataContainer::kKindSpecificFlagsOffset), - Immediate(1 << Code::kMarkedForDeoptimizationBit)); + Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit)); } Immediate MacroAssembler::ClearedValue() const { @@ -2058,13 +2058,13 @@ void TurboAssembler::LoadCodeDataContainerEntry( CodeDataContainer::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin( +void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin( Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); - // Compute the Code object pointer from the code entry point. + // Compute the InstructionStream object pointer from the code entry point. mov(destination, FieldOperand(code_data_container_object, CodeDataContainer::kCodeEntryPointOffset)); - sub(destination, Immediate(Code::kHeaderSize - kHeapObjectTag)); + sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } void TurboAssembler::CallCodeDataContainerObject( diff --git a/src/codegen/ia32/macro-assembler-ia32.h b/src/codegen/ia32/macro-assembler-ia32.h index 4e50c7784d..6f10243ed5 100644 --- a/src/codegen/ia32/macro-assembler-ia32.h +++ b/src/codegen/ia32/macro-assembler-ia32.h @@ -36,7 +36,7 @@ namespace v8 { namespace internal { -class Code; +class InstructionStream; class ExternalReference; class StatsCounter; @@ -162,11 +162,11 @@ class V8_EXPORT_PRIVATE TurboAssembler void LoadCodeDataContainerEntry(Register destination, Register code_data_container_object); // Load code entry point from the CodeDataContainer object and compute - // Code object pointer out of it. Must not be used for CodeDataContainers - // corresponding to builtins, because their entry points values point to - // the embedded instruction stream in .text section. - void LoadCodeDataContainerCodeNonBuiltin(Register destination, - Register code_data_container_object); + // InstructionStream object pointer out of it. 
Must not be used for + // CodeDataContainers corresponding to builtins, because their entry points + // values point to the embedded instruction stream in .text section. + void LoadCodeDataContainerInstructionStreamNonBuiltin( + Register destination, Register code_data_container_object); void CallCodeDataContainerObject(Register code_data_container_object); void JumpCodeDataContainerObject(Register code_data_container_object, JumpMode jump_mode = JumpMode::kJump); diff --git a/src/codegen/maglev-safepoint-table.cc b/src/codegen/maglev-safepoint-table.cc index 89e358e1e0..58fddfcfd0 100644 --- a/src/codegen/maglev-safepoint-table.cc +++ b/src/codegen/maglev-safepoint-table.cc @@ -13,7 +13,7 @@ namespace v8 { namespace internal { MaglevSafepointTable::MaglevSafepointTable(Isolate* isolate, Address pc, - Code code) + InstructionStream code) : MaglevSafepointTable(code.InstructionStart(isolate, pc), code.SafepointTableAddress()) { DCHECK(code.is_maglevved()); @@ -160,7 +160,7 @@ void MaglevSafepointTableBuilder::Emit(Assembler* assembler) { #endif // Make sure the safepoint table is properly aligned. Pad with nops. - assembler->Align(Code::kMetadataAlignment); + assembler->Align(InstructionStream::kMetadataAlignment); assembler->RecordComment(";;; Maglev safepoint table."); set_safepoint_table_offset(assembler->pc_offset()); diff --git a/src/codegen/maglev-safepoint-table.h b/src/codegen/maglev-safepoint-table.h index 522fac7a90..6b1b36d874 100644 --- a/src/codegen/maglev-safepoint-table.h +++ b/src/codegen/maglev-safepoint-table.h @@ -65,13 +65,14 @@ class MaglevSafepointEntry : public SafepointEntryBase { uint32_t tagged_register_indexes_ = 0; }; -// A wrapper class for accessing the safepoint table embedded into the Code -// object. +// A wrapper class for accessing the safepoint table embedded into the +// InstructionStream object. class MaglevSafepointTable { public: // The isolate and pc arguments are used for figuring out whether pc // belongs to the embedded or un-embedded code blob. 
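Both safepoint emitters above begin by aligning the metadata section. The rounding itself is the usual power-of-two trick; a standalone sketch, with the 32-byte alignment value assumed for illustration:

#include <cassert>
#include <cstdint>

constexpr uint32_t kMetadataAlignment = 32;  // assumed; must be a power of two

uint32_t AlignUp(uint32_t pc_offset, uint32_t alignment) {
  return (pc_offset + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(AlignUp(65, kMetadataAlignment) == 96);  // pads 31 bytes of nops
  assert(AlignUp(96, kMetadataAlignment) == 96);  // already aligned: no pad
  return 0;
}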
- explicit MaglevSafepointTable(Isolate* isolate, Address pc, Code code); + explicit MaglevSafepointTable(Isolate* isolate, Address pc, + InstructionStream code); explicit MaglevSafepointTable(Isolate* isolate, Address pc, CodeDataContainer code); MaglevSafepointTable(const MaglevSafepointTable&) = delete; diff --git a/src/codegen/optimized-compilation-info.cc b/src/codegen/optimized-compilation-info.cc index 85fea1c902..d77d396cd8 100644 --- a/src/codegen/optimized-compilation-info.cc +++ b/src/codegen/optimized-compilation-info.cc @@ -169,7 +169,7 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const { } } -void OptimizedCompilationInfo::SetCode(Handle code) { +void OptimizedCompilationInfo::SetCode(Handle code) { DCHECK_EQ(code->kind(), code_kind()); code_ = code; } diff --git a/src/codegen/optimized-compilation-info.h b/src/codegen/optimized-compilation-info.h index 560db8484c..ed063196fc 100644 --- a/src/codegen/optimized-compilation-info.h +++ b/src/codegen/optimized-compilation-info.h @@ -118,7 +118,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { Handle bytecode_array() const { return bytecode_array_; } bool has_bytecode_array() const { return !bytecode_array_.is_null(); } Handle closure() const { return closure_; } - Handle code() const { return code_; } + Handle code() const { return code_; } CodeKind code_kind() const { return code_kind_; } Builtin builtin() const { return builtin_; } void set_builtin(Builtin builtin) { builtin_ = builtin; } @@ -129,9 +129,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { } compiler::NodeObserver* node_observer() const { return node_observer_; } - // Code getters and setters. + // InstructionStream getters and setters. - void SetCode(Handle code); + void SetCode(Handle code); #if V8_ENABLE_WEBASSEMBLY void SetWasmCompilationResult(std::unique_ptr); @@ -260,7 +260,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { Handle closure_; // The compiled code. - Handle code_; + Handle code_; // Basic block profiling support. 
BasicBlockProfilerData* profiler_data_ = nullptr; diff --git a/src/codegen/reloc-info.cc b/src/codegen/reloc-info.cc index a2b1204ff7..7cf0e6c0ed 100644 --- a/src/codegen/reloc-info.cc +++ b/src/codegen/reloc-info.cc @@ -253,23 +253,23 @@ void RelocIterator::next() { done_ = true; } -RelocIterator::RelocIterator(Code code, int mode_mask) +RelocIterator::RelocIterator(InstructionStream code, int mode_mask) : RelocIterator(code, code.unchecked_relocation_info(), mode_mask) {} -RelocIterator::RelocIterator(Code code, ByteArray relocation_info, +RelocIterator::RelocIterator(InstructionStream code, ByteArray relocation_info, int mode_mask) : RelocIterator(code, code.raw_instruction_start(), code.constant_pool(), relocation_info.GetDataEndAddress(), relocation_info.GetDataStartAddress(), mode_mask) {} RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask) - : RelocIterator(Code(), code_reference.instruction_start(), + : RelocIterator(InstructionStream(), code_reference.instruction_start(), code_reference.constant_pool(), code_reference.relocation_end(), code_reference.relocation_start(), mode_mask) {} -RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code, - int mode_mask) +RelocIterator::RelocIterator(EmbeddedData* embedded_data, + InstructionStream code, int mode_mask) : RelocIterator(code, embedded_data->InstructionStartOfBuiltin(code.builtin_id()), code.constant_pool(), @@ -277,20 +277,22 @@ RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code, code.relocation_start(), mode_mask) {} RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) - : RelocIterator(Code(), reinterpret_cast
<Address>(desc.buffer), 0, - desc.buffer + desc.buffer_size, + : RelocIterator(InstructionStream(), reinterpret_cast<Address>
(desc.buffer), + 0, desc.buffer + desc.buffer_size, desc.buffer + desc.buffer_size - desc.reloc_size, mode_mask) {} RelocIterator::RelocIterator(base::Vector<byte> instructions, base::Vector<const byte> reloc_info, Address const_pool, int mode_mask) - : RelocIterator(Code(), reinterpret_cast<Address>
(instructions.begin()), - const_pool, reloc_info.begin() + reloc_info.size(), - reloc_info.begin(), mode_mask) {} + : RelocIterator(InstructionStream(), + reinterpret_cast<Address>
(instructions.begin()), const_pool, + reloc_info.begin() + reloc_info.size(), reloc_info.begin(), + mode_mask) {} -RelocIterator::RelocIterator(Code host, Address pc, Address constant_pool, - const byte* pos, const byte* end, int mode_mask) +RelocIterator::RelocIterator(InstructionStream host, Address pc, + Address constant_pool, const byte* pos, + const byte* end, int mode_mask) : pos_(pos), end_(end), mode_mask_(mode_mask) { // Relocation info is read backwards. DCHECK_GE(pos_, end_); @@ -350,7 +352,8 @@ void RelocInfo::set_target_address(Address target, icache_flush_mode); if (!host().is_null() && IsCodeTargetMode(rmode_) && !v8_flags.disable_write_barriers) { - Code target_code = Code::GetCodeFromTargetAddress(target); + InstructionStream target_code = + InstructionStream::GetCodeFromTargetAddress(target); WriteBarrierForCode(host(), this, target_code, write_barrier_mode); } } @@ -385,7 +388,7 @@ bool RelocInfo::RequiresRelocationAfterCodegen(const CodeDesc& desc) { return !it.done(); } -bool RelocInfo::RequiresRelocation(Code code) { +bool RelocInfo::RequiresRelocation(InstructionStream code) { RelocIterator it(code, RelocInfo::kApplyMask); return !it.done(); } @@ -462,8 +465,9 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { << ")"; } else if (IsCodeTargetMode(rmode_)) { const Address code_target = target_address(); - Code code = Code::GetCodeFromTargetAddress(code_target); - DCHECK(code.IsCode()); + InstructionStream code = + InstructionStream::GetCodeFromTargetAddress(code_target); + DCHECK(code.IsInstructionStream()); os << " (" << CodeKindToString(code.kind()); if (Builtins::IsBuiltin(code)) { os << " " << Builtins::name(code.builtin_id()); @@ -492,10 +496,11 @@ void RelocInfo::Verify(Isolate* isolate) { Address addr = target_address(); CHECK_NE(addr, kNullAddress); // Check that we can find the right code object. - Code code = Code::GetCodeFromTargetAddress(addr); + InstructionStream code = + InstructionStream::GetCodeFromTargetAddress(addr); CodeLookupResult lookup_result = isolate->FindCodeObject(addr); CHECK(lookup_result.IsFound()); - CHECK_EQ(code.address(), lookup_result.code().address()); + CHECK_EQ(code.address(), lookup_result.instruction_stream().address()); break; } case INTERNAL_REFERENCE: @@ -504,7 +509,7 @@ void RelocInfo::Verify(Isolate* isolate) { Address pc = target_internal_reference_address(); CodeLookupResult lookup_result = isolate->FindCodeObject(pc); CHECK(lookup_result.IsFound()); - Code code = lookup_result.code(); + InstructionStream code = lookup_result.instruction_stream(); CHECK(target >= code.InstructionStart(isolate, pc)); CHECK(target <= code.InstructionEnd(isolate, pc)); break; diff --git a/src/codegen/reloc-info.h b/src/codegen/reloc-info.h index fd74413fc9..78af0fadce 100644 --- a/src/codegen/reloc-info.h +++ b/src/codegen/reloc-info.h @@ -114,7 +114,7 @@ class RelocInfo { RelocInfo() = default; - RelocInfo(Address pc, Mode rmode, intptr_t data, Code host, + RelocInfo(Address pc, Mode rmode, intptr_t data, InstructionStream host, Address constant_pool = kNullAddress) : pc_(pc), rmode_(rmode), @@ -213,7 +213,7 @@ class RelocInfo { Address pc() const { return pc_; } Mode rmode() const { return rmode_; } intptr_t data() const { return data_; } - Code host() const { return host_; } + InstructionStream host() const { return host_; } Address constant_pool() const { return constant_pool_; } // Apply a relocation by delta bytes. 
When the code object is moved, PC @@ -332,7 +332,7 @@ class RelocInfo { // Check whether the given code contains relocation information that // either is position-relative or movable by the garbage collector. static bool RequiresRelocationAfterCodegen(const CodeDesc& desc); - static bool RequiresRelocation(Code code); + static bool RequiresRelocation(InstructionStream code); #ifdef ENABLE_DISASSEMBLER // Printing @@ -359,7 +359,7 @@ class RelocInfo { // In addition to modes covered by the apply mask (which is applied at GC // time, among others), this covers all modes that are relocated by - // Code::CopyFromNoFlush after code generation. + // InstructionStream::CopyFromNoFlush after code generation. static int PostCodegenRelocationMask() { return ModeMask(RelocInfo::CODE_TARGET) | ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) | @@ -374,7 +374,7 @@ class RelocInfo { Address pc_; Mode rmode_; intptr_t data_ = 0; - Code host_; + InstructionStream host_; Address constant_pool_ = kNullAddress; friend class RelocIterator; }; @@ -432,9 +432,11 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced { // the beginning of the reloc info. // Relocation information with mode k is included in the // iteration iff bit k of mode_mask is set. - explicit RelocIterator(Code code, int mode_mask = -1); - explicit RelocIterator(Code code, ByteArray relocation_info, int mode_mask); - explicit RelocIterator(EmbeddedData* embedded_data, Code code, int mode_mask); + explicit RelocIterator(InstructionStream code, int mode_mask = -1); + explicit RelocIterator(InstructionStream code, ByteArray relocation_info, + int mode_mask); + explicit RelocIterator(EmbeddedData* embedded_data, InstructionStream code, + int mode_mask); explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1); explicit RelocIterator(const CodeReference code_reference, int mode_mask = -1); @@ -457,8 +459,8 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced { } private: - RelocIterator(Code host, Address pc, Address constant_pool, const byte* pos, - const byte* end, int mode_mask); + RelocIterator(InstructionStream host, Address pc, Address constant_pool, + const byte* pos, const byte* end, int mode_mask); // Advance* moves the position before/after reading. // *Read* reads from current byte(s) into rinfo_. diff --git a/src/codegen/safepoint-table.cc b/src/codegen/safepoint-table.cc index 187d3c5b4a..34cf1782de 100644 --- a/src/codegen/safepoint-table.cc +++ b/src/codegen/safepoint-table.cc @@ -20,7 +20,8 @@ namespace v8 { namespace internal { -SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code) +SafepointTable::SafepointTable(Isolate* isolate, Address pc, + InstructionStream code) : SafepointTable(code.InstructionStart(isolate, pc), code.SafepointTableAddress()) {} @@ -169,7 +170,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) { #endif // Make sure the safepoint table is properly aligned. Pad with nops. - assembler->Align(Code::kMetadataAlignment); + assembler->Align(InstructionStream::kMetadataAlignment); assembler->RecordComment(";;; Safepoint table."); set_safepoint_table_offset(assembler->pc_offset()); diff --git a/src/codegen/safepoint-table.h b/src/codegen/safepoint-table.h index a08633da90..df4dcc5f71 100644 --- a/src/codegen/safepoint-table.h +++ b/src/codegen/safepoint-table.h @@ -54,13 +54,13 @@ class SafepointEntry : public SafepointEntryBase { base::Vector tagged_slots_; }; -// A wrapper class for accessing the safepoint table embedded into the Code -// object. 
+// A wrapper class for accessing the safepoint table embedded into the +// InstructionStream object. class SafepointTable { public: // The isolate and pc arguments are used for figuring out whether pc // belongs to the embedded or un-embedded code blob. - explicit SafepointTable(Isolate* isolate, Address pc, Code code); + explicit SafepointTable(Isolate* isolate, Address pc, InstructionStream code); explicit SafepointTable(Isolate* isolate, Address pc, CodeDataContainer code); #if V8_ENABLE_WEBASSEMBLY explicit SafepointTable(const wasm::WasmCode* code); diff --git a/src/codegen/source-position.cc b/src/codegen/source-position.cc index e08f2d11a4..a340d214c0 100644 --- a/src/codegen/source-position.cc +++ b/src/codegen/source-position.cc @@ -60,7 +60,7 @@ std::vector SourcePosition::InliningStack( } std::vector SourcePosition::InliningStack( - Handle code) const { + Handle code) const { Isolate* isolate = code->GetIsolate(); DeoptimizationData deopt_data = DeoptimizationData::cast(code->deoptimization_data()); @@ -79,7 +79,8 @@ std::vector SourcePosition::InliningStack( return stack; } -SourcePositionInfo SourcePosition::FirstInfo(Handle code) const { +SourcePositionInfo SourcePosition::FirstInfo( + Handle code) const { DisallowGarbageCollection no_gc; Isolate* isolate = code->GetIsolate(); DeoptimizationData deopt_data = @@ -127,7 +128,7 @@ void SourcePosition::PrintJson(std::ostream& out) const { } } -void SourcePosition::Print(std::ostream& out, Code code) const { +void SourcePosition::Print(std::ostream& out, InstructionStream code) const { DeoptimizationData deopt_data = DeoptimizationData::cast(code.deoptimization_data()); if (!isInlined()) { diff --git a/src/codegen/source-position.h b/src/codegen/source-position.h index c77bad2539..bf5c31a68d 100644 --- a/src/codegen/source-position.h +++ b/src/codegen/source-position.h @@ -15,7 +15,7 @@ namespace v8 { namespace internal { -class Code; +class InstructionStream; class OptimizedCompilationInfo; class Script; class SharedFunctionInfo; @@ -79,12 +79,13 @@ class SourcePosition final { } // Assumes that the code object is optimized - std::vector InliningStack(Handle code) const; + std::vector InliningStack( + Handle code) const; std::vector InliningStack( OptimizedCompilationInfo* cinfo) const; - SourcePositionInfo FirstInfo(Handle code) const; + SourcePositionInfo FirstInfo(Handle code) const; - void Print(std::ostream& out, Code code) const; + void Print(std::ostream& out, InstructionStream code) const; void PrintJson(std::ostream& out) const; int ScriptOffset() const { diff --git a/src/codegen/x64/assembler-x64-inl.h b/src/codegen/x64/assembler-x64-inl.h index 2660c5815b..7ef56ef3b1 100644 --- a/src/codegen/x64/assembler-x64-inl.h +++ b/src/codegen/x64/assembler-x64-inl.h @@ -215,7 +215,7 @@ void Assembler::deserialization_set_target_internal_reference_at( } void Assembler::deserialization_set_special_target_at( - Address instruction_payload, Code code, Address target) { + Address instruction_payload, InstructionStream code, Address target) { set_target_address_at(instruction_payload, !code.is_null() ? code.constant_pool() : kNullAddress, target); @@ -285,8 +285,9 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { DCHECK(!HAS_SMI_TAG(compressed)); Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, compressed)); - // Embedding of compressed Code objects must not happen when external code - // space is enabled, because CodeDataContainers must be used instead. 
+ // Embedding of compressed InstructionStream objects must not happen when + // external code space is enabled, because CodeDataContainers must be used + // instead. DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(HeapObject::cast(obj))); return HeapObject::cast(obj); diff --git a/src/codegen/x64/assembler-x64.cc b/src/codegen/x64/assembler-x64.cc index 4881006a70..3d9d7e529f 100644 --- a/src/codegen/x64/assembler-x64.cc +++ b/src/codegen/x64/assembler-x64.cc @@ -367,13 +367,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, SafepointTableBuilderBase* safepoint_table_builder, int handler_table_offset) { // As a crutch to avoid having to add manual Align calls wherever we use a - // raw workflow to create Code objects (mostly in tests), add another Align - // call here. It does no harm - the end of the Code object is aligned to the - // (larger) kCodeAlignment anyways. + // raw workflow to create InstructionStream objects (mostly in tests), add + // another Align call here. It does no harm - the end of the InstructionStream + // object is aligned to the (larger) kCodeAlignment anyways. // TODO(jgruber): Consider moving responsibility for proper alignment to // metadata table builders (safepoint, handler, constant pool, code // comments). - DataAlign(Code::kMetadataAlignment); + DataAlign(InstructionStream::kMetadataAlignment); PatchConstPool(); DCHECK(constpool_.IsEmpty()); @@ -4492,7 +4492,8 @@ void Assembler::dq(Label* label) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { if (!ShouldRecordRelocInfo(rmode)) return; - RelocInfo rinfo(reinterpret_cast
<Address>(pc_), rmode, data, Code()); + RelocInfo rinfo(reinterpret_cast<Address>
(pc_), rmode, data, + InstructionStream()); reloc_info_writer.Write(&rinfo); } diff --git a/src/codegen/x64/assembler-x64.h b/src/codegen/x64/assembler-x64.h index edc5198b21..2a40e55936 100644 --- a/src/codegen/x64/assembler-x64.h +++ b/src/codegen/x64/assembler-x64.h @@ -447,12 +447,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Read/Modify the code target in the relative branch/call instruction at pc. // On the x64 architecture, we use relative jumps with a 32-bit displacement - // to jump to other Code objects in the Code space in the heap. - // Jumps to C functions are done indirectly through a 64-bit register holding - // the absolute address of the target. - // These functions convert between absolute Addresses of Code objects and - // the relative displacements stored in the code. - // The isolate argument is unused (and may be nullptr) when skipping flushing. + // to jump to other InstructionStream objects in the InstructionStream space + // in the heap. Jumps to C functions are done indirectly through a 64-bit + // register holding the absolute address of the target. These functions + // convert between absolute Addresses of InstructionStream objects and the + // relative displacements stored in the code. The isolate argument is unused + // (and may be nullptr) when skipping flushing. static inline Address target_address_at(Address pc, Address constant_pool); static inline void set_target_address_at( Address pc, Address constant_pool, Address target, @@ -467,7 +467,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // This sets the branch destination (which is in the instruction on x64). // This is for calls and branches within generated code. inline static void deserialization_set_special_target_at( - Address instruction_payload, Code code, Address target); + Address instruction_payload, InstructionStream code, Address target); // Get the size of the special target encoded at 'instruction_payload'. inline static int deserialization_special_target_size( @@ -505,7 +505,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 }; // --------------------------------------------------------------------------- - // Code generation + // InstructionStream generation // // Function names correspond one-to-one to x64 instruction mnemonics. // Unless specified otherwise, instructions operate on 64-bit operands. @@ -2130,7 +2130,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { WriteUnalignedValue(addr_at(pos), x); } - // Code emission. + // InstructionStream emission. V8_NOINLINE V8_PRESERVE_MOST void GrowBuffer(); template diff --git a/src/codegen/x64/macro-assembler-x64.cc b/src/codegen/x64/macro-assembler-x64.cc index 483747cb17..6392adb2f6 100644 --- a/src/codegen/x64/macro-assembler-x64.cc +++ b/src/codegen/x64/macro-assembler-x64.cc @@ -2308,13 +2308,13 @@ void TurboAssembler::LoadCodeDataContainerEntry( CodeDataContainer::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin( +void TurboAssembler::LoadCodeDataContainerInstructionStreamNonBuiltin( Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); - // Compute the Code object pointer from the code entry point. + // Compute the InstructionStream object pointer from the code entry point. 
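The assembler-x64.h comment above mentions 32-bit pc-relative jumps between InstructionStream objects. A standalone sketch of that encoding, assuming the displacement occupies the final four bytes of the instruction and is resolved relative to the instruction's end:

#include <cassert>
#include <cstdint>

int32_t DisplacementFor(uintptr_t disp_field_addr, uintptr_t target) {
  // The CPU resolves rel32 against the end of the instruction.
  return static_cast<int32_t>(target - (disp_field_addr + 4));
}

uintptr_t TargetAddressAt(uintptr_t disp_field_addr, int32_t disp) {
  return disp_field_addr + 4 + disp;
}

int main() {
  uintptr_t field = 0x100000, target = 0x100400;
  assert(TargetAddressAt(field, DisplacementFor(field, target)) == target);
  return 0;
}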
movq(destination, FieldOperand(code_data_container_object, CodeDataContainer::kCodeEntryPointOffset)); - subq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag)); + subq(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } void TurboAssembler::CallCodeDataContainerObject( @@ -2610,7 +2610,7 @@ void MacroAssembler::TestCodeDataContainerIsMarkedForDeoptimization( Register code_data_container) { testl(FieldOperand(code_data_container, CodeDataContainer::kKindSpecificFlagsOffset), - Immediate(1 << Code::kMarkedForDeoptimizationBit)); + Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit)); } Immediate MacroAssembler::ClearedValue() const { @@ -3391,11 +3391,12 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { // 2. test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. void TurboAssembler::BailoutIfDeoptimized(Register scratch) { - int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; + int offset = InstructionStream::kCodeDataContainerOffset - + InstructionStream::kHeaderSize; LoadTaggedPointerField(scratch, Operand(kJavaScriptCallCodeStartRegister, offset)); testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset), - Immediate(1 << Code::kMarkedForDeoptimizationBit)); + Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit)); Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), RelocInfo::CODE_TARGET, not_zero); } diff --git a/src/codegen/x64/macro-assembler-x64.h b/src/codegen/x64/macro-assembler-x64.h index c06ed63819..4edeebe479 100644 --- a/src/codegen/x64/macro-assembler-x64.h +++ b/src/codegen/x64/macro-assembler-x64.h @@ -404,11 +404,11 @@ class V8_EXPORT_PRIVATE TurboAssembler void LoadCodeDataContainerEntry(Register destination, Register code_data_container_object); // Load code entry point from the CodeDataContainer object and compute - // Code object pointer out of it. Must not be used for CodeDataContainers - // corresponding to builtins, because their entry points values point to - // the embedded instruction stream in .text section. - void LoadCodeDataContainerCodeNonBuiltin(Register destination, - Register code_data_container_object); + // InstructionStream object pointer out of it. Must not be used for + // CodeDataContainers corresponding to builtins, because their entry points + // values point to the embedded instruction stream in .text section. + void LoadCodeDataContainerInstructionStreamNonBuiltin( + Register destination, Register code_data_container_object); void CallCodeDataContainerObject(Register code_data_container_object); void JumpCodeDataContainerObject(Register code_data_container_object, JumpMode jump_mode = JumpMode::kJump); diff --git a/src/common/globals.h b/src/common/globals.h index 17fabafbcd..17e060ac37 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -867,7 +867,7 @@ using RuntimeArguments = Arguments; using JavaScriptArguments = Arguments; class Assembler; class ClassScope; -class Code; +class InstructionStream; class CodeDataContainer; class CodeSpace; class Context; @@ -989,9 +989,10 @@ using HeapObjectSlot = SlotTraits::THeapObjectSlot; using OffHeapObjectSlot = SlotTraits::TOffHeapObjectSlot; // A CodeObjectSlot instance describes a kTaggedSize-sized field ("slot") -// holding a strong pointer to a Code object. The Code object slots might be -// compressed and since code space might be allocated off the main heap -// the load operations require explicit cage base value for code space. 
+// holding a strong pointer to a InstructionStream object. The InstructionStream +// object slots might be compressed and since code space might be allocated off +// the main heap the load operations require explicit cage base value for code +// space. using CodeObjectSlot = SlotTraits::TCodeObjectSlot; using WeakSlotCallback = bool (*)(FullObjectSlot pointer); @@ -1028,10 +1029,10 @@ constexpr int kSpaceTagSize = 4; static_assert(FIRST_SPACE == 0); enum class AllocationType : uint8_t { - kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE - kOld, // Regular object allocated in OLD_SPACE or LO_SPACE - kCode, // Code object allocated in CODE_SPACE or CODE_LO_SPACE - kMap, // Map object allocated in OLD_SPACE + kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE + kOld, // Regular object allocated in OLD_SPACE or LO_SPACE + kCode, // InstructionStream object allocated in CODE_SPACE or CODE_LO_SPACE + kMap, // Map object allocated in OLD_SPACE kReadOnly, // Object allocated in RO_SPACE kSharedOld, // Regular object allocated in OLD_SPACE in the shared heap kSharedMap, // Map object in OLD_SPACE in the shared heap @@ -2056,7 +2057,8 @@ enum class IcCheckType { kElement, kProperty }; // Helper stubs can be called in different ways depending on where the target // code is located and how the call sequence is expected to look like: -// - CodeObject: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}. +// - CodeObject: Call on-heap {Code} object via +// {RelocInfo::CODE_TARGET}. // - WasmRuntimeStub: Call native {WasmCode} stub via // {RelocInfo::WASM_STUB_CALL}. // - BuiltinPointer: Call a builtin based on a builtin pointer with dynamic diff --git a/src/common/ptr-compr.h b/src/common/ptr-compr.h index 16ab0d9ba2..761ab4fdd3 100644 --- a/src/common/ptr-compr.h +++ b/src/common/ptr-compr.h @@ -60,9 +60,9 @@ class V8HeapCompressionScheme { #ifdef V8_EXTERNAL_CODE_SPACE -// Compression scheme used for fields containing Code objects (namely for the -// CodeDataContainer::code field). -// Same as V8HeapCompressionScheme but with a different base value. +// Compression scheme used for fields containing InstructionStream objects +// (namely for the CodeDataContainer::code field). Same as +// V8HeapCompressionScheme but with a different base value. 
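To make the AllocationType comments above concrete, an illustrative mapping from allocation type to target space, following the per-enumerator comments (large-object variants included where the comments name them):

#include <cstdint>
#include <string>

enum class AllocationType : uint8_t { kYoung, kOld, kCode, kMap, kReadOnly };

std::string TargetSpace(AllocationType type, bool large_object) {
  switch (type) {
    case AllocationType::kYoung:
      return large_object ? "NEW_LO_SPACE" : "NEW_SPACE";
    case AllocationType::kOld:
      return large_object ? "LO_SPACE" : "OLD_SPACE";
    case AllocationType::kCode:  // InstructionStream objects
      return large_object ? "CODE_LO_SPACE" : "CODE_SPACE";
    case AllocationType::kMap:
      return "OLD_SPACE";
    case AllocationType::kReadOnly:
      return "RO_SPACE";
  }
  return "";
}

int main() {
  return TargetSpace(AllocationType::kCode, false) == "CODE_SPACE" ? 0 : 1;
}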
diff --git a/src/common/ptr-compr.h b/src/common/ptr-compr.h
index 16ab0d9ba2..761ab4fdd3 100644
--- a/src/common/ptr-compr.h
+++ b/src/common/ptr-compr.h
@@ -60,9 +60,9 @@ class V8HeapCompressionScheme {
 
 #ifdef V8_EXTERNAL_CODE_SPACE
 
-// Compression scheme used for fields containing Code objects (namely for the
-// CodeDataContainer::code field).
-// Same as V8HeapCompressionScheme but with a different base value.
+// Compression scheme used for fields containing InstructionStream objects
+// (namely for the CodeDataContainer::code field). Same as
+// V8HeapCompressionScheme but with a different base value.
 class ExternalCodeCompressionScheme {
  public:
   V8_INLINE static Address PrepareCageBaseAddress(Address on_heap_addr);
diff --git a/src/compiler/backend/arm/code-generator-arm.cc b/src/compiler/backend/arm/code-generator-arm.cc
index 3a14121582..2106cdf544 100644
--- a/src/compiler/backend/arm/code-generator-arm.cc
+++ b/src/compiler/backend/arm/code-generator-arm.cc
@@ -647,11 +647,12 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 void CodeGenerator::BailoutIfDeoptimized() {
   UseScratchRegisterScope temps(tasm());
   Register scratch = temps.Acquire();
-  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  int offset = InstructionStream::kCodeDataContainerOffset -
+               InstructionStream::kHeaderSize;
   __ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ ldr(scratch,
          FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
-  __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+  __ tst(scratch, Operand(1 << InstructionStream::kMarkedForDeoptimizationBit));
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
           RelocInfo::CODE_TARGET, ne);
 }
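The same three-step check is emitted by every backend in this patch (x64, arm, arm64, ia32): walk back from the code start to the InstructionStream's container field, load the kind-specific flags, and test the deopt bit. Expressed as plain C++ over raw memory, ignoring pointer compression, and with every constant an assumed placeholder rather than the real header value:

#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;             // tagged pointers are off by one
constexpr intptr_t kHeaderSize = 64;               // InstructionStream::kHeaderSize (assumed)
constexpr intptr_t kCodeDataContainerOffset = 8;   // InstructionStream field (assumed)
constexpr intptr_t kKindSpecificFlagsOffset = 16;  // CodeDataContainer field (assumed)
constexpr uint32_t kMarkedForDeoptimizationBit = 0;

// code_start plays the role of kJavaScriptCallCodeStartRegister: the address
// of the first instruction, i.e. the untagged object address + kHeaderSize.
bool IsMarkedForDeoptimization(uintptr_t code_start) {
  // Mirrors Operand(kJavaScriptCallCodeStartRegister,
  //                 kCodeDataContainerOffset - kHeaderSize): step back from
  // the instruction start to the container field of the InstructionStream.
  uintptr_t container = *reinterpret_cast<uintptr_t*>(
      code_start + kCodeDataContainerOffset - kHeaderSize);
  // Mirrors FieldOperand(container, kKindSpecificFlagsOffset), which
  // subtracts the heap-object tag before adding the field offset.
  uint32_t flags = *reinterpret_cast<uint32_t*>(
      container + kKindSpecificFlagsOffset - kHeapObjectTag);
  return (flags & (1u << kMarkedForDeoptimizationBit)) != 0;
}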
diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc
index 52ddf93a51..27a3ee8eac 100644
--- a/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -3376,8 +3376,8 @@ void CodeGenerator::PrepareForDeoptimizationExits(
       false, false,
       static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
 
-  // Check which deopt kinds exist in this Code object, to avoid emitting jumps
-  // to unused entries.
+  // Check which deopt kinds exist in this InstructionStream object, to avoid
+  // emitting jumps to unused entries.
   bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
   for (auto exit : *exits) {
     saw_deopt_kind[static_cast<int>(exit->kind())] = true;
diff --git a/src/compiler/backend/code-generator.cc b/src/compiler/backend/code-generator.cc
index 929fc7eb77..cd040cafcf 100644
--- a/src/compiler/backend/code-generator.cc
+++ b/src/compiler/backend/code-generator.cc
@@ -411,7 +411,7 @@ void CodeGenerator::AssembleCode() {
   unwinding_info_writer_.Finish(tasm()->pc_offset());
 
   // Final alignment before starting on the metadata section.
-  tasm()->Align(Code::kMetadataAlignment);
+  tasm()->Align(InstructionStream::kMetadataAlignment);
 
   safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
 
@@ -467,10 +467,10 @@ base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
 
-MaybeHandle<Code> CodeGenerator::FinalizeCode() {
+MaybeHandle<InstructionStream> CodeGenerator::FinalizeCode() {
   if (result_ != kSuccess) {
     tasm()->AbortedCodeGeneration();
-    return MaybeHandle<Code>();
+    return MaybeHandle<InstructionStream>();
   }
 
   // Allocate the source position table.
@@ -494,7 +494,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
     unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
   }
 
-  MaybeHandle<Code> maybe_code =
+  MaybeHandle<InstructionStream> maybe_code =
       Factory::CodeBuilder(isolate(), desc, info()->code_kind())
           .set_builtin(info()->builtin())
           .set_inlined_bytecode_size(info()->inlined_bytecode_size())
@@ -506,10 +506,10 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
           .set_osr_offset(info()->osr_offset())
          .TryBuild();
 
-  Handle<Code> code;
+  Handle<InstructionStream> code;
   if (!maybe_code.ToHandle(&code)) {
     tasm()->AbortedCodeGeneration();
-    return MaybeHandle<Code>();
+    return MaybeHandle<InstructionStream>();
   }
 
   LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent(
diff --git a/src/compiler/backend/code-generator.h b/src/compiler/backend/code-generator.h
index 8891c1315b..159daa621b 100644
--- a/src/compiler/backend/code-generator.h
+++ b/src/compiler/backend/code-generator.h
@@ -159,7 +159,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   // produce the actual code object. If an error occurs during either phase,
   // FinalizeCode returns an empty MaybeHandle.
   void AssembleCode();  // Does not need to run on main thread.
-  MaybeHandle<Code> FinalizeCode();
+  MaybeHandle<InstructionStream> FinalizeCode();
 
   base::OwnedVector<byte> GetSourcePositionTable();
   base::OwnedVector<byte> GetProtectedInstructionsData();
@@ -466,8 +466,8 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   // with function size. {jump_deoptimization_entry_labels_} is an optimization
   // to that effect, which extracts the (potentially large) instruction
   // sequence for the final jump to the deoptimization entry into a single spot
-  // per Code object. All deopt exits can then near-call to this label. Note:
-  // not used on all architectures.
+  // per InstructionStream object. All deopt exits can then near-call to this
+  // label. Note: not used on all architectures.
   Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
 
   // The maximal combined height of all frames produced upon deoptimization, and
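FinalizeCode's return type is the load-bearing part of this hunk: MaybeHandle forces every caller through an explicit ToHandle check before the code object may be touched. The same discipline can be illustrated with std::optional standing in for MaybeHandle (a self-contained analogue; CodeHandle and FinalizeCode here are illustrative stand-ins, not the V8 types):

#include <cstdio>
#include <optional>

// Stand-in for Handle<InstructionStream>; illustration only.
struct CodeHandle { const char* name; };

// Stand-in for CodeGenerator::FinalizeCode(): an empty value signals that
// assembly or allocation failed, mirroring MaybeHandle<InstructionStream>().
std::optional<CodeHandle> FinalizeCode(bool ok) {
  if (!ok) return std::nullopt;
  return CodeHandle{"compiled-stub"};
}

int main() {
  // Mirrors: if (!maybe_code.ToHandle(&code)) { AbortedCodeGeneration(); }
  if (auto code = FinalizeCode(true)) {
    std::printf("finalized %s\n", code->name);
  } else {
    std::printf("code generation failed\n");
  }
}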
diff --git a/src/compiler/backend/ia32/code-generator-ia32.cc b/src/compiler/backend/ia32/code-generator-ia32.cc
index 58ad77f6bb..460ad8db2c 100644
--- a/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -662,11 +662,12 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 // 2. test kMarkedForDeoptimizationBit in those flags; and
 // 3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  int offset = InstructionStream::kCodeDataContainerOffset -
+               InstructionStream::kHeaderSize;
   __ push(eax);  // Push eax so we can use it as a scratch register.
   __ mov(eax, Operand(kJavaScriptCallCodeStartRegister, offset));
   __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
-          Immediate(1 << Code::kMarkedForDeoptimizationBit));
+          Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit));
   __ pop(eax);  // Restore eax.
 
   Label skip;
@@ -827,7 +828,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ PushPC();
       int pc = __ pc_offset();
       __ pop(scratch);
-      __ sub(scratch, Immediate(pc + Code::kHeaderSize - kHeapObjectTag));
+      __ sub(scratch,
+             Immediate(pc + InstructionStream::kHeaderSize - kHeapObjectTag));
       __ add(scratch, Immediate::CodeRelativeOffset(&return_location));
       __ mov(MemOperand(ebp, WasmExitFrameConstants::kCallingPCOffset),
              scratch);
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
index ccfefd98a6..dc36355329 100644
--- a/src/compiler/code-assembler.cc
+++ b/src/compiler/code-assembler.cc
@@ -162,14 +162,14 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
 }
 
 // static
-Handle<Code> CodeAssembler::GenerateCode(
+Handle<InstructionStream> CodeAssembler::GenerateCode(
     CodeAssemblerState* state, const AssemblerOptions& options,
     const ProfileDataFromFile* profile_data) {
   DCHECK(!state->code_generated_);
 
   RawMachineAssembler* rasm = state->raw_assembler_.get();
 
-  Handle<Code> code;
+  Handle<InstructionStream> code;
   Graph* graph = rasm->ExportForOptimization();
 
   code = Pipeline::GenerateCodeForCodeStub(
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index 8d9e704729..30c692726e 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -387,9 +387,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   CodeAssembler(const CodeAssembler&) = delete;
   CodeAssembler& operator=(const CodeAssembler&) = delete;
 
-  static Handle<Code> GenerateCode(CodeAssemblerState* state,
-                                   const AssemblerOptions& options,
-                                   const ProfileDataFromFile* profile_data);
+  static Handle<InstructionStream> GenerateCode(
+      CodeAssemblerState* state, const AssemblerOptions& options,
+      const ProfileDataFromFile* profile_data);
 
   bool Is64() const;
   bool Is32() const;
   bool IsFloat64RoundUpSupported() const;
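The DCHECK(!state->code_generated_) visible in the code-assembler hunk above encodes a single-shot contract: a CodeAssemblerState may be turned into code exactly once. A self-contained sketch of that contract, with illustrative stand-in names rather than the real classes:

#include <cassert>
#include <cstdio>

// Stand-in for CodeAssemblerState; illustration only.
struct AssemblerStateLike {
  bool code_generated = false;
};

const char* GenerateCodeOnce(AssemblerStateLike* state) {
  // Mirrors DCHECK(!state->code_generated_): finalizing twice is a bug.
  assert(!state->code_generated);
  state->code_generated = true;
  return "stub";
}

int main() {
  AssemblerStateLike state;
  std::printf("%s\n", GenerateCodeOnce(&state));
  // A second GenerateCodeOnce(&state) call would trip the assert.
}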
diff --git a/src/compiler/compilation-dependencies.cc b/src/compiler/compilation-dependencies.cc
index 2c219f5d67..74fb0adff4 100644
--- a/src/compiler/compilation-dependencies.cc
+++ b/src/compiler/compilation-dependencies.cc
@@ -115,16 +115,16 @@ class PendingDependencies final {
 
   void Register(Handle<HeapObject> object,
                 DependentCode::DependencyGroup group) {
-    // Code, which are per-local Isolate, cannot depend on objects in the shared
-    // heap. Shared heap dependencies are designed to never invalidate
-    // assumptions. E.g., maps for shared structs do not have transitions or
-    // change the shape of their fields. See
+    // InstructionStream, which are per-local Isolate, cannot depend on objects
+    // in the shared heap. Shared heap dependencies are designed to never
+    // invalidate assumptions. E.g., maps for shared structs do not have
+    // transitions or change the shape of their fields. See
     // DependentCode::DeoptimizeDependencyGroups for corresponding DCHECK.
     if (object->InSharedWritableHeap()) return;
     deps_[object] |= group;
   }
 
-  void InstallAll(Isolate* isolate, Handle<Code> code) {
+  void InstallAll(Isolate* isolate, Handle<InstructionStream> code) {
     if (V8_UNLIKELY(v8_flags.predictable)) {
       InstallAllPredictable(isolate, code);
       return;
@@ -139,7 +139,7 @@ class PendingDependencies final {
     }
   }
 
-  void InstallAllPredictable(Isolate* isolate, Handle<Code> code) {
+  void InstallAllPredictable(Isolate* isolate,
+                             Handle<InstructionStream> code) {
     CHECK(v8_flags.predictable);
     // First, guarantee predictable iteration order.
     using HandleAndGroup =
@@ -1189,7 +1189,7 @@ V8_INLINE void TraceInvalidCompilationDependency(
   PrintF("Compilation aborted due to invalid dependency: %s\n", d->ToString());
 }
 
-bool CompilationDependencies::Commit(Handle<Code> code) {
+bool CompilationDependencies::Commit(Handle<InstructionStream> code) {
   if (!PrepareInstall()) return false;
 
   {
diff --git a/src/compiler/compilation-dependencies.h b/src/compiler/compilation-dependencies.h
index b6799342d3..bd8edbd99b 100644
--- a/src/compiler/compilation-dependencies.h
+++ b/src/compiler/compilation-dependencies.h
@@ -31,7 +31,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
  public:
   CompilationDependencies(JSHeapBroker* broker, Zone* zone);
 
-  V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
+  V8_WARN_UNUSED_RESULT bool Commit(Handle<InstructionStream> code);
 
   // Return the initial map of {function} and record the assumption that it
   // stays the initial map.
diff --git a/src/compiler/graph-assembler.h b/src/compiler/graph-assembler.h
index b491dea958..09c5bd4f32 100644
--- a/src/compiler/graph-assembler.h
+++ b/src/compiler/graph-assembler.h
@@ -134,30 +134,30 @@ class Reducer;
   V(Uint64Div)                  \
   V(Uint64Mod)
 
-#define JSGRAPH_SINGLETON_CONSTANT_LIST(V)    \
-  V(AllocateInOldGenerationStub, Code)        \
-  V(AllocateInYoungGenerationStub, Code)      \
-  V(AllocateRegularInOldGenerationStub, Code) \
-  V(AllocateRegularInYoungGenerationStub, Code) \
-  V(BigIntMap, Map)                           \
-  V(BooleanMap, Map)                          \
-  V(EmptyString, String)                      \
-  V(ExternalObjectMap, Map)                   \
-  V(False, Boolean)                           \
-  V(FixedArrayMap, Map)                       \
-  V(FixedDoubleArrayMap, Map)                 \
-  V(WeakFixedArrayMap, Map)                   \
-  V(HeapNumberMap, Map)                       \
-  V(MinusOne, Number)                         \
-  V(NaN, Number)                              \
-  V(NoContext, Object)                        \
-  V(Null, Oddball)                            \
-  V(One, Number)                              \
-  V(TheHole, Oddball)                         \
-  V(ToNumberBuiltin, Code)                    \
-  V(PlainPrimitiveToNumberBuiltin, Code)      \
-  V(True, Boolean)                            \
-  V(Undefined, Oddball)                       \
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V)                 \
+  V(AllocateInOldGenerationStub, InstructionStream)        \
+  V(AllocateInYoungGenerationStub, InstructionStream)      \
+  V(AllocateRegularInOldGenerationStub, InstructionStream) \
+  V(AllocateRegularInYoungGenerationStub, InstructionStream) \
+  V(BigIntMap, Map)                                        \
+  V(BooleanMap, Map)                                       \
+  V(EmptyString, String)                                   \
+  V(ExternalObjectMap, Map)                                \
+  V(False, Boolean)                                        \
+  V(FixedArrayMap, Map)                                    \
+  V(FixedDoubleArrayMap, Map)                              \
+  V(WeakFixedArrayMap, Map)                                \
+  V(HeapNumberMap, Map)                                    \
+  V(MinusOne, Number)                                      \
+  V(NaN, Number)                                           \
+  V(NoContext, Object)                                     \
+  V(Null, Oddball)                                         \
+  V(One, Number)                                           \
+  V(TheHole, Oddball)                                      \
+  V(ToNumberBuiltin, InstructionStream)                    \
+  V(PlainPrimitiveToNumberBuiltin, InstructionStream)      \
+  V(True, Boolean)                                         \
+  V(Undefined, Oddball)                                    \
   V(Zero, Number)
 
 class GraphAssembler;
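JSGRAPH_SINGLETON_CONSTANT_LIST is an X-macro: the rename only has to touch the list once, and every client that expands it with its own V picks up the new type column automatically. A self-contained illustration of the expansion technique, using a toy three-entry list rather than the real one:

#include <cstdio>

// Toy two-column X-macro list in the same style as the one above;
// the entries are illustrative.
#define TOY_SINGLETON_LIST(V)           \
  V(ToNumberBuiltin, InstructionStream) \
  V(BigIntMap, Map)                     \
  V(Zero, Number)

// One expansion: declare one accessor per (Name, Type) row.
#define DECLARE_GETTER(Name, Type) \
  const char* Name##Constant() { return #Name ": " #Type; }
TOY_SINGLETON_LIST(DECLARE_GETTER)
#undef DECLARE_GETTER

int main() {
  // A second expansion over the same list, visiting every row again.
#define PRINT_ENTRY(Name, Type) std::printf("%s\n", Name##Constant());
  TOY_SINGLETON_LIST(PRINT_ENTRY)
#undef PRINT_ENTRY
}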
diff --git a/src/compiler/heap-refs.cc b/src/compiler/heap-refs.cc
index 40ae942bf0..0628a87785 100644
--- a/src/compiler/heap-refs.cc
+++ b/src/compiler/heap-refs.cc
@@ -71,7 +71,8 @@ bool IsReadOnlyHeapObjectForCompiler(PtrComprCageBase cage_base,
   // TODO(jgruber): Remove this compiler-specific predicate and use the plain
   // heap predicate instead. This would involve removing the special cases for
   // builtins.
-  return (object.IsCode(cage_base) && Code::cast(object).is_builtin()) ||
+  return (object.IsInstructionStream(cage_base) &&
+          InstructionStream::cast(object).is_builtin()) ||
          ReadOnlyHeap::Contains(object);
 }
 
@@ -2286,7 +2287,7 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
 
 namespace {
 
-unsigned GetInlinedBytecodeSizeImpl(Code code) {
+unsigned GetInlinedBytecodeSizeImpl(InstructionStream code) {
   unsigned value = code.inlined_bytecode_size();
   if (value > 0) {
     // Don't report inlined bytecode size if the code object was already
@@ -2298,7 +2299,7 @@ unsigned GetInlinedBytecodeSizeImpl(Code code) {
 
 }  // namespace
 
-unsigned CodeRef::GetInlinedBytecodeSize() const {
+unsigned InstructionStreamRef::GetInlinedBytecodeSize() const {
   return GetInlinedBytecodeSizeImpl(*object());
 }
 
@@ -2308,9 +2309,10 @@ unsigned CodeDataContainerRef::GetInlinedBytecodeSize() const {
     return 0;
   }
 
-  // Safe to do a relaxed conversion to Code here since CodeDataContainer::code
-  // field is modified only by GC and the CodeDataContainer was acquire-loaded.
-  Code code = code_data_container.code(kRelaxedLoad);
+  // Safe to do a relaxed conversion to InstructionStream here since
+  // CodeDataContainer::code field is modified only by GC and the
+  // CodeDataContainer was acquire-loaded.
+  InstructionStream code = code_data_container.instruction_stream(kRelaxedLoad);
   return GetInlinedBytecodeSizeImpl(code);
 }
 
diff --git a/src/compiler/heap-refs.h b/src/compiler/heap-refs.h
index 4ced0cdbe0..d1442dd07d 100644
--- a/src/compiler/heap-refs.h
+++ b/src/compiler/heap-refs.h
@@ -111,7 +111,7 @@ enum class RefSerializationKind {
   BACKGROUND_SERIALIZED(BigInt)       \
   NEVER_SERIALIZED(CallHandlerInfo)   \
   NEVER_SERIALIZED(Cell)              \
-  NEVER_SERIALIZED(Code)              \
+  NEVER_SERIALIZED(InstructionStream) \
   NEVER_SERIALIZED(CodeDataContainer) \
   NEVER_SERIALIZED(Context)           \
   NEVER_SERIALIZED(DescriptorArray)   \
@@ -1010,11 +1010,11 @@ class JSGlobalProxyRef : public JSObjectRef {
   Handle<JSGlobalProxy> object() const;
 };
 
-class CodeRef : public HeapObjectRef {
+class InstructionStreamRef : public HeapObjectRef {
  public:
-  DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef)
+  DEFINE_REF_CONSTRUCTOR(InstructionStream, HeapObjectRef)
 
-  Handle<Code> object() const;
+  Handle<InstructionStream> object() const;
 
   unsigned GetInlinedBytecodeSize() const;
 };
diff --git a/src/compiler/js-heap-broker.h b/src/compiler/js-heap-broker.h
index 4e4fffc8cc..0b5b5845f0 100644
--- a/src/compiler/js-heap-broker.h
+++ b/src/compiler/js-heap-broker.h
@@ -118,7 +118,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   Isolate* isolate() const { return isolate_; }
 
   // The pointer compression cage base value used for decompression of all
-  // tagged values except references to Code objects.
+  // tagged values except references to InstructionStream objects.
   PtrComprCageBase cage_base() const {
 #if V8_COMPRESS_POINTERS
     return cage_base_;
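The "relaxed is safe after an acquire load" comment in the heap-refs.cc hunk is an instance of a general memory-ordering rule: fields written before a release-store of the owning object are visible to any thread that acquire-loads that object, so the field reads themselves need no extra ordering. A self-contained sketch of that pairing with std::atomic stand-ins (not V8's TaggedField machinery):

#include <atomic>
#include <cassert>
#include <thread>

struct ContainerLike {
  std::atomic<int> code{0};  // stands in for the GC-maintained code field
};

std::atomic<ContainerLike*> published{nullptr};

int main() {
  ContainerLike container;
  std::thread writer([&] {
    container.code.store(42, std::memory_order_relaxed);
    published.store(&container, std::memory_order_release);  // publish
  });
  // Acquire-loading the container synchronizes with the release store, so
  // the earlier relaxed write to `code` is visible and the field read can
  // itself be relaxed, exactly as the comment above argues.
  ContainerLike* c;
  while ((c = published.load(std::memory_order_acquire)) == nullptr) {
  }
  assert(c->code.load(std::memory_order_relaxed) == 42);
  writer.join();
}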
diff --git a/src/compiler/memory-lowering.cc b/src/compiler/memory-lowering.cc
index 8624dffbde..3593c6e961 100644
--- a/src/compiler/memory-lowering.cc
+++ b/src/compiler/memory-lowering.cc
@@ -167,9 +167,10 @@ Reduction MemoryLowering::ReduceAllocateRaw(
   if (v8_flags.single_generation && allocation_type == AllocationType::kYoung) {
     allocation_type = AllocationType::kOld;
   }
 
-  // Code objects may have a maximum size smaller than kMaxHeapObjectSize due to
-  // guard pages. If we need to support allocating code here we would need to
-  // call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
+  // InstructionStream objects may have a maximum size smaller than
+  // kMaxHeapObjectSize due to guard pages. If we need to support allocating
+  // code here we would need to call
+  // MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
   DCHECK_NE(allocation_type, AllocationType::kCode);
   Node* value;
   Node* size = node->InputAt(0);
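The DCHECK_NE is the static shortcut for the size argument in the comment: inlined allocation folding may assume the regular heap-object size limit, which would be too large for guard-paged code pages. A sketch of the size check a code-capable path would need; both constants and the helper are assumed placeholders, with only the guard-page idea taken from the comment above:

#include <cassert>
#include <cstddef>

// Assumed limits; the real values would come from MemoryChunkLayout.
constexpr size_t kMaxRegularHeapObjectSize = 128 * 1024;
constexpr size_t kCodeGuardPageReservation = 2 * 4096;  // leading + trailing

// Hypothetical analogue of MemoryChunkLayout::MaxRegularCodeObjectSize():
// code pages lose the guard-page reservation from their usable area.
constexpr size_t MaxRegularCodeObjectSize() {
  return kMaxRegularHeapObjectSize - kCodeGuardPageReservation;
}

bool FitsInlineAllocation(size_t size, bool is_code) {
  size_t limit = is_code ? MaxRegularCodeObjectSize() : kMaxRegularHeapObjectSize;
  return size <= limit;
}

int main() {
  assert(FitsInlineAllocation(64 * 1024, /*is_code=*/false));
  // The largest regular object no longer fits once guard pages are deducted.
  assert(!FitsInlineAllocation(kMaxRegularHeapObjectSize, /*is_code=*/true));
}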
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 39355c85f1..d68eb27d37 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -360,8 +360,8 @@ class PipelineData {
   bool verify_graph() const { return verify_graph_; }
   void set_verify_graph(bool value) { verify_graph_ = value; }
 
-  MaybeHandle<Code> code() { return code_; }
-  void set_code(MaybeHandle<Code> code) {
+  MaybeHandle<InstructionStream> code() { return code_; }
+  void set_code(MaybeHandle<InstructionStream> code) {
     DCHECK(code_.is_null());
     code_ = code;
   }
@@ -655,7 +655,7 @@ class PipelineData {
   bool verify_graph_ = false;
   int start_source_position_ = kNoSourcePosition;
   base::Optional<OsrHelper> osr_helper_;
-  MaybeHandle<Code> code_;
+  MaybeHandle<InstructionStream> code_;
   CodeGenerator* code_generator_ = nullptr;
   Typer* typer_ = nullptr;
   Typer::Flags typer_flags_ = Typer::kNoFlags;
@@ -750,15 +750,15 @@ class PipelineImpl final {
   void AssembleCode(Linkage* linkage);
 
   // Step D. Run the code finalization pass.
-  MaybeHandle<Code> FinalizeCode(bool retire_broker = true);
+  MaybeHandle<InstructionStream> FinalizeCode(bool retire_broker = true);
 
   // Step E. Install any code dependencies.
-  bool CommitDependencies(Handle<Code> code);
+  bool CommitDependencies(Handle<InstructionStream> code);
 
   void VerifyGeneratedCodeIsIdempotent();
   void RunPrintAndVerify(const char* phase, bool untyped = false);
   bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
-  MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
+  MaybeHandle<InstructionStream> GenerateCode(CallDescriptor* call_descriptor);
   void AllocateRegistersForTopTier(const RegisterConfiguration* config,
                                    CallDescriptor* call_descriptor,
                                    bool run_verifier);
@@ -945,7 +945,7 @@ void PrintParticipatingSource(OptimizedCompilationInfo* info,
 }
 
 // Print the code after compiling it.
-void PrintCode(Isolate* isolate, Handle<Code> code,
+void PrintCode(Isolate* isolate, Handle<InstructionStream> code,
                OptimizedCompilationInfo* info) {
   if (v8_flags.print_opt_source && info->IsOptimizing()) {
     PrintParticipatingSource(info, isolate);
@@ -1145,7 +1145,7 @@ class PipelineCompilationJob final : public TurbofanCompilationJob {
   // Registers weak object to optimized code dependencies.
   void RegisterWeakObjectsInOptimizedCode(Isolate* isolate,
                                           Handle<NativeContext> context,
-                                          Handle<Code> code);
+                                          Handle<InstructionStream> code);
 
  private:
  Zone zone_;
@@ -1286,8 +1286,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
   // phases happening during PrepareJob.
   PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
   RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
-  MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
-  Handle<Code> code;
+  MaybeHandle<InstructionStream> maybe_code = pipeline_.FinalizeCode();
+  Handle<InstructionStream> code;
   if (!maybe_code.ToHandle(&code)) {
     if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
       return AbortOptimization(BailoutReason::kCodeGenerationFailed);
@@ -1305,7 +1305,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
 }
 
 void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
-    Isolate* isolate, Handle<NativeContext> context, Handle<Code> code) {
+    Isolate* isolate, Handle<NativeContext> context,
+    Handle<InstructionStream> code) {
   std::vector<Handle<Map>> maps;
   DCHECK(code->is_optimized_code());
   {
@@ -2916,7 +2917,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
 
 CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
     Isolate* isolate) {
-  Handle<Code> code;
+  Handle<InstructionStream> code;
   if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
     V8::FatalProcessOutOfMemory(isolate,
                                 "WasmHeapStubCompilationJob::FinalizeJobImpl");
@@ -3246,7 +3247,7 @@ int HashGraphForPGO(Graph* graph) {
 
 }  // namespace
 
-MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
+MaybeHandle<InstructionStream> Pipeline::GenerateCodeForCodeStub(
     Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
     JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
     const char* debug_name, Builtin builtin, const AssemblerOptions& options,
@@ -3741,7 +3742,7 @@ void Pipeline::GenerateCodeForWasmFunction(
 #endif  // V8_ENABLE_WEBASSEMBLY
 
 // static
-MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
+MaybeHandle<InstructionStream> Pipeline::GenerateCodeForTesting(
     OptimizedCompilationInfo* info, Isolate* isolate,
     std::unique_ptr<JSHeapBroker>* out_broker) {
   ZoneStats zone_stats(isolate->allocator());
@@ -3764,9 +3765,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
   {
     LocalIsolateScope local_isolate_scope(data.broker(), info,
                                           isolate->main_thread_local_isolate());
-    if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
+    if (!pipeline.CreateGraph()) return MaybeHandle<InstructionStream>();
     // We selectively Unpark inside OptimizeGraph.
-    if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
+    if (!pipeline.OptimizeGraph(&linkage))
+      return MaybeHandle<InstructionStream>();
 
     pipeline.AssembleCode(&linkage);
   }
 
@@ -3780,17 +3782,17 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
         info->DetachPersistentHandles(), info->DetachCanonicalHandles());
   }
 
-  Handle<Code> code;
+  Handle<InstructionStream> code;
   if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
       pipeline.CommitDependencies(code)) {
     if (!will_retire_broker) *out_broker = data.ReleaseBroker();
     return code;
   }
-  return MaybeHandle<Code>();
+  return MaybeHandle<InstructionStream>();
 }
 
 // static
-MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
+MaybeHandle<InstructionStream> Pipeline::GenerateCodeForTesting(
     OptimizedCompilationInfo* info, Isolate* isolate,
     CallDescriptor* call_descriptor, Graph* graph,
     const AssemblerOptions& options, Schedule* schedule) {
@@ -3822,12 +3824,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
     pipeline.ComputeScheduledGraph();
   }
 
-  Handle<Code> code;
+  Handle<InstructionStream> code;
   if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
       pipeline.CommitDependencies(code)) {
     return code;
   }
-  return MaybeHandle<Code>();
+  return MaybeHandle<InstructionStream>();
 }
 
 // static
@@ -4110,7 +4112,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage) {
   data->EndPhaseKind();
 }
 
-MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
+MaybeHandle<InstructionStream> PipelineImpl::FinalizeCode(bool retire_broker) {
   PipelineData* data = this->data_;
   data->BeginPhaseKind("V8.TFFinalizeCode");
   if (data->broker() && retire_broker) {
@@ -4118,8 +4120,8 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
   }
   Run<FinalizeCodePhase>();
 
-  MaybeHandle<Code> maybe_code = data->code();
-  Handle<Code> code;
+  MaybeHandle<InstructionStream> maybe_code = data->code();
+  Handle<InstructionStream> code;
   if (!maybe_code.ToHandle(&code)) {
     return maybe_code;
   }
@@ -4174,14 +4176,15 @@ bool PipelineImpl::SelectInstructionsAndAssemble(
   return true;
 }
 
-MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
+MaybeHandle<InstructionStream> PipelineImpl::GenerateCode(
+    CallDescriptor* call_descriptor) {
   if (!SelectInstructionsAndAssemble(call_descriptor)) {
-    return MaybeHandle<Code>();
+    return MaybeHandle<InstructionStream>();
   }
   return FinalizeCode();
 }
 
-bool PipelineImpl::CommitDependencies(Handle<Code> code) {
+bool PipelineImpl::CommitDependencies(Handle<InstructionStream> code) {
   return data_->dependencies() == nullptr ||
          data_->dependencies()->Commit(code);
 }
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index ba1decfafc..c0c42131df 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -75,7 +75,7 @@ class Pipeline : public AllStatic {
       SourcePositionTable* source_positions = nullptr);
 
   // Run the pipeline on a machine graph and generate code.
-  static MaybeHandle<Code> GenerateCodeForCodeStub(
+  static MaybeHandle<InstructionStream> GenerateCodeForCodeStub(
       Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
       JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
      const char* debug_name, Builtin builtin, const AssemblerOptions& options,
@@ -88,16 +88,17 @@ class Pipeline : public AllStatic {
   // Run the pipeline on JavaScript bytecode and generate code. If requested,
   // hands out the heap broker on success, transferring its ownership to the
   // caller.
-  V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
-      OptimizedCompilationInfo* info, Isolate* isolate,
-      std::unique_ptr<JSHeapBroker>* out_broker = nullptr);
+  V8_EXPORT_PRIVATE static MaybeHandle<InstructionStream>
+  GenerateCodeForTesting(OptimizedCompilationInfo* info, Isolate* isolate,
+                         std::unique_ptr<JSHeapBroker>* out_broker = nullptr);
 
   // Run the pipeline on a machine graph and generate code. If {schedule} is
   // {nullptr}, then compute a new schedule for code generation.
-  V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
-      OptimizedCompilationInfo* info, Isolate* isolate,
-      CallDescriptor* call_descriptor, Graph* graph,
-      const AssemblerOptions& options, Schedule* schedule = nullptr);
+  V8_EXPORT_PRIVATE static MaybeHandle<InstructionStream>
+  GenerateCodeForTesting(OptimizedCompilationInfo* info, Isolate* isolate,
+                         CallDescriptor* call_descriptor, Graph* graph,
+                         const AssemblerOptions& options,
+                         Schedule* schedule = nullptr);
 
   // Run just the register allocator phases.
   V8_EXPORT_PRIVATE static void AllocateRegistersForTesting(
diff --git a/src/compiler/types.cc b/src/compiler/types.cc
index c81a185da0..020c3242a1 100644
--- a/src/compiler/types.cc
+++ b/src/compiler/types.cc
@@ -368,7 +368,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
     case SCRIPT_CONTEXT_TYPE:
     case WITH_CONTEXT_TYPE:
     case SCRIPT_TYPE:
-    case CODE_TYPE:
+    case INSTRUCTION_STREAM_TYPE:
     case CODE_DATA_CONTAINER_TYPE:
     case PROPERTY_CELL_TYPE:
     case SOURCE_TEXT_MODULE_TYPE:
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 9f1b642189..a4bf0676ee 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -8312,11 +8312,9 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module,
   }
 }
 
-MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
-                                         const wasm::FunctionSig* sig,
-                                         WasmImportCallKind kind,
-                                         int expected_arity,
-                                         wasm::Suspend suspend) {
+MaybeHandle<InstructionStream> CompileWasmToJSWrapper(
+    Isolate* isolate, const wasm::FunctionSig* sig, WasmImportCallKind kind,
+    int expected_arity, wasm::Suspend suspend) {
   std::unique_ptr<Zone> zone = std::make_unique<Zone>(
       isolate->allocator(), ZONE_NAME, kCompressGraphZone);
 
@@ -8359,15 +8357,15 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
   if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
           CompilationJob::FAILED ||
       job->FinalizeJob(isolate) == CompilationJob::FAILED) {
-    return Handle<Code>();
+    return Handle<InstructionStream>();
   }
-  Handle<Code> code = job->compilation_info()->code();
+  Handle<InstructionStream> code = job->compilation_info()->code();
   return code;
 }
 
-MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
-                                       const wasm::FunctionSig* sig,
-                                       const wasm::WasmModule* module) {
+MaybeHandle<InstructionStream> CompileJSToJSWrapper(
+    Isolate* isolate, const wasm::FunctionSig* sig,
+    const wasm::WasmModule* module) {
   std::unique_ptr<Zone> zone = std::make_unique<Zone>(
       isolate->allocator(), ZONE_NAME, kCompressGraphZone);
   Graph* graph = zone->New<Graph>(zone.get());
@@ -8409,7 +8407,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
       job->FinalizeJob(isolate) == CompilationJob::FAILED) {
     return {};
   }
-  Handle<Code> code = job->compilation_info()->code();
+  Handle<InstructionStream> code = job->compilation_info()->code();
   return code;
 }
 
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 089129fbff..74b6a73d5c 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -142,17 +142,15 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
     const wasm::WasmModule* module, bool is_import,
     const wasm::WasmFeatures& enabled_features);
 
-MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
-                                         const wasm::FunctionSig* sig,
-                                         WasmImportCallKind kind,
-                                         int expected_arity,
-                                         wasm::Suspend suspend);
+MaybeHandle<InstructionStream> CompileWasmToJSWrapper(
+    Isolate* isolate, const wasm::FunctionSig* sig, WasmImportCallKind kind,
+    int expected_arity, wasm::Suspend suspend);
 
 // Compiles a stub with JS linkage that serves as an adapter for function
 // objects constructed via {WebAssembly.Function}. It performs a round-trip
 // simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
-MaybeHandle<Code> CompileJSToJSWrapper(Isolate*, const wasm::FunctionSig*,
-                                       const wasm::WasmModule* module);
+MaybeHandle<InstructionStream> CompileJSToJSWrapper(
+    Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);
 
 enum CWasmEntryParameters {
   kCodeEntry,
diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc
index a1ebe998c8..40f831ba31 100644
--- a/src/debug/debug-evaluate.cc
+++ b/src/debug/debug-evaluate.cc
@@ -1231,7 +1231,8 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
   for (Builtin caller = Builtins::kFirst; caller <= Builtins::kLast; ++caller) {
     DebugInfo::SideEffectState state = BuiltinGetSideEffectState(caller);
     if (state != DebugInfo::kHasNoSideEffect) continue;
-    Code code = FromCodeDataContainer(isolate->builtins()->code(caller));
+    InstructionStream code =
+        FromCodeDataContainer(isolate->builtins()->code(caller));
     int mode = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
                RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
 
diff --git a/src/debug/debug.cc b/src/debug/debug.cc
index c6748b19c4..335e4ae7c2 100644
--- a/src/debug/debug.cc
+++ b/src/debug/debug.cc
@@ -161,7 +161,7 @@ void BreakLocation::AllAtCurrentStatement(
   int offset = summary.code_offset();
   Handle<AbstractCode> abstract_code = summary.abstract_code();
   PtrComprCageBase cage_base = GetPtrComprCageBase(*debug_info);
-  if (abstract_code->IsCode(cage_base)) offset = offset - 1;
+  if (abstract_code->IsInstructionStream(cage_base)) offset = offset - 1;
   int statement_position;
   {
     BreakIterator it(debug_info);
@@ -1941,7 +1941,8 @@ bool Debug::FindSharedFunctionInfosIntersectingRange(
   for (const auto& candidate : candidates) {
     IsCompiledScope is_compiled_scope(candidate->is_compiled_scope(isolate_));
     if (!is_compiled_scope.is_compiled()) {
-      // Code that cannot be compiled lazily are internal and not debuggable.
+      // InstructionStream that cannot be compiled lazily are internal and not
+      // debuggable.
       DCHECK(candidate->allows_lazy_compilation());
       if (!Compiler::Compile(isolate_, candidate, Compiler::CLEAR_EXCEPTION,
                              &is_compiled_scope)) {
@@ -2006,7 +2007,8 @@ Handle Debug::FindInnermostContainingFunctionInfo(Handle